source
stringlengths
3
92
c
stringlengths
26
2.25M
GB_binop__minus_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__minus_uint32) // A.*B function (eWiseMult): GB (_AemultB_08__minus_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__minus_uint32) // A.*B function (eWiseMult): GB (_AemultB_04__minus_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_uint32) // A*D function (colscale): GB (_AxD__minus_uint32) // D*A function (rowscale): GB (_DxB__minus_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__minus_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__minus_uint32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_uint32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_uint32) // C=scalar+B GB (_bind1st__minus_uint32) // C=scalar+B' GB (_bind1st_tran__minus_uint32) // C=A+scalar GB (_bind2nd__minus_uint32) // C=A'+scalar GB (_bind2nd_tran__minus_uint32) // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij - bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define 
GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x - y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_UINT32 || GxB_NO_MINUS_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__minus_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__minus_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__minus_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__minus_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__minus_uint32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool 
D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__minus_uint32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__minus_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__minus_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool 
Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__minus_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__minus_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__minus_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__minus_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = (x - bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__minus_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij - y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x - aij) ; \ } GrB_Info GB (_bind1st_tran__minus_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - y) ; \ } GrB_Info GB (_bind2nd_tran__minus_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
mobilenet_32.c
/* Pretrained MobileNet Convolutional Neural Network in C language and OpenMP API GitHUB Page: https://github.com/jcanore/vgg16 Author: ZFTurbo/jocare Compilation: gcc -O3 MobileNet_CPU_cifar.c -lm -fopenmp -o MobileNet_CPU_cifar Usage: MobileNet_CPU_cifar <weights_path> <file_with_list_of_images> <output file> <output convolution features (optional)> Example: MobileNet_CPU_cifar ../../weights/weights.txt" ../../img/image_list.txt results_imagenet_conv.txt 1 */ #include <ctype.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <time.h> #include <unistd.h> double get_seconds(struct timeval tStart, struct timeval tEnd) { return ((tEnd.tv_sec - tStart.tv_sec) * 1000000 + tEnd.tv_usec - tStart.tv_usec) / 1.e6; } #define SIZE 32 #define CONV_SIZE 3 #define CONV_LEVELS 27 //#define _CRT_SECURE_NO_WARNINGS 1 // precompile variables // assure default values if nothing provided #ifndef SPARSE_CONVOLUTIONS #define SPARSE_CONVOLUTIONS 0 // default dense convolutions #endif // SPARSE_CONVOLUTIONS #ifndef FIRST_CONV_SPARSE #define FIRST_CONV_SPARSE 0 // this is almost never 1 #endif // FIRST_CONV_SPARSE #ifndef SPARSE_FULLY_CONNECTED #define SPARSE_FULLY_CONNECTED 0 // this is not implemented yet #endif // SPARSE_FULLY_CONNECTED #ifndef FISHER_PRUNING #define FISHER_PRUNING \ 0 // set for fisher pruning, all previous variables changed to dense #endif // FISHER_PRUNING #ifndef NUMBER_OF_THREADS #define NUMBER_OF_THREADS 1 // number of threads to run on //#define NUMBER_OF_THREADS omp_get_num_procs() - 1 #endif // NUMBER_OF_THREADS static double pw_conv_time = 0.0; static double dense_time = 0.0; /****************************************************************************************************************************/ int im_sizes[27] = {32, 32, 16, 16, 16, 16, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2}; int strides[26] = {1, 2, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 
1}; int mem_block_shape[3] = { 1024, 32, 32}; // allocate the absolute maximum amount of space we will need float ***block1; float ***block2; float *****wc; // weights convolution float ***wd; // weights dense float **bd; // biases dense float **batchnorm_weights; float **batchnorm_biases; float **batchnorm_means; // running mean and variance from training used to // estimate population statistics float **batchnorm_vars; int mem_block_dense_shape = { 1024 * 2 * 2}; // size of output from last convolutional layer float *mem_block1_dense; float *mem_block2_dense; #if SPARSE_CONVOLUTIONS // sparse conv csr_t ****wc_sparse; #endif // SPARSE_CONVOLUTIONS #if FISHER_PRUNING #define SPARSE_CONVOLUTIONS 0 // force dense convolutions /* // ORIGINAL FISHER EXPERIMENTS int cshape[27][4] = { { 32, 3, CONV_SIZE, CONV_SIZE }, { 32, 1, CONV_SIZE, CONV_SIZE }, { 43, 32, 1, 1 }, { 43, 1, CONV_SIZE, CONV_SIZE }, { 85, 43, 1, 1 }, { 85, 1, CONV_SIZE, CONV_SIZE }, { 70, 85, 1, 1 }, { 70, 1, CONV_SIZE, CONV_SIZE }, { 150, 70, 1, 1 }, { 150, 1, CONV_SIZE, CONV_SIZE }, { 69, 150, 1, 1 }, { 69, 1, CONV_SIZE, CONV_SIZE }, { 188, 69, 1, 1 }, { 188, 1, CONV_SIZE, CONV_SIZE }, { 72, 188, 1, 1 }, { 72, 1, CONV_SIZE, CONV_SIZE }, { 122, 72, 1, 1 }, { 122, 1, CONV_SIZE, CONV_SIZE }, { 106, 122, 1, 1 }, { 106, 1, CONV_SIZE, CONV_SIZE }, { 96, 106, 1, 1 }, { 96, 1, CONV_SIZE, CONV_SIZE }, { 81, 96, 1, 1 }, { 81, 1, CONV_SIZE, CONV_SIZE }, { 75, 81, 1, 1 }, { 75, 1, CONV_SIZE, CONV_SIZE }, { 100, 75, 1, 1 } }; int dshape[1][2]= { { 100, 10} }; */ // FIXED 90% ACCURACY EXPERIMENTS int cshape[27][4] = {{32, 3, CONV_SIZE, CONV_SIZE}, {32, 1, CONV_SIZE, CONV_SIZE}, {43, 32, 1, 1}, {43, 1, CONV_SIZE, CONV_SIZE}, {85, 43, 1, 1}, {85, 1, CONV_SIZE, CONV_SIZE}, {70, 85, 1, 1}, {70, 1, CONV_SIZE, CONV_SIZE}, {150, 70, 1, 1}, {150, 1, CONV_SIZE, CONV_SIZE}, {69, 150, 1, 1}, {69, 1, CONV_SIZE, CONV_SIZE}, {188, 69, 1, 1}, {188, 1, CONV_SIZE, CONV_SIZE}, {72, 188, 1, 1}, {72, 1, CONV_SIZE, CONV_SIZE}, {122, 
72, 1, 1}, {122, 1, CONV_SIZE, CONV_SIZE}, {106, 122, 1, 1}, {106, 1, CONV_SIZE, CONV_SIZE}, {96, 106, 1, 1}, {96, 1, CONV_SIZE, CONV_SIZE}, {81, 96, 1, 1}, {81, 1, CONV_SIZE, CONV_SIZE}, {75, 81, 1, 1}, {75, 1, CONV_SIZE, CONV_SIZE}, {100, 75, 1, 1} }; int dshape[1][2] = {{100, 10}}; #else // PLAIN int cshape[27][4] = {{32, 3, CONV_SIZE, CONV_SIZE}, {32, 1, CONV_SIZE, CONV_SIZE}, {64, 32, 1, 1}, {64, 1, CONV_SIZE, CONV_SIZE}, {128, 64, 1, 1}, {128, 1, CONV_SIZE, CONV_SIZE}, {128, 128, 1, 1}, {128, 1, CONV_SIZE, CONV_SIZE}, {256, 128, 1, 1}, {256, 1, CONV_SIZE, CONV_SIZE}, {256, 256, 1, 1}, {256, 1, CONV_SIZE, CONV_SIZE}, {512, 256, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {1024, 512, 1, 1}, {1024, 1, CONV_SIZE, CONV_SIZE}, {1024, 1024, 1, 1}}; int dshape[1][2] = {{1024, 10}}; #endif // FISHER_PRUNING /****************************************************************************************************************************/ void reset_mem_block(float ***mem) { int i, j, k; for (i = 0; i < mem_block_shape[0]; i++) { for (j = 0; j < mem_block_shape[1]; j++) { for (k = 0; k < mem_block_shape[2]; k++) { mem[i][j][k] = 0.0; } } } } /****************************************************************************************************************************/ void reset_mem_block_dense(float *mem) { int i; for (i = 0; i < mem_block_dense_shape; i++) { mem[i] = 0.0; } } /****************************************************************************************************************************/ void init_memory() { int i, j, k, l; int max_channels = 1024; int max_im_size = 32; block1 = malloc(max_channels * sizeof(float **)); block2 = malloc(max_channels * sizeof(float **)); // allocate block memory for (i = 0; i < max_channels; 
i++) { block1[i] = malloc(max_im_size * sizeof(float *)); block2[i] = malloc(max_im_size * sizeof(float *)); for (j = 0; j < max_im_size; j++) { block1[i][j] = malloc(max_im_size * sizeof(float)); block2[i][j] = malloc(max_im_size * sizeof(float)); } } #if SPARSE_CONVOLUTIONS wc_sparse = (csr_t ****)malloc(CONV_LEVELS * sizeof(csr_t ***)); for (l = 0; l < CONV_LEVELS; l++) { wc_sparse[l] = (csr_t ***)malloc(cshape[l][0] * sizeof(csr_t **)); for (i = 0; i < cshape[l][0]; i++) { wc_sparse[l][i] = (csr_t **)malloc(cshape[l][1] * sizeof(csr_t *)); } } // wc memory allocated below will be freed in read_weights if // SPARSE_CONVOLUTIONS #endif // SPARSE_CONVOLUTIONS wc = malloc(CONV_LEVELS * sizeof(float ****)); // allocate kernel memory for (l = 0; l < CONV_LEVELS; l++) { wc[l] = malloc(cshape[l][0] * sizeof(float ***)); for (i = 0; i < cshape[l][0]; i++) { wc[l][i] = malloc(cshape[l][1] * sizeof(float **)); for (j = 0; j < cshape[l][1]; j++) { wc[l][i][j] = malloc(cshape[l][2] * sizeof(float *)); for (k = 0; k < cshape[l][2]; k++) { wc[l][i][j][k] = malloc(cshape[l][3] * sizeof(float)); } } } } // allocate batchnorm memory batchnorm_weights = malloc(27 * sizeof(float *)); batchnorm_biases = malloc(27 * sizeof(float *)); batchnorm_means = malloc(27 * sizeof(float *)); batchnorm_vars = malloc(27 * sizeof(float *)); for (l = 0; l < CONV_LEVELS; l++) { batchnorm_weights[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_biases[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_means[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_vars[l] = malloc(cshape[l][0] * sizeof(float)); } wd = malloc(1 * sizeof(float **)); bd = malloc(1 * sizeof(float *)); for (l = 0; l < 1; l++) { wd[l] = malloc(dshape[l][0] * sizeof(float *)); for (i = 0; i < dshape[l][0]; i++) { wd[l][i] = malloc(dshape[l][1] * sizeof(float)); } bd[l] = malloc(dshape[l][1] * sizeof(float)); } // allocate dense memory mem_block1_dense = calloc(mem_block_dense_shape, sizeof(float)); mem_block2_dense = 
calloc(mem_block_dense_shape, sizeof(float)); } /****************************************************************************************************************************/ void free_memory() { int i, j, k, l; // Free convolution weights for (l = 0; l < CONV_LEVELS; l++) { #if SPARSE_CONVOLUTIONS for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { free(wc_sparse[l][i][j]); } free(wc_sparse[l][i]); } free(wc_sparse[l]); #else for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { free(wc[l][i][j][k]); } free(wc[l][i][j]); } free(wc[l][i]); } free(wc[l]); #endif } // free(wc); // free(bc); #if SPARSE_CONVOLUTIONS free(wc_sparse); #else free(wc); #endif // SPARSE_CONVOLUTIONS // Free dense weights for (l = 0; l < 1; l++) { for (i = 0; i < dshape[l][0]; i++) { free(wd[l][i]); } free(wd[l]); free(bd[l]); } free(wd); free(bd); // Free memblocks for (i = 0; i < mem_block_shape[0]; i++) { for (j = 0; j < mem_block_shape[1]; j++) { free(block1[i][j]); free(block2[i][j]); } free(block1[i]); free(block2[i]); } free(block1); free(block2); free(mem_block1_dense); free(mem_block2_dense); } /****************************************************************************************************************************/ void read_weights(char *in_file, int lvls) { float dval; int i, j, k, l, m, z; FILE *iin; int total_lvls_read = 0; // printf("\nin_file es: %s\n\n", in_file); iin = fopen(in_file, "r"); if (iin == NULL) { printf("Weights file %s absent\n", in_file); exit(1); } // Reading convolution weights (store them flipped from begining) // no biases for (l = 0; l < CONV_LEVELS; l++) { printf("Read conv block %d weights\n", l); for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { for (m = 0; m < cshape[l][3]; m++) { fscanf(iin, "%f", &dval); wc[l][i][j][k][m] = dval; } } } } total_lvls_read += 1; } for (z = 0; z < CONV_LEVELS; z++) { // batchnorm 
weights and biases printf("Read batchnorm block %d weights\n", z); for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); batchnorm_weights[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); batchnorm_biases[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); batchnorm_means[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); batchnorm_vars[z][i] = dval; } } if (total_lvls_read >= lvls && lvls != -1) return; // Reading dense weights int num_dense_layers = 1; for (z = 0; z < num_dense_layers; z++) { printf("Read dense block %d weights\n", z); for (i = 0; i < dshape[z][0]; i++) { for (j = 0; j < dshape[z][1]; j++) { fscanf(iin, "%f", &dval); // printf("weight: %i : %f \n", i, dval); wd[z][i][j] = dval; } } for (i = 0; i < dshape[z][1]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); bd[z][i] = dval; } } fclose(iin); /////////////**************** SPARSE ************///////////////////////////// #if SPARSE_CONVOLUTIONS // convert to sparse format for (l = 0; l < CONV_LEVELS; l++) for (i = 0; i < cshape[l][0]; i++) for (j = 0; j < cshape[l][1]; j++) { // printf("going for %d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, // cshape[l][1]); csr_t *a = dense2csr2(cshape[l][2], cshape[l][3], wc[l][i][j]); // print_csr(a); wc_sparse[l][i][j] = a; // printf("done..%d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, // cshape[l][1]); } // Free convolution weights #if FIRST_CONV_SPARSE == 0 l = 0; // allocate new memory for first conv and copy from wc float *****wc_first_conv = (float *****)malloc(1 * sizeof(float ****)); wc_first_conv[l] = (float ****)malloc(cshape[l][0] * sizeof(float ***)); int k1, k2; for (i = 0; i < cshape[l][0]; i++) { wc_first_conv[l][i] = (float ***)malloc(cshape[l][1] * sizeof(float **)); for (j = 0; j < cshape[l][1]; j++) { 
wc_first_conv[l][i][j] = (float **)malloc(cshape[l][2] * sizeof(float *)); for (k1 = 0; k1 < cshape[l][2]; k1++) { wc_first_conv[l][i][j][k1] = (float *)malloc(cshape[l][3] * sizeof(float)); for (k2 = 0; k2 < cshape[l][3]; k2++) wc_first_conv[l][i][j][k1][k2] = wc[l][i][j][k1][k2]; } } } #endif // FIRST_CONV_SPARSE == 0 // free up all dense conv layer representation for (l = 0; l < CONV_LEVELS; l++) { for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { free(wc[l][i][j][k]); } free(wc[l][i][j]); } free(wc[l][i]); } free(wc[l]); } free(wc); #if FIRST_CONV_SPARSE == 0 // replace old wc pointer with the data for only first conv layer created // above wc = wc_first_conv; #endif // FIRST_CONV_SPARSE == 0 #endif // SPARSE_CONVOLUTIONS } /****************************************************************************************************************************/ void read_image(char *in_file) { int i, j, l; FILE *iin; float dval; iin = fopen(in_file, "r"); if (iin == NULL) { printf("Image file %s absent\n", in_file); exit(1); } /* Reading image */ for (i = 0; i < SIZE; i++) { for (j = 0; j < SIZE; j++) { for (l = 0; l < 3; l++) { fscanf(iin, "%f", &dval); block1[l][i][j] = dval; } } } } /****************************************************************************************************************************/ void convolution_3_x_3(float **matrix, float **kernel, float **out, int size, int stride) { int i, j; float sum; float zeropad[size + 2][size + 2]; memset(zeropad, 0, ((size + 2) * (size + 2) * sizeof(float))); // jack for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { zeropad[i + 1][j + 1] = matrix[i][j]; } } for (i = 0; i < size; i = i + stride) { for (j = 0; j < size; j = j + stride) { sum = zeropad[i][j] * kernel[0][0] + zeropad[i][j + 1] * kernel[0][1] + zeropad[i][j + 2] * kernel[0][2] + zeropad[i + 1][j] * kernel[1][0] + zeropad[i + 1][j + 1] * kernel[1][1] + zeropad[i + 1][j + 2] * 
kernel[1][2] + zeropad[i + 2][j] * kernel[2][0] + zeropad[i + 2][j + 1] * kernel[2][1] + zeropad[i + 2][j + 2] * kernel[2][2]; out[i][j] += sum; } } } /****************************************************************************************************************************/ /****************************************************************************************************************************/ void pointwise_convolution(float ****point_kernel, float ***block2, float ***block1, int input_channels, int output_channels, int image_size) { struct timeval start, end; gettimeofday(&start, NULL); int i, j, k, l; float sum; for (i = 0; i < output_channels; i++) { for (j = 0; j < image_size; j++) { for (k = 0; k < image_size; k++) { sum = 0.; for (l = 0; l < input_channels; l++) { sum += block2[l][j][k] * point_kernel[i][l][0] [0]; // 0 because they are always 1x1 filters } block1[i][j][k] = sum; } } } gettimeofday(&end, NULL); pw_conv_time += get_seconds(start, end); } /****************************************************************************************************************************/ void batchnorm_and_relu(float ***in, float ***out, float *weights, float *bias, float *mean, float *var, int num_channels, int image_size) { int channel, i, j; // ((x - mean) * invstd) * w + b #pragma omp parallel for private(channel, i, j) schedule(dynamic, 1) \ num_threads(NUMBER_OF_THREADS) for (channel = 0; channel < num_channels; channel++) { float invstd = 1. 
/ sqrt(var[channel] + 0.000001); for (i = 0; i < image_size; i++) { for (j = 0; j < image_size; j++) { out[channel][i][j] = (weights[channel] * invstd) * in[channel][i][j] + (bias[channel] - ((weights[channel] * mean[channel]) * invstd)); // out[channel][i][j] = ((in[channel][i][j] - mean[channel]) * invstd) * // weights[channel] + bias[channel]; if (out[channel][i][j] < 0.f) out[channel][i][j] = 0.f; } } } } /****************************************************************************************************************************/ void depthwise_convolution(float ***block1, float ***block2, float ****depth_kernel, float ****point_kernel, int level) { int i, j; int input_channels = cshape[level][0]; int output_channels = cshape[level + 1][0]; // printf("level %i: %i ==> %i\n", level, input_channels, output_channels); #pragma omp parallel for private(i) schedule(dynamic, 1) \ num_threads(NUMBER_OF_THREADS) for (i = 0; i < input_channels; i++) { #if SPARSE_CONVOLUTIONS convolution_3_x_3_sparse(block1[i], wc_sparse[level][i][0], block2[i], im_sizes[level], strides[level]); #else convolution_3_x_3(block1[i], depth_kernel[i][0], block2[i], im_sizes[level], strides[level]); #endif } batchnorm_and_relu(block2, block1, batchnorm_weights[level], batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], input_channels, im_sizes[level + 1]); reset_mem_block(block2); level++; // now do linear combination of the elements in output and write them back // into the first memory block #if SPARSE_CONVOLUTIONS #pragma omp parallel for private(i, j) schedule(dynamic, 1) \ num_threads(NUMBER_OF_THREADS) for (i = 0; i < output_channels; i++) { for (j = 0; j < input_channels; j++) { pointwise_convolution_sparse(block2[j], wc_sparse[level][i][j], block1[j], im_sizes[level]); } } #else pointwise_convolution(point_kernel, block1, block2, input_channels, output_channels, im_sizes[level]); #endif batchnorm_and_relu(block2, block1, batchnorm_weights[level], 
batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], output_channels, im_sizes[level + 1]); reset_mem_block(block2); } /****************************************************************************************************************************/ void add_bias_and_relu_flatten(float *out, float *bs, int size, int relu) { int i; for (i = 0; i < size; i++) { out[i] += bs[i]; // printf("%f\n", out[i]); if (relu == 1) { if (out[i] < 0) out[i] = 0.f; } } } /****************************************************************************************************************************/ void flatten(float ***in, float *out, int sh0, int sh1, int sh2) { int i, j, k, total = 0; for (i = 0; i < sh0; i++) { for (j = 0; j < sh1; j++) { for (k = 0; k < sh2; k++) { out[total] = in[i][j][k]; total += 1; } } } } /****************************************************************************************************************************/ void dense(float *in, float **weights, float *out, int sh_in, int sh_out) { struct timeval start, end; gettimeofday(&start, NULL); int i, j; for (i = 0; i < sh_out; i++) { float sum = 0.0; for (j = 0; j < sh_in; j++) { sum += in[j] * weights[j][i]; } out[i] = sum; } gettimeofday(&end, NULL); dense_time += get_seconds(start, end); } /****************************************************************************************************************************/ void write_out_block(int layer, float ***block) { int layer_name = layer; // * 2 - 1; char filename[16]; sprintf(filename, "outputs/output%d", layer_name); FILE *f = fopen(filename, "w"); if (f == NULL) { printf("Error opening file!\n"); exit(1); } for (int i = 0; i < 32; i++) { for (int j = 0; j < mem_block_shape[1]; j++) { for (int k = 0; k < mem_block_shape[2]; k++) { fprintf(f, "%f \n", block[i][j][k]); } } } fclose(f); } /****************************************************************************************************************************/ void write_out_layer(int layer) 
{ int layer_name = layer; // * 2 - 1; char filename[7]; sprintf(filename, "layer%d", layer_name); FILE *f = fopen(filename, "w"); int depth = 1; if (f == NULL) { printf("Error opening file!\n"); exit(1); } for (int o = 0; o < cshape[layer][0]; o++) { for (int i = 0; i < cshape[layer][1]; i++) { for (int k_h = 0; k_h < cshape[layer][2]; k_h++) { for (int k_w = 0; k_w < cshape[layer][3]; k_w++) { fprintf(f, "%f ", wc[layer][o][i][k_h][k_w]); } } fprintf(f, "\n"); } } fclose(f); layer_name = layer + 1; char filename2[7]; sprintf(filename2, "layer%d", layer_name); // get batchnorms FILE *f2 = fopen(filename2, "w"); if (f2 == NULL) { printf("Error opening file!\n"); exit(1); } for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_weights[layer][i]); } fprintf(f2, "\n\n\n"); for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_biases[layer][i]); } fprintf(f2, "\n\n\n"); for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_means[layer][i]); } fprintf(f2, "\n\n\n"); for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_vars[layer][i]); } fclose(f); } /****************************************************************************************************************************/ void output_predictions(FILE *out, int only_convolution, int size, int cur_size) { int i; int c = 0; if (only_convolution == 1) { // for (i = 0; i < 512*7*7; i++) { for (i = 0; i < size * cur_size * cur_size; i++) { fprintf(out, "%g\n", mem_block1_dense[i]); } } else { double maximum = -1; // dshape[0][1] ==> 10 for (i = 0; i < dshape[0][1]; i++) { fprintf(out, "%g\n", mem_block2_dense[i]); if (mem_block1_dense[i] > maximum) { maximum = mem_block2_dense[i]; c = i + 1; } } fprintf(out, "\n"); printf("This image depicts class: %d\n", c); } } /****************************************************************************************************************************/ void get_mobilenet_predict() { int level = 0; int 
i, j; // normal convolution #pragma omp parallel for private(i, j) schedule(dynamic, 1) \ num_threads(NUMBER_OF_THREADS) for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { #if FIRST_CONV_SPARSE convolution_3_x_3_sparse(block1[j], wc_sparse[level][i][j], block2[i], im_sizes[level], 1); #else convolution_3_x_3(block1[j], wc[level][i][j], block2[i], im_sizes[level], 1); #endif } } batchnorm_and_relu(block2, block1, batchnorm_weights[level], batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], 32, 32); reset_mem_block(block2); // depthwise convolutions for (level = 1; level < (CONV_LEVELS - 1); level = level + 2) { depthwise_convolution(block1, block2, wc[level], wc[level + 1], (level)); } // flatten flatten(block1, mem_block1_dense, cshape[level][0], im_sizes[level], im_sizes[level]); // dense level = 0; dense(mem_block1_dense, wd[level], mem_block2_dense, dshape[level][0], dshape[level][1]); add_bias_and_relu_flatten(mem_block2_dense, bd[level], dshape[level][1], 0); reset_mem_block_dense(mem_block1_dense); return; } /****************************************************************************************************************************/ char *trimwhitespace(char *str) { char *end; // Trim leading space while (isspace((unsigned char)*str)) str++; if (*str == 0) // All spaces? 
return str; // Trim trailing space end = str + strlen(str) - 1; while (end > str && isspace((unsigned char)*end)) end--; // Write new null terminator *(end + 1) = 0; return str; } /****************************************************************************************************************************/ int main(int argc, char *argv[]) { FILE *file_list, *results; char buf[1024]; struct timeval tStart, tEnd; double deltaTime; char *weights_file; char *image_list_file; char *output_file; int lvls = -1; int only_convolution = 0; //----------------------------------------------------------------------- printf("Using %d threads\n", NUMBER_OF_THREADS); if (argc != 4 && argc != 5) { printf( "Usage: <program.exe> <weights file> <images list file> <output file> " "<only_convolution [optional]>\n"); return 0; } weights_file = argv[1]; // printf("%s\n", weights_file); image_list_file = argv[2]; output_file = argv[3]; if (argc == 5) { lvls = 20; only_convolution = 1; } //----------------------------------------------------------------------- init_memory(); file_list = fopen(image_list_file, "r"); if (file_list == NULL) { printf("Check file list location: %s\n", image_list_file); return 1; } results = fopen(output_file, "w"); if (results == NULL) { printf("Couldn't open file for writing: %s\n", output_file); return 1; } gettimeofday(&tStart, NULL); read_weights(weights_file, lvls); gettimeofday(&tEnd, NULL); deltaTime = get_seconds(tStart, tEnd); printf("Reading weights: %.3lf sec\n", deltaTime); while (!feof(file_list)) { pw_conv_time = 0.0; dense_time = 0.0; fgets(buf, 1024, file_list); if (strlen(buf) == 0) { break; } // printf("%d\n", strlen(buf)); read_image(trimwhitespace(buf)); gettimeofday(&tStart, NULL); get_mobilenet_predict(); gettimeofday(&tEnd, NULL); deltaTime = get_seconds(tStart, tEnd); printf("Infer image %s: %.3lf sec\n", buf, deltaTime); printf("pw_conv time: %.3lf sec\n", pw_conv_time); printf("dense time: %.3lf sec\n", dense_time); 
output_predictions(results, only_convolution, 1024, 1); } // free_memory(); fclose(file_list); return 0; }
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H

namespace Eigen {

namespace internal {

template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;

/* Specialization for a row-major destination matrix => simple transposition of the product.
 * Uses the identity C = A*B  <=>  C^T = B^T * A^T, so only the col-major kernel below
 * has to be implemented; note the swapped scalar/order/conjugation template arguments. */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
  typedef gebp_traits<RhsScalar,LhsScalar> Traits;

  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
  }
};

/* Specialization for a col-major destination matrix
 * => Blocking algorithm following Goto's paper */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{

typedef gebp_traits<LhsScalar,RhsScalar> Traits;

typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
  const LhsScalar* _lhs, Index lhsStride,
  const RhsScalar* _rhs, Index rhsStride,
  ResScalar* _res, Index resStride,
  ResScalar alpha,
  level3_blocking<LhsScalar,RhsScalar>& blocking,
  GemmParallelInfo<Index>* info = 0)
{
  typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
  typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
  typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
  LhsMapper lhs(_lhs,lhsStride);
  RhsMapper rhs(_rhs,rhsStride);
  ResMapper res(_res, resStride);

  Index kc = blocking.kc();                   // cache block size along the K direction
  Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
  Index nc = (std::min)(cols,blocking.nc());  // cache block size along the N direction

  gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
  gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
  gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
  if(info)
  {
    // this is the parallel version!
    int tid = omp_get_thread_num();
    int threads = omp_get_num_threads();

    // blockA is shared among all threads; each thread packs its own horizontal slice of it.
    LhsScalar* blockA = blocking.blockA();
    eigen_internal_assert(blockA!=0);

    std::size_t sizeB = kc*nc;
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);

    // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
    for(Index k=0; k<depth; k+=kc)
    {
      const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'

      // In order to reduce the chance that a thread has to wait for the other,
      // let's start by packing B'.
      pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);

      // Pack A_k to A' in a parallel fashion:
      // each thread packs the sub block A_k,i to A'_i where i is the thread id.

      // However, before copying to A'_i, we have to make sure that no other thread is still using it,
      // i.e., we test that info[tid].users equals 0.
      // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
      while(info[tid].users!=0) {}
      info[tid].users += threads;

      pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);

      // Notify the other threads that the part A'_i is ready to go.
      info[tid].sync = k;

      // Computes C_i += A' * B' per A'_i
      for(int shift=0; shift<threads; ++shift)
      {
        int i = (tid+shift)%threads;

        // At this point we have to make sure that A'_i has been updated by the thread i,
        // we use testAndSetOrdered to mimic a volatile access.
        // However, no need to wait for the B' part which has been updated by the current thread!
        if (shift>0) {
          while(info[i].sync!=k) {
          }
        }

        gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
      }

      // Then keep going as usual with the remaining B'
      for(Index j=nc; j<cols; j+=nc)
      {
        const Index actual_nc = (std::min)(j+nc,cols)-j;

        // pack B_k,j to B'
        pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);

        // C_j += A' * B'
        gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
      }

      // Release all the sub blocks A'_i of A' for the current thread,
      // i.e., we simply decrement the number of users by 1
      for(Index i=0; i<threads; ++i)
#pragma omp atomic
        info[i].users -= 1;
    }
  }
  else
#endif // EIGEN_HAS_OPENMP
  {
    EIGEN_UNUSED_VARIABLE(info);

    // this is the sequential version!
    std::size_t sizeA = kc*mc;
    std::size_t sizeB = kc*nc;

    ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());

    const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;

    // For each horizontal panel of the rhs, and corresponding panel of the lhs...
    for(Index i2=0; i2<rows; i2+=mc)
    {
      const Index actual_mc = (std::min)(i2+mc,rows)-i2;

      for(Index k2=0; k2<depth; k2+=kc)
      {
        const Index actual_kc = (std::min)(k2+kc,depth)-k2;

        // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
        // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
        // Note that this panel will be read as many times as the number of blocks in the rhs's
        // horizontal panel which is, in practice, a very low number.
        pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);

        // For each kc x nc block of the rhs's horizontal panel...
        for(Index j2=0; j2<cols; j2+=nc)
        {
          const Index actual_nc = (std::min)(j2+nc,cols)-j2;

          // We pack the rhs's block into a sequential chunk of memory (L2 caching)
          // Note that this block will be read a very high number of times, which is equal to the number of
          // micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
          if((!pack_rhs_once) || i2==0)
            pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);

          // Everything is packed, we can now call the panel * block kernel:
          gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
        }
      }
    }
  }
}

};

/*********************************************************************************
*  Specialization of generic_product_impl for "large" GEMM, i.e.,
*  implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/

// Functor object bundling the operands, destination, scalar factor, and blocking
// policy of one GEMM call; invoked (possibly per row-slice) by parallelize_gemm.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  // Prepare blocking sizes and the shared lhs buffer before the parallel region starts.
  void initParallelSession(Index num_threads) const
  {
    m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
    m_blocking.allocateA();
  }

  // Run the product restricted to the [row, row+rows) x [col, col+cols) slice of the destination.
  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1) // means all columns
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              &m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              &m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  typedef typename Gemm::Traits Traits;

  protected:
    const Lhs& m_lhs;
    const Rhs& m_rhs;
    Dest& m_dest;
    Scalar m_actualAlpha;
    BlockingType& m_blocking;
};

template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;

// Base class holding the cache-blocking sizes (mc, nc, kc) and the packed-buffer pointers.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;

    Index m_mc;
    Index m_nc;
    Index m_kc;

  public:

    level3_blocking()
      : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline Index mc() const { return m_mc; }
    inline Index nc() const { return m_nc; }
    inline Index kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
};

// Blocking policy when all dimensions are fixed at compile time:
// the packed buffers are statically allocated members (no heap).
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth
    };

#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
    EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
    // No guaranteed static alignment: over-allocate raw bytes and align manually in the ctor.
    EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
    EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif

  public:

    gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
#else
      this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
      this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
    }

    void initParallel(Index, Index, Index, Index)
    {}

    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateAll() {}
};

// Blocking policy for runtime-sized products: block sizes are computed from the
// problem shape and cache sizes, and the packed buffers are heap-allocated lazily.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    Index m_sizeA;
    Index m_sizeB;

  public:

    gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      if(l3_blocking)
      {
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
      }
      else  // no l3 blocking
      {
        Index n = this->m_nc;
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
      }

      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void initParallel(Index rows, Index cols, Index depth, Index num_threads)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
      Index m = this->m_mc;
      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
    }

    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
    }
};

} // end namespace internal

namespace internal {

template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  typedef typename Lhs::Scalar LhsScalar;
  typedef typename Rhs::Scalar RhsScalar;

  typedef internal::blas_traits<Lhs> LhsBlasTraits;
  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
  typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;

  typedef internal::blas_traits<Rhs> RhsBlasTraits;
  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
  typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;

  enum {
    MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
  };

  typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;

  template<typename Dst>
  static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    // See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=404 for a discussion and helper program
    // to determine the following heuristic.
    // EIGEN_GEMM_TO_COEFFBASED_THRESHOLD is typically defined to 20 in GeneralProduct.h,
    // unless it has been specialized by the user or for a given architecture.
    // Note that the condition rhs.rows()>0 was required because lazy produc is (was?) not happy with empty inputs.
    // I'm not sure it is still required.
    if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
      lazyproduct::evalTo(dst, lhs, rhs);
    else
    {
      dst.setZero();
      scaleAndAddTo(dst, lhs, rhs, Scalar(1));
    }
  }

  template<typename Dst>
  static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
      lazyproduct::addTo(dst, lhs, rhs);
    else
      scaleAndAddTo(dst,lhs, rhs, Scalar(1));
  }

  template<typename Dst>
  static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
      lazyproduct::subTo(dst, lhs, rhs);
    else
      scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
  }

  // dst += alpha * lhs * rhs — the entry point that sets up blocking and
  // dispatches to the (possibly OpenMP-parallel) blocked kernel above.
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
  {
    eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
    if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
      return;

    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);

    // Fold scalar multiples wrapped around the operands (e.g. (s*A)*B) into alpha.
    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
                               * RhsBlasTraits::extractScalarFactor(a_rhs);

    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
            Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;

    typedef internal::gemm_functor<
      Scalar, Index,
      internal::general_matrix_matrix_product<
        Index,
        LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
        RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
        (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
      ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;

    BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
    internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
        (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit);
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
pzgetrf.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 * University of Tennessee, US,
 * University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include <plasma_core_blas.h>

#define A(m, n) (plasma_complex64_t*)plasma_tile_addr(A, m, n)

/******************************************************************************/
// Tiled LU factorization with partial pivoting (parallel panel + OpenMP task
// dataflow for the trailing-matrix updates).
//   A        - tiled matrix descriptor; factored in place into L and U.
//   ipiv     - output pivot indices (1-based, LAPACK convention).
//   sequence - sequence the task graph belongs to; checked/failed on errors.
//   request  - request handle used when reporting failures.
void plasma_pzgetrf(plasma_desc_t A, int *ipiv,
                    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Return if failed sequence.
    if (sequence->status != PlasmaSuccess)
        return;

    // Read parameters from the context.
    plasma_context_t *plasma = plasma_context_self();
    // Set tiling parameters.
    int ib = plasma->ib;

    int minmtnt = imin(A.mt, A.nt);

    for (int k = 0; k < minmtnt; k++) {
        plasma_complex64_t *a00, *a20;
        a00 = A(k, k);
        a20 = A(A.mt-1, k);

        // Create fake dependencies of the whole panel on its individual tiles.
        // These tasks are inserted to generate a correct DAG rather than
        // doing any useful work.
        for (int m = k+1; m < A.mt-1; m++) {
            plasma_complex64_t *amk = A(m, k);

            #pragma omp task depend (in:amk[0]) \
                             depend (inout:a00[0]) \
                             priority(1)
            {
                // Do some funny work here. It appears so that the compiler
                // might not insert the task if it is completely empty.
                int l = 1;
                l++;
            }
        }

        int ma00k = (A.mt-k-1)*A.mb;
        int na00k = plasma_tile_nmain(A, k);
        int lda20 = plasma_tile_mmain(A, A.mt-1);

        int nvak = plasma_tile_nview(A, k);
        int mvak = plasma_tile_mview(A, k);
        int ldak = plasma_tile_mmain(A, k);

        // Number of threads cooperating on this panel; shrinks as the
        // factorization advances and the panel gets shorter.
        int num_panel_threads = imin(plasma->max_panel_threads,
                                     minmtnt-k);

        // panel: factor column block k with partial pivoting.
        #pragma omp task depend(inout:a00[0:ma00k*na00k]) \
                         depend(inout:a20[0:lda20*nvak]) \
                         depend(out:ipiv[k*A.mb:mvak]) \
                         priority(1)
        {
            // Per-thread scratch used by the cooperating panel threads to
            // reduce the pivot search (index and value of the local maximum).
            volatile int *max_idx = (int*)malloc(num_panel_threads*sizeof(int));
            if (max_idx == NULL)
                plasma_request_fail(sequence, request, PlasmaErrorOutOfMemory);

            volatile plasma_complex64_t *max_val =
                (plasma_complex64_t*)malloc(num_panel_threads*sizeof(
                                            plasma_complex64_t));
            if (max_val == NULL)
                plasma_request_fail(sequence, request, PlasmaErrorOutOfMemory);

            volatile int info = 0;

            plasma_barrier_t barrier;
            plasma_barrier_init(&barrier);

            // On allocation failure plasma_request_fail() flips the sequence
            // status, so the taskloop below is skipped and free(NULL) is a no-op.
            if (sequence->status == PlasmaSuccess) {
                // If nesting would not be expensive on architectures such as
                // KNL, this would resolve the issue with deadlocks caused by
                // tasks expected to run are in fact not launched.
                //#pragma omp parallel for shared(barrier)
                //    schedule(dynamic,1)
                //    num_threads(num_panel_threads)
                #pragma omp taskloop untied shared(barrier) \
                                     num_tasks(num_panel_threads) \
                                     priority(2)
                for (int rank = 0; rank < num_panel_threads; rank++) {
                    {
                        plasma_desc_t view =
                            plasma_desc_view(A,
                                             k*A.mb, k*A.nb,
                                             A.m-k*A.mb, nvak);

                        plasma_core_zgetrf(view, &ipiv[k*A.mb], ib,
                                           rank, num_panel_threads,
                                           max_idx, max_val, &info,
                                           &barrier);

                        if (info != 0)
                            plasma_request_fail(sequence, request,
                                                k*A.mb+info);
                    }
                }
            }
            #pragma omp taskwait

            free((void*)max_idx);
            free((void*)max_val);

            // Convert panel-local pivot rows to global (absolute) row indices.
            for (int i = k*A.mb+1; i <= imin(A.m, k*A.mb+nvak); i++)
                ipiv[i-1] += k*A.mb;
        }

        // update: row swaps + triangular solve + GEMM on each trailing column block.
        for (int n = k+1; n < A.nt; n++) {
            plasma_complex64_t *a01, *a11, *a21;
            a01 = A(k, n);
            a11 = A(k+1, n);
            a21 = A(A.mt-1, n);

            int ma11k = (A.mt-k-2)*A.mb;
            int na11n = plasma_tile_nmain(A, n);
            int lda21 = plasma_tile_mmain(A, A.mt-1);

            int nvan = plasma_tile_nview(A, n);

            // The first trailing column block (n == k+1) is on the critical
            // path, hence the elevated priority.
            #pragma omp task depend(in:a00[0:ma00k*na00k]) \
                             depend(in:a20[0:lda20*nvak]) \
                             depend(in:ipiv[k*A.mb:mvak]) \
                             depend(inout:a01[0:ldak*nvan]) \
                             depend(inout:a11[0:ma11k*na11n]) \
                             depend(inout:a21[0:lda21*nvan]) \
                             priority(n == k+1)
            {
                if (sequence->status == PlasmaSuccess) {
                    // geswp: apply the panel's row interchanges to column block n.
                    int k1 = k*A.mb+1;
                    int k2 = imin(k*A.mb+A.mb, A.m);
                    plasma_desc_t view =
                        plasma_desc_view(A, 0, n*A.nb, A.m, nvan);
                    plasma_core_zgeswp(PlasmaRowwise, view, k1, k2, ipiv, 1);

                    // trsm: U(k,n) = L(k,k)^{-1} * A(k,n).
                    plasma_core_ztrsm(PlasmaLeft, PlasmaLower,
                                      PlasmaNoTrans, PlasmaUnit,
                                      mvak, nvan,
                                      1.0, A(k, k), ldak,
                                           A(k, n), ldak);

                    // gemm: trailing update A(m,n) -= A(m,k) * U(k,n).
                    for (int m = k+1; m < A.mt; m++) {
                        int mvam = plasma_tile_mview(A, m);
                        int ldam = plasma_tile_mmain(A, m);

                        #pragma omp task priority(n == k+1)
                        {
                            plasma_core_zgemm(
                                PlasmaNoTrans, PlasmaNoTrans,
                                mvam, nvan, A.nb,
                                -1.0, A(m, k), ldam,
                                      A(k, n), ldak,
                                1.0,  A(m, n), ldam);
                        }
                    }
                }
                #pragma omp taskwait
            }
        }
    }

    // Multidependency of the whole ipiv on the individual chunks
    // corresponding to tiles.
    for (int m = 0; m < minmtnt; m++) {
        // insert dummy task
        #pragma omp task depend (in:ipiv[m*A.mb]) \
                         depend (inout:ipiv[0])
        {
            int l = 1;
            l++;
        }
    }

    // pivoting to the left: apply later pivots back to the already-factored
    // columns so L is stored consistently with the final row order.
    for (int k = 0; k < minmtnt-1; k++) {
        plasma_complex64_t *a10, *a20;
        a10 = A(k+1, k);
        a20 = A(A.mt-1, k);

        int ma10k = (A.mt-k-2)*A.mb;
        int na00k = plasma_tile_nmain(A, k);
        int lda20 = plasma_tile_mmain(A, A.mt-1);

        int nvak = plasma_tile_nview(A, k);

        #pragma omp task depend(in:ipiv[0:imin(A.m,A.n)]) \
                         depend(inout:a10[0:ma10k*na00k]) \
                         depend(inout:a20[0:lda20*nvak])
        {
            if (sequence->status == PlasmaSuccess) {
                plasma_desc_t view =
                    plasma_desc_view(A, 0, k*A.nb, A.m, A.nb);
                int k1 = (k+1)*A.mb+1;
                int k2 = imin(A.m, A.n);
                plasma_core_zgeswp(PlasmaRowwise, view, k1, k2, ipiv, 1);
            }
        }

        // Multidependency of individual tiles on the whole panel.
        for (int m = k+2; m < A.mt-1; m++) {
            plasma_complex64_t *amk = A(m, k);

            #pragma omp task depend (in:a10[0]) \
                             depend (inout:amk[0])
            {
                // Do some funny work here. It appears so that the compiler
                // might not insert the task if it is completely empty.
                int l = 1;
                l++;
            }
        }
    }
}
GB_binop__bxor_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bxor_int8) // A.*B function (eWiseMult): GB (_AemultB_08__bxor_int8) // A.*B function (eWiseMult): GB (_AemultB_02__bxor_int8) // A.*B function (eWiseMult): GB (_AemultB_04__bxor_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_int8) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bxor_int8) // C+=b function (dense accum): GB (_Cdense_accumb__bxor_int8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_int8) // C=scalar+B GB (_bind1st__bxor_int8) // C=scalar+B' GB (_bind1st_tran__bxor_int8) // C=A+scalar GB (_bind2nd__bxor_int8) // C=A'+scalar GB (_bind2nd_tran__bxor_int8) // C type: int8_t // A type: int8_t // A pattern? 0 // B type: int8_t // B pattern? 
0 // BinaryOp: cij = (aij) ^ (bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x) ^ (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXOR || GxB_NO_INT8 || GxB_NO_BXOR_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// NOTE(review): this file is auto-generated (see the "do not edit" notice in
// its header); any change here should be made in Generator/GB_binop.c and
// regenerated.  Comments below only restore readability.

// C += A+B: disabled for BXOR.  The enclosing "#if 0" (opened above) compiles
// this stub out: the dense ewise3-accum kernel exists only for MIN, MAX,
// PLUS, MINUS, RMINUS, TIMES, DIV, and RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bxor_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bxor_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,    // B sliced into tasks for parallelism
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bxor_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,         // scalar b, passed as untyped pointer
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (generated code keeps both returns)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// disabled for BXOR: no colscale variant is generated for this operator
#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// disabled for BXOR: no rowscale variant is generated for this operator
#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bxor_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,     // used only when is_eWiseUnion
    const GB_void *beta_scalar_in,      // used only when is_eWiseUnion
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion substitutes alpha/beta for entries missing in A/B
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bxor_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bxor_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (BXOR is commutative, so GB_BINOP_FLIP is 0 and this branch is used.)
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bxor_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bxor_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bxor_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (NULL if B is full)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x) ^ (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bxor_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A (NULL if A is full)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij) ^ (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    int8_t aij = GBX (Ax, pA, false) ;              \
    Cx [pC] = (x) ^ (aij) ;                         \
}

GrB_Info GB (_bind1st_tran__bxor_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (a no-op here since A and C share one type)
    #undef GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    int8_t aij = GBX (Ax, pA, false) ;              \
    Cx [pC] = (aij) ^ (y) ;                         \
}

GrB_Info GB (_bind2nd_tran__bxor_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
lu.orio.par.c
/*
 * LU decomposition (no pivoting) of an N x N matrix, auto-tuned by Orio.
 * The loop nest inside main() is machine-generated (PLuTo/Orio polyhedral
 * tiling + register tiling + vectorization); do not hand-edit its bounds.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>

#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/* N is supplied on the compile command line (see input_params below). */
double L[N][N];
double U[N][N];
double A[N][N+13];   /* 13 columns of padding to avoid cache-line conflicts */

/* Build A = L*U from triangular factors so the in-place LU in main() never
   divides by zero. */
void init_arrays()
{
  int i, j, k;

  /* have to initialize this matrix properly to prevent
   * division by zero
   */
  for (i=0; i<N; i++) {
    for (j=0; j<N; j++) {
      L[i][j] = 0.0;
      U[i][j] = 0.0;
    }
  }
  for (i=0; i<N; i++) {
    for (j=0; j<=i; j++) {
      L[i][j] = i+j+1;
      U[j][i] = i+j+1;
    }
  }
  for (i=0; i<N; i++) {
    for (j=0; j<N; j++) {
      for (k=0; k<N; k++) {
        A[i][j] += L[i][k]*U[k][j];
      }
    }
  }
}

/* Wall-clock time in seconds. */
double rtclock()
{
  struct timezone tzp;
  struct timeval tp;
  int stat;  /* NOTE(review): unused, and gettimeofday's return is ignored */
  gettimeofday (&tp, &tzp);
  return (tp.tv_sec + tp.tv_usec*1.0e-6);
}

int main()
{
  init_arrays();

  double annot_t_start=0, annot_t_end=0, annot_t_total=0;
  int annot_i;

  /* Time REPS repetitions of the tuned LU kernel. */
  for (annot_i=0; annot_i<REPS; annot_i++)
  {
    annot_t_start = rtclock();

/*@ begin PerfTuning (
  def build {
    arg build_command = 'icc -O3 -openmp -lm';
  }
  def performance_counter {
    arg repetitions = 1;
  }
  def performance_params {
    param T1_1[] = [16];
    param T1_2[] = [128];
    param T1_3[] = [16];
    param T2_1[] = [4];
    param T2_2[] = [1];
    param T2_3[] = [4];
    constraint c1 = (T1_1*T2_1<=1024 and T1_1*T2_1<=1024 and T1_1*T2_1<=1024);
    constraint c2 = ((T1_1 == T1_3) and (T2_1 == T2_3));
    param U1[] = [3,5,7];
    param U2[] = [1];
    param U3[] = [3,5,7];
    constraint c3 = (U1*U2*U3<=256);
    param PERM[] = [
#        [0,1,2],
#        [0,2,1],
#        [1,0,2],
#        [1,2,0],
        [2,0,1],
#        [2,1,0],
    ];
    param PAR[] = [True];
    param SCREP[] = [True];
    param IVEC[] = [True];
    param RECTILE[] = [False];
  }
  def search {
    arg algorithm = 'Exhaustive';
#    arg algorithm = 'Simplex';
#    arg total_runs = 1;
  }
  def input_params {
    param N[] = [2000];
  }
  def input_vars {
    arg decl_file = 'decl_code.h';
    arg init_file = 'init_code.c';
  }
) @*/

/**-- (Generated by Orio)
Best performance cost: 0.413585
Tuned for specific problem sizes:
  N = 2000
Best performance parameters:
  IVEC = True
  PAR = True
  PERM = [2, 0, 1]
  RECTILE = False
  SCREP = True
  T1_1 = 16
  T1_2 = 128
  T1_3 = 16
  T2_1 = 4
  T2_2 = 1
  T2_3 = 4
  U1 = 5
  U2 = 1
  U3 = 5
--**/

  register int i,j,k;
  register int c1t, c2t, c3t, c4t, c5t, c6t, c7t, c8t, c9t, c10t, c11t, c12t;
  register int newlb_c1, newlb_c2, newlb_c3, newlb_c4, newlb_c5, newlb_c6,
    newlb_c7, newlb_c8, newlb_c9, newlb_c10, newlb_c11, newlb_c12;
  register int newub_c1, newub_c2, newub_c3, newub_c4, newub_c5, newub_c6,
    newub_c7, newub_c8, newub_c9, newub_c10, newub_c11, newub_c12;

/*@ begin PolySyn(
  parallel = PAR;
  tiles = [T1_1,T1_2,T1_3,T2_1,T2_2,T2_3];
  permut = PERM;
  unroll_factors = [U1,U2,U3];
  rect_regtile = RECTILE;
  scalar_replace = SCREP;
  vectorize = IVEC;

  profiling_code = 'lu_profiling.c';
  compile_cmd = 'gcc';
  compile_opts = '-lm';
) @*/

/* Everything from here to "polysyn end" is generated by the PolySyn
   transformation; the includes/defines below are pasted by the generator. */
#include <math.h>
#include <assert.h>
#include <omp.h>
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

  int c1, c2, c3, c4, c5, c6, c7, c8, c9;
  register int lb, ub, lb1, ub1, lb2, ub2;

/* polysyn start */
if (N >= 2) {
  /* c1: tile wavefront; c2 tiles are independent and run in parallel */
  for (c1=-1;c1<=floord(3*N-5,128);c1++) {
    lb1=max(max(0,ceild(64*c1-N+2,64)),ceild(32*c1-63,96));
    ub1=min(floord(64*c1+63,64),floord(N-1,128));
#pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5,c6,c7,c8,c9)
    for (c2=lb1; c2<=ub1; c2++) {
      for (c3=max(ceild(32*c1-32*c2-1953,2016),ceild(32*c1-32*c2-31,32));c3<=floord(N-1,64);c3++) {
        for (c4=max(max(0,4*c1-4*c2-192*c3-186),4*c1-4*c2);c4<=min(min(min(min(4*c1-4*c2+3,floord(N-2,16)),floord(64*c2+63,8)),floord(480*c3+465,8)),floord(32*c3+31,8));c4++) {
          for (c6=max(max(max(max(ceild(-4*c1+4*c2+4*c3+c4-45,49),ceild(4*c1-4*c2-4*c3-c4-45,47)),ceild(8*c4-105,120)),4*c3),ceild(8*c4-7,8));c6<=min(4*c3+3,floord(N-1,16));c6++) {
            /* diagonal tile: pivot-row scaling fused with the first updates */
            if ((c1 == c2+c3) && (c4 == c6)) {
              for (c7=max(0,16*c6);c7<=min(min(N-2,16*c6+14),128*c2+126);c7++) {
                for (c8=max(128*c2,c7+1);c8<=min(128*c2+127,N-1);c8++) {
                  A[c7][c8]=A[c7][c8]/A[c7][c7];
                  for (c9=c7+1;c9<=min(N-1,16*c6+15);c9++) {
                    A[c9][c8]=A[c9][c8]-A[c9][c7]*A[c7][c8];
                  }
                }
              }
            }
/*@ begin Loop(
  transform Composite(
    permut = [['c9', 'c7', 'c8']],
    regtile = (['c7', 'c8', 'c9'],[5, 1, 5]),
    scalarreplace = (True, 'double'),
    vector = (True, ['ivdep','vector always']))
  for (c7=max(0,16*c4);c7<=min(min(16*c6-1,16*c4+15),128*c2+126);c7++) {
    for (c8=max(128*c2,c7+1);c8<=min(N-1,128*c2+127);c8++) {
      for (c9=16*c6;c9<=min(N-1,16*c6+15);c9++) {
        A[c9][c8]=A[c9][c8]-A[c9][c7]*A[c7][c8];
      }
    }
  }
) @*/
            /* trailing-submatrix update, 5x5 register-tiled over (c9,c7) */
            {
              for (c9t=16*c6; c9t<=min(N-1,16*c6+15)-4; c9t=c9t+5) {
                for (c7t=max(0,16*c4); c7t<=min(min(16*c6-1,16*c4+15),128*c2+126)-4; c7t=c7t+5) {
                  for (c7=c7t; c7<=c7t+4; c7=c7+1) {
                    register int cbv_1, cbv_2;
                    cbv_1=max(128*c2,c7+1);
                    cbv_2=min(N-1,128*c2+127);
#pragma ivdep
#pragma vector always
                    for (c8=cbv_1; c8<=cbv_2; c8++ ) {
                      double scv_1, scv_2, scv_3, scv_4, scv_5, scv_6;
                      scv_1=A[(c9t+2)][c8];
                      scv_2=A[c9t][c8];
                      scv_3=A[(c9t+4)][c8];
                      scv_4=A[(c9t+1)][c8];
                      scv_5=A[c7][c8];
                      scv_6=A[(c9t+3)][c8];
                      scv_2=scv_2-A[c9t][c7]*scv_5;
                      scv_4=scv_4-A[(c9t+1)][c7]*scv_5;
                      scv_1=scv_1-A[(c9t+2)][c7]*scv_5;
                      scv_6=scv_6-A[(c9t+3)][c7]*scv_5;
                      scv_3=scv_3-A[(c9t+4)][c7]*scv_5;
                      A[(c9t+2)][c8]=scv_1;
                      A[c9t][c8]=scv_2;
                      A[(c9t+4)][c8]=scv_3;
                      A[(c9t+1)][c8]=scv_4;
                      A[(c9t+3)][c8]=scv_6;
                    }
                  }
                }
                /* c7 cleanup (remainder after the 5-wide c7 tiles) */
                for (c7=c7t; c7<=min(min(16*c6-1,16*c4+15),128*c2+126); c7=c7+1) {
                  register int cbv_3, cbv_4;
                  cbv_3=max(128*c2,c7+1);
                  cbv_4=min(N-1,128*c2+127);
#pragma ivdep
#pragma vector always
                  for (c8=cbv_3; c8<=cbv_4; c8++ ) {
                    double scv_7, scv_8, scv_9, scv_10, scv_11, scv_12;
                    scv_7=A[(c9t+2)][c8];
                    scv_8=A[c9t][c8];
                    scv_9=A[(c9t+4)][c8];
                    scv_10=A[(c9t+1)][c8];
                    scv_11=A[c7][c8];
                    scv_12=A[(c9t+3)][c8];
                    scv_8=scv_8-A[c9t][c7]*scv_11;
                    scv_10=scv_10-A[(c9t+1)][c7]*scv_11;
                    scv_7=scv_7-A[(c9t+2)][c7]*scv_11;
                    scv_12=scv_12-A[(c9t+3)][c7]*scv_11;
                    scv_9=scv_9-A[(c9t+4)][c7]*scv_11;
                    A[(c9t+2)][c8]=scv_7;
                    A[c9t][c8]=scv_8;
                    A[(c9t+4)][c8]=scv_9;
                    A[(c9t+1)][c8]=scv_10;
                    A[(c9t+3)][c8]=scv_12;
                  }
                }
              }
              /* c9 cleanup (remainder after the 5-wide c9 tiles) */
              for (c9=c9t; c9<=min(N-1,16*c6+15); c9=c9+1) {
                for (c7t=max(0,16*c4); c7t<=min(min(16*c6-1,16*c4+15),128*c2+126)-4; c7t=c7t+5) {
                  for (c7=c7t; c7<=c7t+4; c7=c7+1) {
                    register int cbv_5, cbv_6;
                    cbv_5=max(128*c2,c7+1);
                    cbv_6=min(N-1,128*c2+127);
#pragma ivdep
#pragma vector always
                    for (c8=cbv_5; c8<=cbv_6; c8++ ) {
                      double scv_13;
                      scv_13=A[c9][c8];
                      scv_13=scv_13-A[c9][c7]*A[c7][c8];
                      A[c9][c8]=scv_13;
                    }
                  }
                }
                for (c7=c7t; c7<=min(min(16*c6-1,16*c4+15),128*c2+126); c7=c7+1) {
                  register int cbv_7, cbv_8;
                  cbv_7=max(128*c2,c7+1);
                  cbv_8=min(N-1,128*c2+127);
#pragma ivdep
#pragma vector always
                  for (c8=cbv_7; c8<=cbv_8; c8++ ) {
                    double scv_14;
                    scv_14=A[c9][c8];
                    scv_14=scv_14-A[c9][c7]*A[c7][c8];
                    A[c9][c8]=scv_14;
                  }
                }
              }
            }
/*@ end @*/
            /* scale the last pivot row of the diagonal tile */
            if ((c1 == c2+c3) && (c2 >= ceild(16*c4-111,128)) && (-c4 == -c6) && (c4 <= floord(N-17,16))) {
              for (c8=max(16*c4+16,128*c2);c8<=min(N-1,128*c2+127);c8++) {
                A[16*c4+15][c8]=A[16*c4+15][c8]/A[16*c4+15][16*c4+15];
              }
            }
          }
        }
      }
    }
  }
}
/* polysyn end */

/*@ end @*/
/*@ end @*/

    annot_t_end = rtclock();
    annot_t_total += annot_t_end - annot_t_start;
  }

  annot_t_total = annot_t_total / REPS;

#ifndef TEST
  printf("%f\n", annot_t_total);
#else
  {
    int i, j;
    for (i=0; i<N; i++) {
      for (j=0; j<N; j++) {
        if (j%100==0)
          printf("\n");
        printf("%f ",A[i][j]);
      }
      printf("\n");
    }
  }
#endif

  /* return a data-dependent value so the kernel is not optimized away */
  return ((int) A[0][0]);
}
ConvolutionFilter.h
/*
 * ConvolutionFilter.h
 *
 *  Created on: 12.07.2013
 *      Author: darius
 *
 * 2-D image convolution with a user-supplied or built-in kernel.
 * NOTE(review): uses std::min/std::max/std::numeric_limits without including
 * <algorithm>/<limits> here — presumably pulled in via BaseObject.h or
 * Matrix.h; confirm.
 */

#ifndef CONVOLUTIONFILTER_H_
#define CONVOLUTIONFILTER_H_

#include "../BaseObject.h"
#include "../DataStructures/Matrix.h"

namespace Lazarus {

template<typename T>
class ConvolutionFilter: public Lazarus::BaseObject {
public:

	// 3x3 horizontal Sobel edge-detection kernel.
	// (Returned pointers from all get_*_KERNEL() helpers refer to
	// function-local statics; callers must not delete them.)
	static Lazarus::Matrix2<double>* get_Sobel_X_KERNEL()
	{
		static Lazarus::Matrix2<double> _SOBEL_KERNEL;

		_SOBEL_KERNEL.initMatrix(3,3);

		_SOBEL_KERNEL.setData(0,0,-1.0);
		_SOBEL_KERNEL.setData(0,1,0.0);
		_SOBEL_KERNEL.setData(0,2,1.0);

		_SOBEL_KERNEL.setData(1,0,-2.0);
		_SOBEL_KERNEL.setData(1,1,0.0);
		_SOBEL_KERNEL.setData(1,2,+2.0);

		_SOBEL_KERNEL.setData(2,0,-1.0);
		_SOBEL_KERNEL.setData(2,1,0.0);
		_SOBEL_KERNEL.setData(2,2,1.0);

		return &_SOBEL_KERNEL;
	}

	// 3x3 vertical Sobel edge-detection kernel.
	static Lazarus::Matrix2<double>* get_Sobel_Y_KERNEL()
	{
		static Lazarus::Matrix2<double> _SOBEL_KERNEL;

		_SOBEL_KERNEL.initMatrix(3,3);

		_SOBEL_KERNEL.setData(0,0,1.0);
		_SOBEL_KERNEL.setData(0,1,2.0);
		_SOBEL_KERNEL.setData(0,2,1.0);

		_SOBEL_KERNEL.setData(1,0,0.0);
		_SOBEL_KERNEL.setData(1,1,0.0);
		_SOBEL_KERNEL.setData(1,2,0.0);

		_SOBEL_KERNEL.setData(2,0,-1.0);
		_SOBEL_KERNEL.setData(2,1,-2.0);
		_SOBEL_KERNEL.setData(2,2,-1.0);

		return &_SOBEL_KERNEL;
	}

	// 5x5 unsharp-masking kernel (center 476/256, negative Gaussian
	// surround; weights sum to 1).
	static Lazarus::Matrix2<double>* get_UNSHARP5x5_KERNEL()
	{
		static Lazarus::Matrix2<double> _SHARPEN_KERNEL;

		_SHARPEN_KERNEL.initMatrix(5,5);

		_SHARPEN_KERNEL.setData(0,0,-1.0/256.0);
		_SHARPEN_KERNEL.setData(0,1,-4.0/256.0);
		_SHARPEN_KERNEL.setData(0,2,-6.0/256.0);
		_SHARPEN_KERNEL.setData(0,3,-4.0/256.0);
		_SHARPEN_KERNEL.setData(0,4,-1.0/256.0);

		_SHARPEN_KERNEL.setData(1,0,-4.0/256.0);
		_SHARPEN_KERNEL.setData(1,1,-16.0/256.0);
		_SHARPEN_KERNEL.setData(1,2,-24.0/256.0);
		_SHARPEN_KERNEL.setData(1,3,-16.0/256.0);
		_SHARPEN_KERNEL.setData(1,4,-4.0/256.0);

		_SHARPEN_KERNEL.setData(2,0,-6.0/256.0);
		_SHARPEN_KERNEL.setData(2,1,-24.0/256.0);
		_SHARPEN_KERNEL.setData(2,2,476.0/256.0);
		_SHARPEN_KERNEL.setData(2,3,-24.0/256.0);
		_SHARPEN_KERNEL.setData(2,4,-6.0/256.0);

		_SHARPEN_KERNEL.setData(3,0,-4.0/256.0);
		_SHARPEN_KERNEL.setData(3,1,-16.0/256.0);
		_SHARPEN_KERNEL.setData(3,2,-24.0/256.0);
		_SHARPEN_KERNEL.setData(3,3,-16.0/256.0);
		_SHARPEN_KERNEL.setData(3,4,-4.0/256.0);

		_SHARPEN_KERNEL.setData(4,0,-1.0/256.0);
		_SHARPEN_KERNEL.setData(4,1,-4.0/256.0);
		_SHARPEN_KERNEL.setData(4,2,-6.0/256.0);
		_SHARPEN_KERNEL.setData(4,3,-4.0/256.0);
		_SHARPEN_KERNEL.setData(4,4,-1.0/256.0);

		return &_SHARPEN_KERNEL;
	}

	// NOTE(review): despite the name, these coefficients are identical to
	// get_GAUSSIAN_BLUR_KERNEL() below (a 3x3 Gaussian blur, sum 1) — this
	// does NOT sharpen.  Presumably a copy/paste slip or intended as the
	// blur stage of an unsharp-mask pipeline; confirm before relying on it.
	static Lazarus::Matrix2<double>* get_SHARPEN_KERNEL()
	{
		static Lazarus::Matrix2<double> _SHARPEN_KERNEL;

		_SHARPEN_KERNEL.initMatrix(3,3);

		_SHARPEN_KERNEL.setData(0,0,1.0/16.0);
		_SHARPEN_KERNEL.setData(0,1,2.0/16.0);
		_SHARPEN_KERNEL.setData(0,2,1.0/16.0);

		_SHARPEN_KERNEL.setData(1,0,2.0/16.0);
		_SHARPEN_KERNEL.setData(1,1,4.0/16.0);
		_SHARPEN_KERNEL.setData(1,2,2.0/16.0);

		_SHARPEN_KERNEL.setData(2,0,1.0/16.0);
		_SHARPEN_KERNEL.setData(2,1,2.0/16.0);
		_SHARPEN_KERNEL.setData(2,2,1.0/16.0);

		return &_SHARPEN_KERNEL;
	}

	// 3x3 Gaussian blur kernel (weights sum to 1).
	static Lazarus::Matrix2<double>* get_GAUSSIAN_BLUR_KERNEL()
	{
		static Lazarus::Matrix2<double> _GBLUR_KERNEL;

		_GBLUR_KERNEL.initMatrix(3,3);

		_GBLUR_KERNEL.setData(0,0,1.0/16.0);
		_GBLUR_KERNEL.setData(0,1,2.0/16.0);
		_GBLUR_KERNEL.setData(0,2,1.0/16.0);

		_GBLUR_KERNEL.setData(1,0,2.0/16.0);
		_GBLUR_KERNEL.setData(1,1,4.0/16.0);
		_GBLUR_KERNEL.setData(1,2,2.0/16.0);

		_GBLUR_KERNEL.setData(2,0,1.0/16.0);
		_GBLUR_KERNEL.setData(2,1,2.0/16.0);
		_GBLUR_KERNEL.setData(2,2,1.0/16.0);

		return &_GBLUR_KERNEL;
	}

	// 3x3 box blur kernel (uniform 1/9 weights).
	static Lazarus::Matrix2<double>* get_BOX_BLUR_KERNEL()
	{
		static Lazarus::Matrix2<double> _BBLUR_KERNEL;

		_BBLUR_KERNEL.initMatrix(3,3);

		_BBLUR_KERNEL.setData(0,0,1.0/9.0);
		_BBLUR_KERNEL.setData(0,1,1.0/9.0);
		_BBLUR_KERNEL.setData(0,2,1.0/9.0);

		_BBLUR_KERNEL.setData(1,0,1.0/9.0);
		_BBLUR_KERNEL.setData(1,1,1.0/9.0);
		_BBLUR_KERNEL.setData(1,2,1.0/9.0);

		_BBLUR_KERNEL.setData(2,0,1.0/9.0);
		_BBLUR_KERNEL.setData(2,1,1.0/9.0);
		_BBLUR_KERNEL.setData(2,2,1.0/9.0);

		return &_BBLUR_KERNEL;
	}

	// Default: no kernel set; call setFilterMask() before filterImage().
	ConvolutionFilter()
	{
		mp_filter_mask = NULL;
	}

	// The filter does NOT take ownership of 'filter'; the caller keeps it
	// alive for the lifetime of this object.
	ConvolutionFilter(Lazarus::Matrix2<double>* filter)
	{
		mp_filter_mask = filter;
	}

	virtual ~ConvolutionFilter(){}

	void setFilterMask(Lazarus::Matrix2<double>* filter)
	{
		this->mp_filter_mask = filter;
	}

	/**
	 * We assume a filter mask with odd dimensions. The convolution will be computed on an extended image with black borders
	 * such that the kernel can be positioned onto the first image pixel.
	 * Returns the filtered image in case of success otherwise NULL.
	 * The returned image is heap-allocated; the caller owns and must delete it.
	 * Only the first three channels are convolved; a fourth (alpha) channel,
	 * if present, is copied through unchanged.
	 * */
	Lazarus::Image<T>* filterImage( Lazarus::Image<T>* image)
	{
		// half-width/half-height of the kernel (kernel dimensions are odd)
		unsigned int offset_x = (mp_filter_mask->getColumnCount()-1)/2;
		unsigned int offset_y = (mp_filter_mask->getRowCount()-1)/2;
		unsigned int image_width = image->getm_width();
		unsigned int image_heigth = image->getm_height();
		unsigned int channel_count = image->getm_channel_count();
		unsigned int filter_width = mp_filter_mask->getColumnCount();
		unsigned int filter_height = mp_filter_mask->getRowCount();

		if(filter_width % 2 != 1)
		{
			printf("filter width %d is not odd\n",filter_width);
			return NULL;
		}

		if(filter_height % 2 != 1)
		{
			printf("filter height %d is not odd\n",filter_height);
			return NULL;
		}

		Lazarus::Image<T>* output = new Lazarus::Image<T>( image_width, image_heigth,
				image->getm_data_alignment(),image->getChannelLimits() );

		// working copy padded by the kernel half-size on every side
		Lazarus::Image<T>* temporary = new Lazarus::Image<T>( image_width + 2*offset_x, image_heigth + 2*offset_y,
				image->getm_data_alignment(),image->getChannelLimits() );

		//fill the output and temp image with black
		Lazarus::FastKTuple<T> color(channel_count);

		for(unsigned int i=0; i< channel_count; i++)
		{
			color.setElement(i,0);
		}

		output->fillImageFast( &color );
		temporary->fillImageFast( &color );

		//copy the input image into the temp buffer;
		for(unsigned int i=0; i<image_width; i++)
		{
			for(unsigned int j=0; j<image_heigth; j++)
			{
				image->getPixelFast( &color,i,j );
				temporary->setPixelFast(&color,offset_x + i,offset_y + j);
			}
		}

		//start the convolution process
		//over every pixel
		// convolve at most 3 color channels; alpha is handled separately
		unsigned int c_limit = 0;

		if(channel_count > 3)
			c_limit = 3;
		else
			c_limit = channel_count;

		// NOTE(review): OpenMP on an unsigned loop variable requires
		// OpenMP >= 3.0; confirm the build's compiler settings.
		#pragma omp parallel for
		for(unsigned int i=offset_x; i<image_width+(offset_x); i++)
		{
			double temp_value = 0;
			double filter_value = 0;
			Lazarus::FastKTuple<T> new_color(channel_count);
			Lazarus::FastKTuple<T> color_(channel_count);

			for(unsigned int j=offset_y; j<image_heigth+(offset_y); j++)
			{
				//over every color channel
				for(unsigned int c=0; c<c_limit; c++)
				{
					//convolution
					// NOTE(review): '-offset_x' negates an unsigned int before
					// the int conversion — works for small offsets in practice,
					// but '-(int)offset_x' would be well-defined; confirm.
					for(int k=-offset_x; k<=(int)offset_x; ++k)
					{
						for(int l=-offset_y; l<=(int)offset_y; ++l)
						{
							temporary->getPixelFast(&color_, (unsigned int)((int)i+k), (unsigned int)((int)j+l));
							filter_value = mp_filter_mask->getData((unsigned int)((int)offset_x+k), (unsigned int)((int)offset_y+l) );
							temp_value += (double)(color_.getElement(c))*filter_value;
						}
					}

					// clamp to the channel's valid range
					// NOTE(review): for floating-point T, numeric_limits<T>::min()
					// is the smallest POSITIVE value, not the lower bound —
					// lowest() would be the usual choice; confirm intent.
					new_color.setElement(c,(T)std::min(std::max(temp_value,(double)std::numeric_limits<T>::min()),(double)image->getChannelLimits().m_max_values.getElement(c)));
					temp_value=0;
				}

				//set the alpha value to the image value
				if(channel_count>3)
				{
					new_color.setElement(3,color_.getElement(3));
				}

				output->setPixelFast(&new_color,i-(offset_x),j-(offset_y));
			}
		}

		//delete the temporary image
		delete temporary;

		return output;
	}

private:
	// borrowed pointer to the active kernel (not owned, never freed here)
	Lazarus::Matrix2<double>* mp_filter_mask;
};

} /* namespace Lazarus */

#endif /* CONVOLUTIONFILTER_H_ */
effect.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT % % E F F E C T % % EEE FFF FFF EEE C T % % E F F E C T % % EEEEE F F EEEEE CCCC T % % % % % % MagickCore Image Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/threshold.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveBlurImage() 
adaptively blurs the image by blurring less
%  intensely near image edges and more intensely far from edges. We blur the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
%  The format of the AdaptiveBlurImage method is:
%
%      Image *AdaptiveBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *blur_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *blur_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  /* a zero sigma means no blur: return the clone unchanged */
  if (fabs(sigma) < MagickEpsilon)
    return(blur_image);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, blur, and level again.
    The resulting edge_image acts as a per-pixel "edge strength" map that
    selects how wide a kernel each output pixel gets.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
    width is odd, and kernels are allocated only at even indices
    i = 0, 2, 4, ...; kernel[i] is a (width-i) x (width-i) Gaussian.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory(
      (size_t) (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /* fold the normalization residue into the center tap */
    kernel[i][(k-1)/2]+=(double) (1.0-normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  /* allocation failed partway: unwind the kernels built so far */
  if (i < (ssize_t) width)
    {
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    register const Quantum
      *magick_restrict r;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      register const Quantum
        *magick_restrict p;

      register ssize_t
        i;

      ssize_t
        center,
        j;

      /*
        Strong edges (high intensity in edge_image) give a large j, hence
        a small (width-j) kernel and less blur; j is clamped to [0,width]
        and forced even so kernel[j] is always an allocated kernel.
      */
      j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5);
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const double
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each tap by the source pixel's alpha.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(blur_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d a p t i v e   S h a r p e n   I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
%  intensely near image edges and less intensely far from edges.
We sharpen the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma).  For reasonable results, radius should be larger than sigma.  Use a
% radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
% The format of the AdaptiveSharpenImage method is:
%
%     Image *AdaptiveSharpenImage(const Image *image,const double radius,
%       const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o radius: the radius of the Gaussian, in pixels, not counting the center
%     pixel.
%
%   o sigma: the standard deviation of the Laplacian, in pixels.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *sharp_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *sharp_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sharp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sharp_image == (Image *) NULL)
    return((Image *) NULL);
  /* a (near) zero sigma means no sharpening: return the clone unchanged */
  if (fabs(sigma) < MagickEpsilon)
    return(sharp_image);
  if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, sharp, and level again.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      /* blurring the edge map is best-effort: keep edge_image on failure */
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  /* only even slots are populated; kernel[i] is a (width-i) x (width-i) LoG */
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        /* negative Gaussian ring with a positive center (sharpening kernel) */
        kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    kernel[i][(k-1)/2]=(double) ((-2.0)*normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /* allocation failed mid-way: release the kernels built so far */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively sharpen image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sharp_image,sharp_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sharp_image->rows; y++)
  {
    register const Quantum
      *magick_restrict r;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) sharp_image->columns; x++)
    {
      register const Quantum
        *magick_restrict p;

      register ssize_t
        i;

      ssize_t
        center,
        j;

      /*
        Strong edges (high edge intensity) select a small j and hence a large
        kernel; flat areas select a large j and hence a small kernel.
      */
      j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5);
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      /* clamp j to an even index: only even kernel slots were populated */
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          sharp_traits,
          traits;

        register const double
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        sharp_traits=GetPixelChannelTraits(sharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (sharp_traits == UndefinedPixelTrait))
          continue;
        if ((sharp_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(sharp_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((sharp_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(sharp_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sharp_image->type=image->type;
  sharp_view=DestroyCacheView(sharp_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    sharp_image=DestroyImage(sharp_image);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l u r I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlurImage() blurs an image.  We convolve the image with a Gaussian operator
%  of the given radius and standard deviation (sigma).
For reasonable results, % the radius should be larger than sigma. Use a radius of 0 and BlurImage() % selects a suitable radius for you. % % The format of the BlurImage method is: % % Image *BlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *BlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { char geometry[MagickPathExtent]; KernelInfo *kernel_info; Image *blur_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateBlurImage(image,radius,sigma,exception); if (blur_image != (Image *) NULL) return(blur_image); #endif (void) FormatLocaleString(geometry,MagickPathExtent, "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); blur_image=ConvolveImage(image,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n v o l v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvolveImage() applies a custom convolution kernel to the image. 
%
%  The format of the ConvolveImage method is:
%
%      Image *ConvolveImage(const Image *image,const KernelInfo *kernel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o kernel: the filtering kernel.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConvolveImage(const Image *image,
  const KernelInfo *kernel_info,ExceptionInfo *exception)
{
  Image
    *convolution;

#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the OpenCL-accelerated path when it yields a result.
  */
  convolution=AccelerateConvolveImage(image,kernel_info,exception);
  if (convolution != (Image *) NULL)
    return(convolution);
#endif
  /*
    A convolution is a single iteration of convolve morphology.
  */
  convolution=MorphologyImage(image,ConvolveMorphology,1,kernel_info,
    exception);
  return(convolution);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s p e c k l e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DespeckleImage() reduces the speckle noise in an image while preserving the
%  edges of the original image.  A speckle removing filter uses a complementary
%  hulling technique (raising pixels that are darker than their surrounding
%  neighbors, then complementarily lowering pixels that are brighter than their
%  surrounding neighbors) to reduce the speckle index of that image (reference
%  Crimmins speckle removal).
%
%  The format of the DespeckleImage method is:
%
%      Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Hull() performs one step of the Crimmins complementary hulling technique on
  the single-channel buffer f, using g as scratch.  Both buffers are
  (columns+2) x (rows+2) with a one-pixel border around the image data.
  Pixels are nudged toward the neighbor at (x_offset,y_offset): raised by one
  step when polarity > 0, lowered otherwise.
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  register Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /* skip the one-pixel top border row */
  p=f+(columns+2);
  q=g+(columns+2);
  /* r points at the neighbor offset by (x_offset,y_offset) */
  r=p+(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickRealType
      v;

    register ssize_t
      i,
      x;

    /* i indexes past the border: row y starts at (2*y+1)+y*columns */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /*
    Second pass: compare against the neighbor (r) and its mirror (s) in the
    scratch buffer, writing the result back into f.
  */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*(columns+2)+x_offset);
  s=q-(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    register ssize_t
      i,
      x;

    MagickRealType
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}

MagickExport Image *DespeckleImage(const Image
*image,ExceptionInfo *exception)
{
#define DespeckleImageTag  "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  register ssize_t
    i;

  size_t
    length;

  /* hull neighbor offsets: S, E, SE, SW (and their negations below) */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  despeckle_image=AccelerateDespeckleImage(image,exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(despeckle_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  /* process one pixel channel at a time through the hull passes */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel
      channel;

    PixelTrait
      despeckle_traits,
      traits;

    register ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    channel=GetPixelChannelChannel(image,i);
    traits=GetPixelChannelTraits(image,channel);
    despeckle_traits=GetPixelChannelTraits(despeckle_image,channel);
    if ((traits == UndefinedPixelTrait) ||
        (despeckle_traits == UndefinedPixelTrait))
      continue;
    if ((despeckle_traits & CopyPixelTrait) != 0)
      continue;
    /* copy this channel into the bordered scratch buffer */
    (void) memset(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[j++]=p[i];
        p+=GetPixelChannels(image);
      }
      j++;
    }
    (void) memset(buffer,0,length*sizeof(*buffer));
    /* raise then lower along each direction and its opposite */
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /* write the despeckled channel back to the output image */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelChannel(despeckle_image,channel,pixels[j++],q);
        q+=GetPixelChannels(despeckle_image);
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E d g e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EdgeImage() finds edges in an image.  Radius defines the radius of the
%  convolution filter.  Use a radius of 0 and EdgeImage() selects a suitable
%  radius for you.
%
%  The format of the EdgeImage method is:
%
%      Image *EdgeImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /* clear the kernel and rebuild it by hand; restore the signature after */
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Laplacian-style kernel: all taps -1, center = width*height-1 (zero-sum).
  */
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]=(-1.0);
  /* after the loop i == width*height, so i/2 indexes the center tap */
  kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0;
  edge_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E m b o s s I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EmbossImage() returns a grayscale image with a three-dimensional effect.
%  We convolve the image with a Gaussian operator of the given radius and
%  standard deviation (sigma).
For reasonable results, radius should be
% larger than sigma.  Use a radius of 0 and Emboss() selects a suitable
% radius for you.
%
% The format of the EmbossImage method is:
%
%     Image *EmbossImage(const Image *image,const double radius,
%       const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o radius: the radius of the pixel neighborhood.
%
%   o sigma: the standard deviation of the Gaussian, in pixels.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->width*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Build a directional Gaussian kernel: negative taps on one side of the
    anti-diagonal (u == k), positive on the other, zero elsewhere.
  */
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ?
        -8.0 : 8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /* normalize the kernel so its taps sum to one */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image,exception);
  return(emboss_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G a u s s i a n B l u r I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GaussianBlurImage() blurs an image.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, the radius should be larger than sigma.  Use a
%  radius of 0 and GaussianBlurImage() selects a suitable radius for you
%
%  The format of the GaussianBlurImage method is:
%
%      Image *GaussianBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    kernel_geometry[MagickPathExtent];

  Image
    *gaussian_image;

  KernelInfo
    *kernel_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build a 2-D Gaussian kernel from the geometry string and convolve.
  */
  (void) FormatLocaleString(kernel_geometry,MagickPathExtent,
    "gaussian:%.20gx%.20g",radius,sigma);
  kernel_info=AcquireKernelInfo(kernel_geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  gaussian_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(gaussian_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     K u w a h a r a I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  KuwaharaImage() is an edge preserving noise reduction filter.
%
%  The format of the KuwaharaImage method is:
%
%      Image *KuwaharaImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the square window radius.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Return the Rec. 709 luma of the mean pixel accumulated in double precision.
*/
static inline MagickRealType GetMeanLuma(const Image *magick_restrict image,
  const double *magick_restrict pixel)
{
  return(0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+
    0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+
    0.072186f*pixel[image->channel_map[BluePixelChannel].offset]);  /* Rec709 */
}

MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define KuwaharaImageTag  "Kuwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=(size_t) radius+1;
  /* pre-smooth the image; quadrant statistics are taken from this copy */
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,0,0,MagickTrue,exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse)
    {
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,kuwahara_image,gaussian_image->rows,1)
#endif
  for (y=0; y < (ssize_t) gaussian_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gaussian_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      double
        min_variance;

      RectangleInfo
        quadrant,
        target;

      register size_t
        i;

      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      /*
        Examine the four width x width quadrants that touch (x,y) and keep
        the one with the smallest luma variance.
      */
      for (i=0; i < 4; i++)
      {
        const Quantum
          *magick_restrict k;

        double
          mean[MaxPixelChannels],
          variance;

        register ssize_t
          n;

        ssize_t
          j;

        quadrant.x=x;
        quadrant.y=y;
        switch (i)
        {
          case 0:
          {
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          case 3:
          default:
            break;
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const Quantum *) NULL)
          break;
        /* per-channel mean over the quadrant */
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]=0.0;
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
            mean[j]+=(double) k[j];
          k+=GetPixelChannels(gaussian_image);
        }
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]/=(double) (width*width);
        /* luma variance of the quadrant about its mean luma */
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(gaussian_image,k);
          variance+=(luma-GetMeanLuma(gaussian_image,mean))*
            (luma-GetMeanLuma(gaussian_image,mean));
          k+=GetPixelChannels(gaussian_image);
        }
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      if (i < 4)
        {
          /* a quadrant read failed: abort this row */
          status=MagickFalse;
          break;
        }
      /* write the value interpolated at the winning quadrant's center */
      status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double)
        target.y+target.height/2.0,q,exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(kuwahara_image);
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,KuwaharaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L o c a l C o n t r a s t I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LocalContrastImage() attempts to increase the appearance of large-scale
%  light-dark transitions.  Local contrast enhancement works similarly to
%  sharpening with an unsharp mask, however the mask is instead created using
%  an image with a greater blur distance.
%
%  The format of the LocalContrastImage method is:
%
%      Image *LocalContrastImage(const Image *image, const double radius,
%        const double strength,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian blur, in percentage with 100%
%      resulting in a blur radius of 20% of largest dimension.
%
%    o strength: the strength of the blur mask in percentage.
% % o exception: return any errors or warnings in this structure. % */ MagickExport Image *LocalContrastImage(const Image *image,const double radius, const double strength,ExceptionInfo *exception) { #define LocalContrastImageTag "LocalContrast/Image" CacheView *image_view, *contrast_view; float *interImage, *scanLinePixels, totalWeight; Image *contrast_image; MagickBooleanType status; MemoryInfo *scanLinePixels_info, *interImage_info; ssize_t scanLineSize, width; /* Initialize contrast image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception); if (contrast_image != (Image *) NULL) return(contrast_image); #endif contrast_image=CloneImage(image,0,0,MagickTrue,exception); if (contrast_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse) { contrast_image=DestroyImage(contrast_image); return((Image *) NULL); } image_view=AcquireVirtualCacheView(image,exception); contrast_view=AcquireAuthenticCacheView(contrast_image,exception); scanLineSize=(ssize_t) MagickMax(image->columns,image->rows); width=(ssize_t) scanLineSize*0.002f*fabs(radius); scanLineSize+=(2*width); scanLinePixels_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()* scanLineSize,sizeof(*scanLinePixels)); if (scanLinePixels_info == (MemoryInfo *) NULL) { contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); contrast_image=DestroyImage(contrast_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } scanLinePixels=(float *) GetVirtualMemoryBlob(scanLinePixels_info); /* Create intermediate buffer. 
*/ interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)), sizeof(*interImage)); if (interImage_info == (MemoryInfo *) NULL) { scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info); contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); contrast_image=DestroyImage(contrast_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } interImage=(float *) GetVirtualMemoryBlob(interImage_info); totalWeight=(float) ((width+1)*(width+1)); /* Vertical pass. */ status=MagickTrue; { ssize_t x; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; float *out, *pix, *pixels; register ssize_t y; ssize_t i; if (status == MagickFalse) continue; pixels=scanLinePixels; pixels+=id*scanLineSize; pix=pixels; p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width), exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (y=0; y < (ssize_t) image->rows+(2*width); y++) { *pix++=(float)GetPixelLuma(image,p); p+=image->number_channels; } out=interImage+x+width; for (y=0; y < (ssize_t) image->rows; y++) { float sum, weight; weight=1.0f; sum=0; pix=pixels+y; for (i=0; i < width; i++) { sum+=weight*(*pix++); weight+=1.0f; } for (i=width+1; i < (2*width); i++) { sum+=weight*(*pix++); weight-=1.0f; } /* write to output */ *out=sum/totalWeight; /* mirror into padding */ if (x <= width && x != 0) *(out-(x*2))=*out; if ((x > (ssize_t) image->columns-width-2) && (x != (ssize_t) image->columns-1)) *(out+((image->columns-x-1)*2))=*out; out+=image->columns+(width*2); } } } /* Horizontal pass. 
*/ { ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; float *pix, *pixels; register Quantum *magick_restrict q; register ssize_t x; ssize_t i; if (status == MagickFalse) continue; pixels=scanLinePixels; pixels+=id*scanLineSize; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+ (2*width))*sizeof(float)); for (x=0; x < (ssize_t) image->columns; x++) { float mult, srcVal, sum, weight; PixelTrait traits; weight=1.0f; sum=0; pix=pixels+x; for (i=0; i < width; i++) { sum+=weight*(*pix++); weight+=1.0f; } for (i=width+1; i < (2*width); i++) { sum+=weight*(*pix++); weight-=1.0f; } /* Apply and write */ srcVal=(float) GetPixelLuma(image,p); mult=(srcVal-(sum/totalWeight))*(strength/100.0f); mult=(srcVal+mult)/srcVal; traits=GetPixelChannelTraits(image,RedPixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelRed(contrast_image,ClampToQuantum(GetPixelRed(image,p)*mult), q); traits=GetPixelChannelTraits(image,GreenPixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelGreen(contrast_image,ClampToQuantum(GetPixelGreen(image,p)* mult),q); traits=GetPixelChannelTraits(image,BluePixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelBlue(contrast_image,ClampToQuantum(GetPixelBlue(image,p)* mult),q); p+=image->number_channels; q+=contrast_image->number_channels; } if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse) status=MagickFalse; } } scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info); interImage_info=RelinquishVirtualMemory(interImage_info); 
contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) contrast_image=DestroyImage(contrast_image); return(contrast_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o t i o n B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MotionBlurImage() simulates motion blur. We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). % For reasonable results, radius should be larger than sigma. Use a % radius of 0 and MotionBlurImage() selects a suitable radius for you. % Angle gives the angle of the blurring motion. % % Andrew Protano contributed this effect. % % The format of the MotionBlurImage method is: % % Image *MotionBlurImage(const Image *image,const double radius, % const double sigma,const double angle,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting % the center pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o angle: Apply the effect along this angle. % % o exception: return any errors or warnings in this structure. % */ static MagickRealType *GetMotionBlurKernel(const size_t width, const double sigma) { MagickRealType *kernel, normalize; register ssize_t i; /* Generate a 1-D convolution kernel. 
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    return(kernel);
  /*
    One-sided Gaussian weights: kernel[0] is the current pixel, tapering with
    distance i along the blur direction (MagickSigma guards against a zero
    sigma).
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) width; i++)
  {
    kernel[i]=(MagickRealType) (exp((-((double) i*i)/(double) (2.0*MagickSigma*
      MagickSigma)))/(MagickSQ2PI*MagickSigma));
    normalize+=kernel[i];
  }
  /*
    Normalize so the weights sum to 1 and the blur preserves brightness.
  */
  for (i=0; i < (ssize_t) width; i++)
    kernel[i]/=normalize;
  return(kernel);
}

MagickExport Image *MotionBlurImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
#define BlurImageTag  "Blur/Image"

  CacheView
    *blur_view,
    *image_view,
    *motion_view;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  OffsetInfo
    *offset;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /*
    Build the 1-D Gaussian kernel and the per-tap pixel offsets that trace a
    line of `width` samples in the direction of `angle`.
  */
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetMotionBlurKernel(width,sigma);
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
  if (offset == (OffsetInfo *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  point.x=(double) width*sin(DegreesToRadians(angle));
  point.y=(double) width*cos(DegreesToRadians(angle));
  for (i=0; i < (ssize_t) width; i++)
  {
    /* ceil(v-0.5) rounds each unit step along the direction vector to the
       nearest integer pixel offset. */
    offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-
      0.5);
    offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-
      0.5);
  }
  /*
    Motion blur image.
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateMotionBlurImage(image,kernel,width,offset,exception); if (blur_image != (Image *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); return(blur_image); } #endif blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); return((Image *) NULL); } if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); blur_image=DestroyImage(blur_image); return((Image *) NULL); } status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); motion_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const Quantum *magick_restrict r; register MagickRealType *magick_restrict k; register ssize_t j; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); 
blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[i],q); continue; } k=kernel; pixel=0.0; if ((blur_traits & BlendPixelTrait) == 0) { for (j=0; j < (ssize_t) width; j++) { r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+ offset[j].y,1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel+=(*k)*r[i]; k++; } SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q); continue; } alpha=0.0; gamma=0.0; for (j=0; j < (ssize_t) width; j++) { r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1, 1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } alpha=(double) (QuantumScale*GetPixelAlpha(image,r)); pixel+=(*k)*alpha*r[i]; gamma+=(*k)*alpha; k++; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,BlurImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); motion_view=DestroyCacheView(motion_view); image_view=DestroyCacheView(image_view); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P r e v i e w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
PreviewImage() tiles 9 thumbnails of the specified image with an image % processing operation applied with varying parameters. This may be helpful % pin-pointing an appropriate parameter for a particular image processing % operation. % % The format of the PreviewImages method is: % % Image *PreviewImages(const Image *image,const PreviewType preview, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o preview: the image processing operation. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *PreviewImage(const Image *image,const PreviewType preview, ExceptionInfo *exception) { #define NumberTiles 9 #define PreviewImageTag "Preview/Image" #define DefaultPreviewGeometry "204x204+10+10" char factor[MagickPathExtent], label[MagickPathExtent]; double degrees, gamma, percentage, radius, sigma, threshold; extern const char DefaultTileFrame[]; Image *images, *montage_image, *preview_image, *thumbnail; ImageInfo *preview_info; MagickBooleanType proceed; MontageInfo *montage_info; QuantizeInfo quantize_info; RectangleInfo geometry; register ssize_t i, x; size_t colors; ssize_t y; /* Open output image file. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); colors=2; degrees=0.0; gamma=(-0.2f); preview_info=AcquireImageInfo(); SetGeometry(image,&geometry); (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y, &geometry.width,&geometry.height); images=NewImageList(); percentage=12.5; GetQuantizeInfo(&quantize_info); radius=0.0; sigma=1.0; threshold=0.0; x=0; y=0; for (i=0; i < NumberTiles; i++) { thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception); if (thumbnail == (Image *) NULL) break; (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL, (void *) NULL); (void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception); if (i == (NumberTiles/2)) { (void) QueryColorCompliance("#dfdfdf",AllCompliance, &thumbnail->matte_color,exception); AppendImageToList(&images,thumbnail); continue; } switch (preview) { case RotatePreview: { degrees+=45.0; preview_image=RotateImage(thumbnail,degrees,exception); (void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees); break; } case ShearPreview: { degrees+=5.0; preview_image=ShearImage(thumbnail,degrees,degrees,exception); (void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees, 2.0*degrees); break; } case RollPreview: { x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles; y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles; preview_image=RollImage(thumbnail,x,y,exception); (void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g", (double) x,(double) y); break; } case HuePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0* percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } 
case SaturationPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0* percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } case BrightnessPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } case GammaPreview: default: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; gamma+=0.4f; (void) GammaImage(preview_image,gamma,exception); (void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma); break; } case SpiffPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image != (Image *) NULL) for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickTrue,exception); (void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)", (double) i+1); break; } case DullPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickFalse,exception); (void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)", (double) i+1); break; } case GrayscalePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; quantize_info.colorspace=GRAYColorspace; (void) QuantizeImage(&quantize_info,preview_image,exception); (void) FormatLocaleString(label,MagickPathExtent, "-colorspace gray -colors %.20g",(double) colors); break; } case QuantizePreview: { 
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; (void) QuantizeImage(&quantize_info,preview_image,exception); (void) FormatLocaleString(label,MagickPathExtent,"colors %.20g", (double) colors); break; } case DespecklePreview: { for (x=0; x < (i-1); x++) { preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; thumbnail=DestroyImage(thumbnail); thumbnail=preview_image; } preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)", (double) i+1); break; } case ReduceNoisePreview: { preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) radius,(size_t) radius,exception); (void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius); break; } case AddNoisePreview: { switch ((int) i) { case 0: { (void) CopyMagickString(factor,"uniform",MagickPathExtent); break; } case 1: { (void) CopyMagickString(factor,"gaussian",MagickPathExtent); break; } case 2: { (void) CopyMagickString(factor,"multiplicative",MagickPathExtent); break; } case 3: { (void) CopyMagickString(factor,"impulse",MagickPathExtent); break; } case 5: { (void) CopyMagickString(factor,"laplacian",MagickPathExtent); break; } case 6: { (void) CopyMagickString(factor,"Poisson",MagickPathExtent); break; } default: { (void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent); break; } } preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i, (size_t) i,exception); (void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor); break; } case SharpenPreview: { preview_image=SharpenImage(thumbnail,radius,sigma,exception); (void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g", radius,sigma); break; } case BlurPreview: { preview_image=BlurImage(thumbnail,radius,sigma,exception); (void) 
FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius, sigma); break; } case ThresholdPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) BilevelImage(thumbnail,(double) (percentage*((double) QuantumRange+1.0))/100.0,exception); (void) FormatLocaleString(label,MagickPathExtent,"threshold %g", (double) (percentage*((double) QuantumRange+1.0))/100.0); break; } case EdgeDetectPreview: { preview_image=EdgeImage(thumbnail,radius,exception); (void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius); break; } case SpreadPreview: { preview_image=SpreadImage(thumbnail,image->interpolate,radius, exception); (void) FormatLocaleString(label,MagickPathExtent,"spread %g", radius+0.5); break; } case SolarizePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) SolarizeImage(preview_image,(double) QuantumRange*percentage/ 100.0,exception); (void) FormatLocaleString(label,MagickPathExtent,"solarize %g", (QuantumRange*percentage)/100.0); break; } case ShadePreview: { degrees+=10.0; preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees, exception); (void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees, degrees); break; } case RaisePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; geometry.width=(size_t) (2*i+2); geometry.height=(size_t) (2*i+2); geometry.x=(i-1)/2; geometry.y=(i-1)/2; (void) RaiseImage(preview_image,&geometry,MagickTrue,exception); (void) FormatLocaleString(label,MagickPathExtent, "raise %.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double) geometry.height,(double) geometry.x,(double) geometry.y); break; } case SegmentPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; threshold+=0.4f; (void) 
SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold, threshold,exception); (void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g", threshold,threshold); break; } case SwirlPreview: { preview_image=SwirlImage(thumbnail,degrees,image->interpolate, exception); (void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees); degrees+=45.0; break; } case ImplodePreview: { degrees+=0.1f; preview_image=ImplodeImage(thumbnail,degrees,image->interpolate, exception); (void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees); break; } case WavePreview: { degrees+=5.0f; preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees, image->interpolate,exception); (void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5* degrees,2.0*degrees); break; } case OilPaintPreview: { preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma, exception); (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g", radius,sigma); break; } case CharcoalDrawingPreview: { preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma, exception); (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g", radius,sigma); break; } case JPEGPreview: { char filename[MagickPathExtent]; int file; MagickBooleanType status; preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; preview_info->quality=(size_t) percentage; (void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double) preview_info->quality); file=AcquireUniqueFileResource(filename); if (file != -1) file=close(file)-1; (void) FormatLocaleString(preview_image->filename,MagickPathExtent, "jpeg:%s",filename); status=WriteImage(preview_info,preview_image,exception); if (status != MagickFalse) { Image *quality_image; (void) CopyMagickString(preview_info->filename, preview_image->filename,MagickPathExtent); quality_image=ReadImage(preview_info,exception); if (quality_image != (Image *) NULL) { 
preview_image=DestroyImage(preview_image); preview_image=quality_image; } } (void) RelinquishUniqueFileResource(preview_image->filename); if ((GetBlobSize(preview_image)/1024) >= 1024) (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ", factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/ 1024.0/1024.0); else if (GetBlobSize(preview_image) >= 1024) (void) FormatLocaleString(label,MagickPathExtent, "quality %s\n%gkb ",factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/1024.0); else (void) FormatLocaleString(label,MagickPathExtent, "quality %s\n%.20gb ",factor,(double) ((MagickOffsetType) GetBlobSize(thumbnail))); break; } } thumbnail=DestroyImage(thumbnail); percentage+=12.5; radius+=0.5; sigma+=0.25; if (preview_image == (Image *) NULL) break; (void) DeleteImageProperty(preview_image,"label"); (void) SetImageProperty(preview_image,"label",label,exception); AppendImageToList(&images,preview_image); proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i, NumberTiles); if (proceed == MagickFalse) break; } if (images == (Image *) NULL) { preview_info=DestroyImageInfo(preview_info); return((Image *) NULL); } /* Create the montage. */ montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL); (void) CopyMagickString(montage_info->filename,image->filename, MagickPathExtent); montage_info->shadow=MagickTrue; (void) CloneString(&montage_info->tile,"3x3"); (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry); (void) CloneString(&montage_info->frame,DefaultTileFrame); montage_image=MontageImages(images,montage_info,exception); montage_info=DestroyMontageInfo(montage_info); images=DestroyImageList(images); if (montage_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); if (montage_image->montage != (char *) NULL) { /* Free image directory. 
*/ montage_image->montage=(char *) RelinquishMagickMemory( montage_image->montage); if (image->directory != (char *) NULL) montage_image->directory=(char *) RelinquishMagickMemory( montage_image->directory); } preview_info=DestroyImageInfo(preview_info); return(montage_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R o t a t i o n a l B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RotationalBlurImage() applies a radial blur to the image. % % Andrew Protano contributed this effect. % % The format of the RotationalBlurImage method is: % % Image *RotationalBlurImage(const Image *image,const double angle, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o angle: the angle of the radial blur. % % o blur: the blur. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *RotationalBlurImage(const Image *image,const double angle, ExceptionInfo *exception) { CacheView *blur_view, *image_view, *radial_view; double blur_radius, *cos_theta, offset, *sin_theta, theta; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; PointInfo blur_center; register ssize_t i; size_t n; ssize_t y; /* Allocate blur image. 
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL implementation when available. */
  blur_image=AccelerateRotationalBlurImage(image,angle,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Rotate samples about the image center; blur_radius is the distance from
    the center to a corner (the farthest pixel).
  */
  blur_center.x=(double) (image->columns-1)/2.0;
  blur_center.y=(double) (image->rows-1)/2.0;
  blur_radius=hypot(blur_center.x,blur_center.y);
  /*
    Number of angular samples grows with the blur angle and image size so arc
    steps stay roughly one pixel apart at the image edge.
  */
  n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
  theta=DegreesToRadians(angle)/(double) (n-1);
  cos_theta=(double *) AcquireQuantumMemory((size_t) n,
    sizeof(*cos_theta));
  sin_theta=(double *) AcquireQuantumMemory((size_t) n,
    sizeof(*sin_theta));
  if ((cos_theta == (double *) NULL) || (sin_theta == (double *) NULL))
    {
      if (cos_theta != (double *) NULL)
        cos_theta=(double *) RelinquishMagickMemory(cos_theta);
      if (sin_theta != (double *) NULL)
        sin_theta=(double *) RelinquishMagickMemory(sin_theta);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Precompute rotation tables; `offset` centers the sweep so samples span
    [-angle/2, +angle/2] around each pixel's own angular position.
  */
  offset=theta*(double) (n-1)/2.0;
  for (i=0; i < (ssize_t) n; i++)
  {
    cos_theta[i]=cos((double) (theta*i-offset));
    sin_theta[i]=sin((double) (theta*i-offset));
  }
  /*
    Radial blur image.
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); radial_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double radius; PointInfo center; register ssize_t i; size_t step; center.x=(double) x-blur_center.x; center.y=(double) y-blur_center.y; radius=hypot((double) center.x,center.y); if (radius == 0) step=1; else { step=(size_t) (blur_radius/radius); if (step == 0) step=1; else if (step >= n) step=n-1; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const Quantum *magick_restrict r; register ssize_t j; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[i],q); continue; } gamma=0.0; pixel=0.0; if ((GetPixelChannelTraits(image,AlphaPixelChannel) == UndefinedPixelTrait) || (channel == AlphaPixelChannel)) { for (j=0; j < (ssize_t) n; j+=(ssize_t) step) { r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+ center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t) 
(blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5), 1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel+=r[i]; gamma++; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } for (j=0; j < (ssize_t) n; j+=(ssize_t) step) { double alpha; r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+ center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t) (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5), 1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } alpha=(double) QuantumScale*GetPixelAlpha(image,r); pixel+=alpha*r[i]; gamma+=alpha; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,BlurImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); radial_view=DestroyCacheView(radial_view); image_view=DestroyCacheView(image_view); cos_theta=(double *) RelinquishMagickMemory(cos_theta); sin_theta=(double *) RelinquishMagickMemory(sin_theta); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e l e c t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SelectiveBlurImage() selectively blur pixels within a contrast threshold. % It is similar to the unsharpen mask that sharpens everything with contrast % above a certain threshold. 
% % The format of the SelectiveBlurImage method is: % % Image *SelectiveBlurImage(const Image *image,const double radius, % const double sigma,const double threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o threshold: only pixels within this contrast threshold are included % in the blur operation. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SelectiveBlurImage(const Image *image,const double radius, const double sigma,const double threshold,ExceptionInfo *exception) { #define SelectiveBlurImageTag "SelectiveBlur/Image" CacheView *blur_view, *image_view, *luminance_view; Image *blur_image, *luminance_image; MagickBooleanType status; MagickOffsetType progress; MagickRealType *kernel; register ssize_t i; size_t width; ssize_t center, j, u, v, y; /* Initialize blur image attributes. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth1D(radius,sigma); kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,width*sizeof(*kernel))); if (kernel == (MagickRealType *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); j=(ssize_t) (width-1)/2; i=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); } if (image->debug != MagickFalse) { char format[MagickPathExtent], *message; register const MagickRealType *k; ssize_t u, v; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double) width); message=AcquireString(""); k=kernel; for (v=0; v < (ssize_t) width; v++) { *message='\0'; (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for (u=0; u < (ssize_t) width; u++) { (void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double) *k++); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); return((Image *) NULL); } luminance_image=CloneImage(image,0,0,MagickTrue,exception); if (luminance_image == (Image *) NULL) { blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); 
return((Image *) NULL); } status=TransformImageColorspace(luminance_image,GRAYColorspace,exception); if (status == MagickFalse) { luminance_image=DestroyImage(luminance_image); blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); return((Image *) NULL); } /* Threshold blur image. */ status=MagickTrue; progress=0; center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)* ((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L)); image_view=AcquireVirtualCacheView(image,exception); luminance_view=AcquireVirtualCacheView(luminance_image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double contrast; MagickBooleanType sync; register const Quantum *magick_restrict l, *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t) ((width-1)/2L),image->columns+width,width,exception); l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y- (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (l == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double intensity; register ssize_t i; intensity=GetPixelIntensity(image,p+center); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict luminance_pixels, *magick_restrict pixels; register ssize_t u; ssize_t v; 
channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } k=kernel; pixel=0.0; pixels=p; luminance_pixels=l; gamma=0.0; if ((blur_traits & BlendPixelTrait) == 0) { for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,luminance_pixels)- intensity; if (fabs(contrast) < threshold) { pixel+=(*k)*pixels[i]; gamma+=(*k); } k++; pixels+=GetPixelChannels(image); luminance_pixels+=GetPixelChannels(luminance_image); } pixels+=GetPixelChannels(image)*image->columns; luminance_pixels+=GetPixelChannels(luminance_image)* luminance_image->columns; } if (fabs((double) gamma) < MagickEpsilon) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(image,pixels)-intensity; if (fabs(contrast) < threshold) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=(*k)*alpha*pixels[i]; gamma+=(*k)*alpha; } k++; pixels+=GetPixelChannels(image); luminance_pixels+=GetPixelChannels(luminance_image); } pixels+=GetPixelChannels(image)*image->columns; luminance_pixels+=GetPixelChannels(luminance_image)* luminance_image->columns; } if (fabs((double) gamma) < MagickEpsilon) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); l+=GetPixelChannels(luminance_image); q+=GetPixelChannels(blur_image); } sync=SyncCacheViewAuthenticPixels(blur_view,exception); if (sync == MagickFalse) 
status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SelectiveBlurImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); image_view=DestroyCacheView(image_view); luminance_image=DestroyImage(luminance_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShadeImage() shines a distant light on an image to create a % three-dimensional effect. You control the positioning of the light with % azimuth and elevation; azimuth is measured in degrees off the x axis % and elevation is measured in pixels above the Z axis. % % The format of the ShadeImage method is: % % Image *ShadeImage(const Image *image,const MagickBooleanType gray, % const double azimuth,const double elevation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o gray: A value other than zero shades the intensity of each pixel. % % o azimuth, elevation: Define the light source direction. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray, const double azimuth,const double elevation,ExceptionInfo *exception) { #define ShadeImageTag "Shade/Image" CacheView *image_view, *shade_view; Image *linear_image, *shade_image; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo light; ssize_t y; /* Initialize shaded image attributes. 
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,0,0,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector.
    The light direction is converted from spherical (azimuth/elevation in
    degrees) to a Cartesian vector scaled by QuantumRange.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    double
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;

    register const Quantum
      *magick_restrict center,
      *magick_restrict p,
      *magick_restrict post,
      *magick_restrict pre;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch a 3-row window (with a 1-pixel virtual border on each side) so
      the surface normal can be estimated from the 8 neighbors.
    */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange;  /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine the surface normal and compute shading.
        pre/center/post point at the pixel above, at, and below (x,y) inside
        the 3-row window; the x/y gradients are intensity differences of the
        left vs. right and bottom vs. top neighbor columns/rows.
      */
      pre=p+GetPixelChannels(linear_image);
      center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image);
      post=center+(linear_image->columns+2)*GetPixelChannels(linear_image);
      normal.x=(double) (
        GetPixelIntensity(linear_image,pre-GetPixelChannels(linear_image))+
        GetPixelIntensity(linear_image,center-GetPixelChannels(linear_image))+
        GetPixelIntensity(linear_image,post-GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,pre+GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,center+GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,post+GetPixelChannels(linear_image)));
      normal.y=(double) (
        GetPixelIntensity(linear_image,post-GetPixelChannels(linear_image))+
        GetPixelIntensity(linear_image,post)+
        GetPixelIntensity(linear_image,post+GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,pre-GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,pre)-
        GetPixelIntensity(linear_image,pre+GetPixelChannels(linear_image)));
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;  /* flat surface: shading is the light's z component */
      else
        {
          shade=0.0;
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              /* cosine of angle between normal and light, scaled by |light| */
              normal_distance=normal.x*normal.x+normal.y*normal.y+
                normal.z*normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          shade_traits,
          traits;

        channel=GetPixelChannelChannel(linear_image,i);
        traits=GetPixelChannelTraits(linear_image,channel);
        shade_traits=GetPixelChannelTraits(shade_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (shade_traits == UndefinedPixelTrait))
          continue;
        if ((shade_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if ((traits & UpdatePixelTrait) == 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if (gray != MagickFalse)
          {
            /* gray: output the shading value itself */
            SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q);
            continue;
          }
        /* otherwise modulate the source channel by the shading factor */
        SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade*
          center[i]),q);
      }
      p+=GetPixelChannels(linear_image);
      q+=GetPixelChannels(shade_image);
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ShadeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a r p e n I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SharpenImage() sharpens the image.  We convolve the image with a Gaussian
%  operator of the given radius and standard deviation (sigma).  For
%  reasonable results, radius should be larger than sigma.  Use a radius of 0
%  and SharpenImage() selects a suitable radius for you.
%
%  Using a separable kernel would be faster, but the negative weights cancel
%  out on the corners of the kernel producing often undesirable ringing in the
%  filtered result; this can be avoided by using a 2D gaussian shaped image
%  sharpening kernel instead.
%
%  The format of the SharpenImage method is:
%
%    Image *SharpenImage(const Image *image,const double radius,
%      const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *sharp_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Build the sharpening kernel by hand: negative Gaussian taps everywhere,
    then overwrite the center tap so the kernel sums to a positive value.
  */
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  normalize=0.0;
  j=(ssize_t) (kernel_info->width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0*
        MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      normalize+=kernel_info->values[i];
      i++;
    }
  }
  /* i == width*width here, so i/2 indexes the center tap (width is odd) */
  kernel_info->values[i/2]=(double) ((-2.0)*normalize);
  /*
    Re-normalize so the kernel sums to 1 (unit DC gain).
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  sharp_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S p r e a d I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpreadImage() is a special effects method that randomly displaces each
%  pixel in a square area defined by the radius parameter.
%
%  The format of the SpreadImage method is:
%
%      Image *SpreadImage(const Image *image,
%        const PixelInterpolateMethod method,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method:  intepolation method.
%
%    o radius:  choose a random pixel in a neighborhood of this extent.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,
  const PixelInterpolateMethod method,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag  "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  size_t
    width;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  spread_image=CloneImage(image,0,0,MagickTrue,exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse)
    {
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image.
  */
  status=MagickTrue;
  progress=0;
  width=GetOptimalKernelWidth1D(radius,0.5);
  /* one RNG per thread so parallel rows draw independent streams */
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  /* only parallelize when the secret key is the default (reproducibility) */
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,spread_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PointInfo
        point;

      /* sample a random offset in [-width/2, +width/2) around (x,y) */
      point.x=GetPseudoRandomValue(random_info[id]);
      point.y=GetPseudoRandomValue(random_info[id]);
      status=InterpolatePixelChannels(image,image_view,spread_image,method,
        (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q,
        exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(spread_image);
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SpreadImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    spread_image=DestroyImage(spread_image);
  return(spread_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     U n s h a r p M a s k I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
UnsharpMaskImage() sharpens one or more image channels. We convolve the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and UnsharpMaskImage() selects a suitable radius for you. % % The format of the UnsharpMaskImage method is: % % Image *UnsharpMaskImage(const Image *image,const double radius, % const double sigma,const double amount,const double threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o gain: the percentage of the difference between the original and the % blur image that is added back into the original. % % o threshold: the threshold in pixels needed to apply the diffence gain. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *UnsharpMaskImage(const Image *image,const double radius, const double sigma,const double gain,const double threshold, ExceptionInfo *exception) { #define SharpenImageTag "Sharpen/Image" CacheView *image_view, *unsharp_view; Image *unsharp_image; MagickBooleanType status; MagickOffsetType progress; double quantum_threshold; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); #if defined(MAGICKCORE_OPENCL_SUPPORT) unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold, exception); if (unsharp_image != (Image *) NULL) return(unsharp_image); #endif unsharp_image=BlurImage(image,radius,sigma,exception); if (unsharp_image == (Image *) NULL) return((Image *) NULL); quantum_threshold=(double) QuantumRange*threshold; /* Unsharp-mask image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,unsharp_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double pixel; PixelChannel channel; PixelTrait traits, unsharp_traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); unsharp_traits=GetPixelChannelTraits(unsharp_image,channel); if ((traits == UndefinedPixelTrait) || (unsharp_traits == UndefinedPixelTrait)) continue; if ((unsharp_traits & CopyPixelTrait) != 0) { SetPixelChannel(unsharp_image,channel,p[i],q); continue; } pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q); if (fabs(2.0*pixel) < quantum_threshold) pixel=(double) p[i]; else pixel=(double) p[i]+gain*pixel; SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(unsharp_image); } if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SharpenImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } 
unsharp_image->type=image->type; unsharp_view=DestroyCacheView(unsharp_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) unsharp_image=DestroyImage(unsharp_image); return(unsharp_image); }
exchange_boundary.c
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
// perform a (intra-level) ghost zone exchange
//  NOTE exchange_boundary() only exchanges the boundary.
//  It will not enforce any boundary conditions
//  BC's are either the responsibility of a separate function or should be fused into the stencil
//
// Parameters:
//   level     - the multigrid level whose ghost zones are exchanged
//   id        - which grid/component on the level to exchange (passed through to CopyBlock)
//   justFaces - nonzero to exchange only face ghost zones; also selects which
//               precomputed communication schedule (exchange_ghosts[0|1]) to use
//
// Structure (overlap pattern): prepost Irecv -> pack -> Isend -> local copies
// (hidden under Isend latency) -> Waitall -> unpack.  blocks[0]/[1]/[2] are the
// precomputed pack / local-exchange / unpack block lists for this schedule.
// Each phase is timed separately into level->cycles.ghostZone_*.
void exchange_boundary(level_type * level, int id, int justFaces){
  uint64_t _timeCommunicationStart = CycleTime();
  uint64_t _timeStart,_timeEnd;
  int buffer=0;
  int n;
  if(justFaces)justFaces=1;else justFaces=0;  // must be 0 or 1 in order to index into exchange_ghosts[]

  #ifdef USE_MPI
  int nMessages = level->exchange_ghosts[justFaces].num_recvs + level->exchange_ghosts[justFaces].num_sends;
  // recv requests occupy requests[0..num_recvs-1]; send requests follow them,
  // so one Waitall on the combined array completes both directions.
  MPI_Request *recv_requests = level->exchange_ghosts[justFaces].requests;
  MPI_Request *send_requests = level->exchange_ghosts[justFaces].requests + level->exchange_ghosts[justFaces].num_recvs;

  // loop through packed list of MPI receives and prepost Irecv's...
  // (threaded only when the MPI library was initialized with THREAD_MULTIPLE)
  _timeStart = CycleTime();
  #ifdef USE_MPI_THREAD_MULTIPLE
  #pragma omp parallel for schedule(dynamic,1)
  #endif
  for(n=0;n<level->exchange_ghosts[justFaces].num_recvs;n++){
    MPI_Irecv(level->exchange_ghosts[justFaces].recv_buffers[n],
              level->exchange_ghosts[justFaces].recv_sizes[n],
              MPI_DOUBLE,
              level->exchange_ghosts[justFaces].recv_ranks[n],
              0, // by convention, ghost zone exchanges use tag=0
              MPI_COMM_WORLD,
              //&level->exchange_ghosts[justFaces].requests[n]
              &recv_requests[n]
    );
  }
  _timeEnd = CycleTime();
  level->cycles.ghostZone_recv += (_timeEnd-_timeStart);

  // pack MPI send buffers... (blocks[0] = pack list)
  _timeStart = CycleTime();
  #pragma omp parallel for if(level->exchange_ghosts[justFaces].num_blocks[0]>1) schedule(static,1)
  for(buffer=0;buffer<level->exchange_ghosts[justFaces].num_blocks[0];buffer++){CopyBlock(level,id,&level->exchange_ghosts[justFaces].blocks[0][buffer]);}
  _timeEnd = CycleTime();
  level->cycles.ghostZone_pack += (_timeEnd-_timeStart);

  // loop through MPI send buffers and post Isend's...
  _timeStart = CycleTime();
  #ifdef USE_MPI_THREAD_MULTIPLE
  #pragma omp parallel for schedule(dynamic,1)
  #endif
  for(n=0;n<level->exchange_ghosts[justFaces].num_sends;n++){
    MPI_Isend(level->exchange_ghosts[justFaces].send_buffers[n],
              level->exchange_ghosts[justFaces].send_sizes[n],
              MPI_DOUBLE,
              level->exchange_ghosts[justFaces].send_ranks[n],
              0, // by convention, ghost zone exchanges use tag=0
              MPI_COMM_WORLD,
              &send_requests[n]
              //&level->exchange_ghosts[justFaces].requests[n+level->exchange_ghosts[justFaces].num_recvs] // requests[0..num_recvs-1] were used by recvs.  So sends start at num_recvs
    );
  }
  _timeEnd = CycleTime();
  level->cycles.ghostZone_send += (_timeEnd-_timeStart);
  #endif

  // exchange locally... try and hide within Isend latency... (blocks[1] = local list)
  _timeStart = CycleTime();
  #pragma omp parallel for if(level->exchange_ghosts[justFaces].num_blocks[1]>1) schedule(static,1)
  for(buffer=0;buffer<level->exchange_ghosts[justFaces].num_blocks[1];buffer++){CopyBlock(level,id,&level->exchange_ghosts[justFaces].blocks[1][buffer]);}
  _timeEnd = CycleTime();
  level->cycles.ghostZone_local += (_timeEnd-_timeStart);

  // wait for MPI to finish...
  #ifdef USE_MPI
  _timeStart = CycleTime();
  if(nMessages)MPI_Waitall(nMessages,level->exchange_ghosts[justFaces].requests,level->exchange_ghosts[justFaces].status);
  _timeEnd = CycleTime();
  level->cycles.ghostZone_wait += (_timeEnd-_timeStart);

  // unpack MPI receive buffers (blocks[2] = unpack list)
  _timeStart = CycleTime();
  #pragma omp parallel for if(level->exchange_ghosts[justFaces].num_blocks[2]>1) schedule(static,1)
  for(buffer=0;buffer<level->exchange_ghosts[justFaces].num_blocks[2];buffer++){CopyBlock(level,id,&level->exchange_ghosts[justFaces].blocks[2][buffer]);}
  _timeEnd = CycleTime();
  level->cycles.ghostZone_unpack += (_timeEnd-_timeStart);
  #endif

  level->cycles.ghostZone_total += (uint64_t)(CycleTime()-_timeCommunicationStart);
}
user_defined_move_generator.h
/*****************************************************************************/
// Copyright (c) 2020-2021 Yuji KOGUMA
// Released under the MIT license
// https://opensource.org/licenses/mit-license.php
/*****************************************************************************/
#ifndef PRINTEMPS_NEIGHBORHOOD_USER_DEFINED_MOVE_GENERATOR_H__
#define PRINTEMPS_NEIGHBORHOOD_USER_DEFINED_MOVE_GENERATOR_H__

#include "abstract_move_generator.h"

namespace printemps {
namespace neighborhood {
/*****************************************************************************/
// Move generator driven by a user-supplied callback: the callback fills the
// move vector, and setup() wraps it with the standard filtering/flagging
// protocol expected by AbstractMoveGenerator::m_move_updater.
template <class T_Variable, class T_Expression>
class UserDefinedMoveGenerator
    : public AbstractMoveGenerator<T_Variable, T_Expression> {
   private:
    // User callback that populates the candidate move list in place.
    std::function<void(std::vector<Move<T_Variable, T_Expression>> *)>
        m_move_updater_wrapper;

   public:
    /*************************************************************************/
    UserDefinedMoveGenerator(void) {
        this->initialize();
    }

    /*************************************************************************/
    virtual ~UserDefinedMoveGenerator(void) {
        /// nothing to do
    }

    /*************************************************************************/
    // Resets the callback to a no-op so setup() is safe before
    // set_move_updater() has been called.
    inline constexpr void initialize(void) {
        this->m_move_updater_wrapper =
            [](std::vector<Move<T_Variable, T_Expression>> *) {};
    }

    /*************************************************************************/
    // Registers the user callback that generates candidate moves.
    inline constexpr void set_move_updater(
        const std::function<
            void(std::vector<Move<T_Variable, T_Expression>> *)>
            &a_MOVE_UPDATER) {
        this->m_move_updater_wrapper = a_MOVE_UPDATER;
    }

    /*************************************************************************/
    // Installs this->m_move_updater: first runs the user callback to obtain
    // moves, then flags each move (1 = keep, 0 = reject) according to
    // feasibility and the accept-mode arguments.
    void setup(void) {
        auto move_updater =                                     //
            [this](auto *                      a_moves,         //
                   auto *                      a_flags,         //
                   const bool                  a_ACCEPT_ALL,    //
                   const bool                  a_ACCEPT_OBJECTIVE_IMPROVABLE,  //
                   const bool                  a_ACCEPT_FEASIBILITY_IMPROVABLE,
                   [[maybe_unused]] const bool a_IS_ENABLED_PARALLEL) {
                m_move_updater_wrapper(a_moves);
                const int MOVES_SIZE = a_moves->size();
                a_flags->resize(MOVES_SIZE);
#ifdef _OPENMP
#pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static)
#endif
                for (auto i = 0; i < MOVES_SIZE; i++) {
                    (*a_moves)[i].sense = MoveSense::UserDefined;
                    (*a_flags)[i]       = 1;
                    // Hard rejections: moves touching fixed or selection
                    // variables, or violating variable bounds, are never kept.
                    if (neighborhood::has_fixed_variable((*a_moves)[i])) {
                        (*a_flags)[i] = 0;
                        continue;
                    }
                    if (neighborhood::has_selection_variable((*a_moves)[i])) {
                        (*a_flags)[i] = 0;
                        continue;
                    }
                    if (neighborhood::has_bound_violation((*a_moves)[i])) {
                        (*a_flags)[i] = 0;
                        continue;
                    }
                    if (a_ACCEPT_ALL) {
                        /** nothing to do */
                    } else {
                        // Selective mode: keep only moves that can improve
                        // the objective and/or feasibility, as requested.
                        if (a_ACCEPT_OBJECTIVE_IMPROVABLE &&
                            neighborhood::has_objective_improvable_variable(
                                (*a_moves)[i])) {
                            continue;
                        }

                        if (a_ACCEPT_FEASIBILITY_IMPROVABLE &&
                            neighborhood::has_feasibility_improvable_variable(
                                (*a_moves)[i])) {
                            continue;
                        }
                        (*a_flags)[i] = 0;
                    }
                }
            };
        this->m_move_updater = move_updater;
    }
};
}  // namespace neighborhood
}  // namespace printemps
#endif
/*****************************************************************************/
// END
/*****************************************************************************/
PosTransformer.h
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by:
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
/** @file VectorOperators.h
 * @brief Support functions to handle position type data managed by SoA
 */
#ifndef QMCPLUSPLUS_SOA_FAST_PARTICLE_OPERATORS_H
#define QMCPLUSPLUS_SOA_FAST_PARTICLE_OPERATORS_H

#include <simd/blas1.hpp>

namespace qmcplusplus
{
//Need to reorg
#if 0
/** Dummy template class to be specialized
 *
 * - T1 the datatype to be transformed
 * - D dimension
 * - ORTHO true, if only Diagonal Elements are used
 */
template<class T1, unsigned D, bool ORTHO>
struct PosTransformer {};

/** Specialized PosTransformer<T,3,true> using only the diagonal elements
 */
template<class T>
struct PosTransformer<T,3,true>
{
  using Array_t=VectorSoaContainer<T,3>;
  using Transformer_t=Tensor<T,3>;
  //index for the tensor
  enum {iXX=0, iXY=1, iXZ=2, iYX=3, iYY=4, iYZ=5, iZX=6, iZY=7, iZZ=8};

  inline static void apply(const Array_t& pin, const Transformer_t& X, Array_t& pout, int first, int last)
  {
    const int n=last-first;
    blas::axpy(X[iXX],pin.data(0),pout.data(0),n);
    blas::axpy(X[iYY],pin.data(1),pout.data(1),n);
    blas::axpy(X[iZZ],pin.data(2),pout.data(2),n);
  }

  inline static void apply(const Transformer_t& X, const Array_t& pin, Array_t& pout, int first, int last)
  {
    ::apply(pin,X,pout,first,last);
  }

  inline static void apply(Array_t& pinout, const Transformer_t& X,int first, int last)
  {
    const int n=last-first;
    blas::scal(X[iXX],pinout.data(0),n);
    blas::scal(X[iYY],pinout.data(1),n);
    blas::scal(X[iZZ],pinout.data(2),n);
  }

  inline static void apply(const Transformer_t& X, Array_t& pinout, int first, int last)
  {
    ::apply(pinout,X,first,last);
  }
};

template<class T>
struct PosTransformer<T,3,false>
{
  using Array_t=VectorSoaContainer<T,3>;
  using Transformer_t=Tensor<T,3>;

  inline static void apply(const Array_t& pin, const Transformer_t& X, Array_t& pout, int first, int last)
  {
    const int n=last-first;
    register T x00=X[0],x01=X[1],x02=X[2],
             x10=X[3],x11=X[4],x12=X[5],
             x20=X[6],x21=X[7],x22=X[8];
    const T* restrict x_in=pin.data(0)+first;
    ASSUME_ALIGNED(x_in);
    const T* restrict y_in=pin.data(1)+first;
    ASSUME_ALIGNED(y_in);
    const T* restrict z_in=pin.data(2)+first;
    ASSUME_ALIGNED(z_in);
    T* restrict x_out=pout.data(0)+first;
    ASSUME_ALIGNED(x_out);
    T* restrict y_out=pout.data(1)+first;
    ASSUME_ALIGNED(y_out);
    T* restrict z_out=pout.data(2)+first;
    ASSUME_ALIGNED(z_out);
#pragma ivdep
    for(int i=0; i<n; i++)
    {
      x_out[i]=x_in[i]*x00+y_in[i]*x10+z_in[i]*x20;
      y_out[i]=x_in[i]*x01+y_in[i]*x11+z_in[i]*x21;
      z_out[i]=x_in[i]*x02+y_in[i]*x12+z_in[i]*x22;
    }
  }

  inline static void apply(const Transformer_t& X, const Array_t& pin, Array_t& pout, int first, int last)
  {
    ::apply(pin,X,pout,first,last);
  }

  inline static void apply(Array_t& pinout, const Transformer_t& X,int first, int last)
  {
    const int n=last-first;
    register T x00=X[0],x01=X[1],x02=X[2],
             x10=X[3],x11=X[4],x12=X[5],
             x20=X[6],x21=X[7],x22=X[8];
    T* restrict x_inout=pinout.data(0)+first;
    ASSUME_ALIGNED(x_inout);
    T* restrict y_inout=pinout.data(1)+first;
    ASSUME_ALIGNED(y_inout);
    T* restrict z_inout=pinout.data(2)+first;
    ASSUME_ALIGNED(z_inout);
#pragma ivdep
    for(int i=0; i<n; i++)
    {
      T x=x_inout[i]*x00+y_inout[i]*x10+z_inout[i]*x20;
      T y=x_inout[i]*x01+y_inout[i]*x11+z_inout[i]*x21;
      T z=x_inout[i]*x02+y_inout[i]*x12+z_inout[i]*x22;
      x_inout[i]=x;
      y_inout[i]=y;
      z_inout[i]=z;
    }
  }

  inline static void apply(const Transformer_t& X, Array_t& pinout, int first, int last)
  {
    ::apply(X,pinout,first,last);
  }
};
#endif

/** General conversion function from AoS[nrows][ncols] to SoA[ncols][ldb]
 * @param nrows the first dimension
 * @param ncols the second dimension
 * @param iptr input pointer
 * @param lda stride of iptr (currently unused; kept for a BLAS-like signature)
 * @param out output pointer
 * @param ldb stride of out
 *
 * Modeled after blas/lapack for lda/ldb
 */
template<typename T1, typename T2>
void PosAoS2SoA(int nrows, int ncols, const T1* restrict iptr, int lda, T2* restrict out, int ldb)
{
  // Destination component planes: x, y, z each of length ldb.
  T2* restrict x=out        ;
  T2* restrict y=out+  ldb;
  T2* restrict z=out+2*ldb;
#pragma omp simd aligned(x,y,z)
  for(int i=0; i<nrows; ++i)
  {
    // NOTE(review): rows are walked with stride ncols, not lda — assumes a
    // packed AoS layout; confirm against callers if lda != ncols is possible.
    x[i]=iptr[i*ncols  ]; //x[i]=in[i][0];
    y[i]=iptr[i*ncols+1]; //y[i]=in[i][1];
    z[i]=iptr[i*ncols+2]; //z[i]=in[i][2];
  }
}

/** General conversion function from SoA[ncols][ldb] to AoS[nrows][ncols]
 * @param nrows the first dimension
 * @param ncols the second dimension (currently unused; output stride is ldb)
 * @param iptr input pointer
 * @param lda stride of iptr
 * @param out output pointer
 * @param ldb stride of out
 *
 * Modeled after blas/lapack for lda/ldb
 */
template<typename T1, typename T2>
void PosSoA2AoS(int nrows, int ncols, const T1* restrict iptr, int lda, T2* restrict out, int ldb)
{
  // Source component planes: x, y, z each of length lda.
  const T1* restrict x=iptr        ;
  const T1* restrict y=iptr+  lda;
  const T1* restrict z=iptr+2*lda;
#pragma omp simd aligned(x,y,z)
  for(int i=0; i<nrows; ++i)
  {
    out[i*ldb  ]=x[i]; //out[i][0]=x[i];
    out[i*ldb+1]=y[i]; //out[i][1]=y[i];
    out[i*ldb+2]=z[i]; //out[i][2]=z[i];
  }
}

#if 0
//#if defined(HAVE_MKL)
///specialization for double AoS2SoA
template<>
void PosAoS2SoA(int nrows, int ncols, const double* restrict in, int lda, double* restrict out, int ldb)
{
  const double zone={1.0};
  mkl_domatcopy('R','T',nrows,ncols,zone,in,lda,out,ldb);
}
///specialization for float AoS2SoA
template<>
void PosAoS2SoA(int nrows, int ncols, const float* restrict in, int lda, float* restrict out, int ldb)
{
  const float zone={1.0f};
  mkl_somatcopy('R','T',nrows,ncols,zone,in,lda,out,ldb);
}
///specialization for double SoA2AoS
template<>
void PosSoA2AoS(int nrows, int ncols, const double* restrict in, int lda, double* restrict out, int ldb)
{
  const double zone={1.0};
  mkl_domatcopy('R','T',nrows,ncols,zone,in,lda,out,ldb);
}
///specialization for float SoA2AoS
template<>
void PosSoA2AoS(int nrows, int ncols, const float* restrict in, int lda, float* restrict out, int ldb)
{
  const float zone={1.0f};
  mkl_somatcopy('R','T',nrows,ncols,zone,in,lda,out,ldb);
}
#endif
}
#endif
zmerge.c
#include "zmerge.h" fint zmerge_(const fnat m[static restrict 1], const fnat n[static restrict 1], const double Ar[static restrict VDL], const fnat ldAr[static restrict 1], const double Ai[static restrict VDL], const fnat ldAi[static restrict 1], double complex A[static restrict VDL_2], const fnat ldA[static restrict 1]) { #ifndef NDEBUG if (IS_NOT_VFPENV) return -9; if (*m & VDL__2) return -1; if (IS_NOT_ALIGNED(Ar)) return -3; if (*ldAr < *m) return -4; if (*ldAr & VDL_1) return -4; if (IS_NOT_ALIGNED(Ai)) return -5; if (*ldAi < *m) return -6; if (*ldAi & VDL_1) return -6; if (IS_NOT_ALIGNED(A)) return -7; if (*ldA < *m) return -8; if (*ldA & VDL__2) return -8; #endif /* !NDEBUG */ #ifdef _OPENMP #pragma omp parallel for default(none) shared(m,n,A,ldA,Ar,ldAr,Ai,ldAi) for (fnat j = 0u; j < *n; ++j) { register const VI idx = _mm512_set_epi64(7, 3, 6, 2, 5, 1, 4, 0); double complex *const Aj = A + j * (size_t)(*ldA); const double *const Arj = Ar + j * (size_t)(*ldAr); const double *const Aij = Ai + j * (size_t)(*ldAi); for (fnat i = 0u; i < *m; i += VDL_2) _mm512_store_pd((Aj + i), _mm512_permutexvar_pd(idx, _mm512_insertf64x4(_mm512_zextpd256_pd512(_mm256_load_pd(Arj + i)), _mm256_load_pd(Aij + i), 0x01u))); } return 1; #else /* !_OPENMP */ register const VI idx = _mm512_set_epi64(7, 3, 6, 2, 5, 1, 4, 0); for (fnat j = 0u; j < *n; ++j) { double complex *const Aj = A + j * (size_t)(*ldA); const double *const Arj = Ar + j * (size_t)(*ldAr); const double *const Aij = Ai + j * (size_t)(*ldAi); for (fnat i = 0u; i < *m; i += VDL_2) _mm512_store_pd((Aj + i), _mm512_permutexvar_pd(idx, _mm512_insertf64x4(_mm512_zextpd256_pd512(_mm256_load_pd(Arj + i)), _mm256_load_pd(Aij + i), 0x01u))); } return 0; #endif /* ?_OPENMP */ }
mm.mp.c
#include "../include/util.h"

float **m_a, **m_b, **m_c;
float real_time, proc_time, mflops;
long long flpins;
// Matrix dimensions: [0]=rows(A), [1]=cols(A), [2]=rows(B), [3]=cols(B).
int values[5];

/* Allocates and fills A (values[0] x values[1]), B (values[2] x values[3])
 * and the result C (values[0] x values[3]).
 *
 * Fixes over the previous version:
 *  - A was filled for values[1] rows and B for values[3] rows even though
 *    they were allocated with values[0] and values[2] rows respectively,
 *    overflowing the row-pointer arrays for non-square inputs.
 *  - The fill expression `rand() % 100 / rand() % 100 + 1` parsed as
 *    `((rand() % 100) / rand()) % 100 + 1` (precedence bug, possible
 *    division by zero); it now matches the intended form used in the
 *    square-matrix branch: `rand() % 1000 / (rand() % 100 + 1)`.
 *  - malloc results are checked.
 * NOTE(review): rand() is called from multiple threads; it is not required
 * to be thread-safe, matching the original behavior — confirm if
 * reproducibility matters. */
void init() {
    if(values[1] != values[2]) {
        printf("O número de linhas da matriz B necessita ser igual ao número de colunas da matriz A\n");
        exit(EXIT_FAILURE);
    }

    printf("Starting to allocate, %d threads available.\n", omp_get_max_threads());

    m_a = (float**) malloc(sizeof(float*) * values[0]);
    m_b = (float**) malloc(sizeof(float*) * values[2]);
    m_c = (float**) malloc(sizeof(float*) * values[0]);
    if(m_a == NULL || m_b == NULL || m_c == NULL) {
        printf("Falha na alocação de memória.\n");
        exit(EXIT_FAILURE);
    }

    #pragma omp parallel num_threads(8)
    {
        /* A: values[0] rows, values[1] columns. */
        #pragma omp for nowait
        for(int i = 0; i < values[0]; i++) {
            m_a[i] = (float*) malloc(sizeof(float) * values[1]);
            for(int j = 0; j < values[1]; j++)
                m_a[i][j] = rand() % 1000 / (rand() % 100 + 1);
        }

        /* B: values[2] rows, values[3] columns. */
        #pragma omp for nowait
        for(int i = 0; i < values[2]; i++) {
            m_b[i] = (float*) malloc(sizeof(float) * values[3]);
            for(int j = 0; j < values[3]; j++)
                m_b[i][j] = rand() % 1000 / (rand() % 100 + 1);
        }

        /* C: values[0] rows, values[3] columns, zero-initialized. */
        #pragma omp for
        for(int i = 0; i < values[0]; i++) {
            m_c[i] = (float*) malloc(sizeof(float) * values[3]);
            for(int j = 0; j < values[3]; j++)
                m_c[i][j] = 0;
        }
    }

    printf("Done allocating.\n");
}

/* Computes C = A * B in parallel.
 *
 * Fixes over the previous version:
 *  - collapse(3) parallelized the reduction dimension k, so multiple
 *    threads updated the same m_c[i][j] concurrently (data race). The
 *    collapse is now over (i, j) only and each thread accumulates into a
 *    private sum.
 *  - Loop bounds were only correct for square matrices (j ran to
 *    values[0], k to values[3]); they are now j < values[3] (columns of C)
 *    and k < values[1] (shared inner dimension). */
void mm() {
    printf("Starting MM with %d available threads.\n", omp_get_max_threads());

    #pragma omp parallel for collapse(2) num_threads(8)
    for(int i = 0; i < values[0]; i++)
        for(int j = 0; j < values[3]; j++) {
            float sum = 0.0f;
            for(int k = 0; k < values[1]; k++)
                sum += m_a[i][k] * m_b[k][j];
            m_c[i][j] = sum;
        }

    printf("Done MM\n");
}

/* Parses the four matrix dimensions from the command line, runs the
 * timed multiplication, and releases all matrices.
 *
 * Fixes over the previous version:
 *  - long_args[] now ends with the all-zero sentinel entry required by
 *    getopt_long (the missing sentinel caused reads past the array on
 *    unrecognized options).
 *  - Dimensions are validated to be positive before use. */
int main(int argc, char *argv[]) {
    srand(time(NULL));

    int c;
    omp_set_num_threads(8);

    struct option long_args[] = {
        {"la", required_argument, NULL, 'a'},
        {"ca", required_argument, NULL, 'b'},
        {"lb", required_argument, NULL, 'c'},
        {"cb", required_argument, NULL, 'd'},
        {0, 0, 0, 0}   /* sentinel required by getopt_long */
    };

    do {
        c = getopt_long(argc, argv, "a:b:c:d:", long_args, NULL);
        switch(c) {
            case -1:
            case 0:
                break;
            case 'a':
                values[0] = atoi(optarg);
                break;
            case 'b':
                values[1] = atoi(optarg);
                break;
            case 'c':
                values[2] = atoi(optarg);
                break;
            case 'd':
                values[3] = atoi(optarg);
                break;
            default:
                printf("Os seguintes paramêtros são obrigatório para a execução do programa: \
                    \n\tNúmero de linhas da matriz A (--la | -a). \
                    \n\tNúmero de colunas da matriz A (--ca | -b). \
                    \n\tNúmero de linhas da matriz B (--lb | -c). \
                    \n\tNúmero de colunas da matriz B (--cb | -d).");
                exit(EXIT_FAILURE);
        }
    } while(c != -1);

    if(values[0] <= 0 || values[1] <= 0 || values[2] <= 0 || values[3] <= 0) {
        printf("Todas as dimensões das matrizes devem ser positivas.\n");
        exit(EXIT_FAILURE);
    }

    init();

    TIME()
    mm();
    ENDTIME()

    /* Release all rows, then the row-pointer arrays. */
    for(int i = 0; i < values[0]; i++) {
        free(m_a[i]);
        free(m_c[i]);
    }
    for(int i = 0; i < values[2]; i++)
        free(m_b[i]);
    free(m_a);
    free(m_b);
    free(m_c);

    exit(EXIT_SUCCESS);
}
GB_unaryop__abs_uint64_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_uint64_uint32
// op(A') function:  GB_tran__abs_uint64_uint32

// C type:   uint64_t
// A type:   uint32_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: ABS of an unsigned value is the identity, so the op is a
// plain copy; the widening happens in GB_CASTING below.
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_uint64_uint32
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    uint32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // each entry is independent; static schedule over the anz entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_uint64_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the numeric phase of the transpose is instantiated from a shared
    // template, specialized by the macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__pow_fp64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__pow_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_08__pow_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_02__pow_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_04__pow_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__pow_fp64)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__pow_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__pow_fp64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pow_fp64)
// C=scalar+B                       GB (_bind1st__pow_fp64)
// C=scalar+B'                      GB (_bind1st_tran__pow_fp64)
// C=A+scalar                       GB (_bind2nd__pow_fp64)
// C=A'+scalar                      GB (_bind2nd_tran__pow_fp64)

// C type:   double
// A type:   double
// B,b type: double
// BinaryOp: cij = GB_pow (aij, bij)

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    double aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    double bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_pow (x, y) ;

// true if the binop must be flipped: pow is not commutative and has no
// built-in flipped variant, so z=f(y,x) is handled explicitly below
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_POW || GxB_NO_FP64 || GxB_NO_POW_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__pow_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pow_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pow_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pow_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__pow_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__pow_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__pow_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__pow_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__pow_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        Cx [p] = GB_pow (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__pow_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = GB_pow (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_pow (x, aij) ;                 \
}

GrB_Info GB (_bind1st_tran__pow_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_pow (aij, y) ;                 \
}

GrB_Info GB (_bind2nd_tran__pow_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
mixed_tentusscher_myo_epi_2004_S2_5.c
// Scenario 2 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
//
// Mixed cell model: each cell is tagged by a mapping array supplied via
// extra_data; tag 0 uses the myocardium variant, any other tag uses the
// epicardium variant.  NEQ, real, INITIAL_V and the *_CPU entry-point macros
// come from the companion header.

#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S2_5.h"

// Reports the model's resting potential and number of state variables
// (only the fields the caller asked for, per the two boolean flags).
GET_CELL_MODEL_DATA(init_cell_model_data)
{
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

// Loads the steady-state initial conditions for cell sv_id, choosing the
// myocardium or epicardium vector according to mapping[sv_id].
// Exits the program if no mapping (mask function) was provided.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
    static bool first_call = true;
    if(first_call)
    {
        print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
        first_call = false;
    }
    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }
    // Initial conditions for TenTusscher myocardium
    if (mapping[sv_id] == 0)
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;   //M
        sv[2] = 0.75;  //H
        sv[3] = 0.75f; //J
        sv[4] = 0.f;   //Xr1
        sv[5] = 1.f;   //Xr2
        sv[6] = 0.f;   //Xs
        sv[7] = 1.f;   //S
        sv[8] = 0.f;   //R
        sv[9] = 0.f;   //D
        sv[10] = 1.f;  //F
        sv[11] = 1.f;  //FCa
        sv[12] = 1.f;  //G
        sv[13] = 0.0002; //Cai
        sv[14] = 0.2f;   //CaSR
        sv[15] = 11.6f;  //Nai
        sv[16] = 138.3f; //Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
    // Initial conditions for TenTusscher epicardium
    else
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;   //M
        sv[2] = 0.75;  //H
        sv[3] = 0.75f; //J
        sv[4] = 0.f;   //Xr1
        sv[5] = 1.f;   //Xr2
        sv[6] = 0.f;   //Xs
        sv[7] = 1.f;   //S
        sv[8] = 0.f;   //R
        sv[9] = 0.f;   //D
        sv[10] = 1.f;  //F
        sv[11] = 1.f;  //FCa
        sv[12] = 1.f;  //G
        sv[13] = 0.0002; //Cai
        sv[14] = 0.2f;   //CaSR
        sv[15] = 11.6f;  //Nai
        sv[16] = 138.3f; //Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.5180103766650,0.00130403507538545,0.778471794824763,0.778283525838093,0.000176065618651399,0.484567171933683,0.00295084416213736,0.999998331387518,1.94976417188725e-08,1.90377881905604e-05,0.999769816140066,1.00765120153943,0.999999421011808,3.19432158740013e-05,1.25705353770675,9.77685728583073,139.479930314946};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
}

// Advances num_steps time steps of size dt for every cell in the work list,
// dispatching to the myo or epi right-hand side per the mapping array.
// Parallel over cells; each cell's state block is sv + sv_id*NEQ.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }
    uint32_t sv_id;
    int i;
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++)
    {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;
        for (int j = 0; j < num_steps; ++j)
        {
            // NOTE(review): mapping is indexed by the loop index i here, but by
            // sv_id in set_model_initial_conditions_cpu.  These differ whenever
            // cells_to_solve is non-NULL — confirm which indexing is intended.
            if (mapping[i] == 0)
                solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
            else
                solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One time step for a myocardium cell.  NOTE: despite the name rDY, the RHS
// routines below return the UPDATED state (the step is applied inside them),
// so the result is copied straight back into sv.
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
    real rY[NEQ], rDY[NEQ];
    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];
    RHS_cpu_myo(rY, rDY, stim_current, dt);
    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// TenTusscher 2004 myocardium right-hand side plus integration: gating
// variables use exponential (Rush-Larsen-style) updates, concentrations and
// voltage use explicit Euler.  rDY_ receives the new state, not derivatives.
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];
    real sm = sv[1]; real sh = sv[2]; real sj = sv[3];
    real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6];
    real ss = sv[7]; real sr = sv[8];
    real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12];
    real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;
    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;
    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;
    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;
    //Cellular capacitance
    real CAPACITANCE=0.185;
    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Myocardium cell
    real Gks=0.062;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Myocardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Working variables: currents, fluxes and gate steady-states/time constants
    real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL;
    real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak;
    real dNai; real dKi; real dCai; real dCaSR;
    real A;
    // real BufferFactorc;
    // real BufferFactorsr;
    real SERCA;
    real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent;
    real fcaold; real gold;
    real Ek; real Ena; real Eks; real Eca;
    real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc;
    real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK;
    real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2;
    real AJ_1; real BJ_1; real AJ_2; real BJ_2;
    real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J;
    real axr1; real bxr1; real axr2; real bxr2;
    real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2;
    real Axs; real Bxs; real Xs_INF; real TAU_Xs;
    real R_INF; real TAU_R; real S_INF; real TAU_S;
    real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF;
    real FCa_INF; real G_INF;
    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    // real BufcKbufc=Bufc*Kbufc;
    // real Kbufcsquare=Kbufc*Kbufc;
    // real Kbufc2=2*Kbufc;
    // real BufsrKbufsr=Bufsr*Kbufsr;
    // const real Kbufsrsquare=Kbufsr*Kbufsr;
    // const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);
    real sItot;

    //Needed to compute currents
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));
    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);
    //Determine total current
    (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    Irel=A*sd*sg;
    Ileak=0.00008f*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    // Analytic solution of the SR calcium buffering quadratic
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;
    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;
    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;
    // [!] Myocardium cell
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (exponential update toward each gate's steady state)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g gates may only move toward their steady state while
    // depolarized above -37 mV if that decreases them (rectified update)
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;
    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}

// One time step for an epicardium cell; see solve_model_ode_cpu_myo.
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
    real rY[NEQ], rDY[NEQ];
    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];
    RHS_cpu_epi(rY, rDY, stim_current, dt);
    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// TenTusscher 2004 epicardium right-hand side plus integration.  Differs from
// RHS_cpu_myo in Gks/Gto and in that conductances and Irel/Ileak parameters
// (arel, crel, Vleak) are overridden from the hard-coded `parameters` vector.
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];
    real sm = sv[1]; real sh = sv[2]; real sj = sv[3];
    real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6];
    real ss = sv[7]; real sr = sv[8];
    real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12];
    real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;
    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;
    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;
    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;
    //Cellular capacitance
    real CAPACITANCE=0.185;
    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Epicardium cell
    real Gks=0.245;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Epicardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Scenario-specific fitted parameter set; overrides the defaults above
    real parameters []={14.0425347024085,0.000535677026211757,0.000165310108418668,0.000350622775813131,0.284242546174007,0.160699932744295,0.199053804511766,4.02983501857037,0.0207339018842815,1.90362917367254,1092.38391918131,0.000589746455166102,0.152476943428258,0.0199994247147465,0.00594175101819478,9.05890620616966e-06};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    // Working variables: currents, fluxes and gate steady-states/time constants
    real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL;
    real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak;
    real dNai; real dKi; real dCai; real dCaSR;
    real A;
    // real BufferFactorc;
    // real BufferFactorsr;
    real SERCA;
    real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent;
    real fcaold; real gold;
    real Ek; real Ena; real Eks; real Eca;
    real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc;
    real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK;
    real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2;
    real AJ_1; real BJ_1; real AJ_2; real BJ_2;
    real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J;
    real axr1; real bxr1; real axr2; real bxr2;
    real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2;
    real Axs; real Bxs; real Xs_INF; real TAU_Xs;
    real R_INF; real TAU_R; real S_INF; real TAU_S;
    real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF;
    real FCa_INF; real G_INF;
    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    // real BufcKbufc=Bufc*Kbufc;
    // real Kbufcsquare=Kbufc*Kbufc;
    // real Kbufc2=2*Kbufc;
    // real BufsrKbufsr=Bufsr*Kbufsr;
    // const real Kbufsrsquare=Kbufsr*Kbufsr;
    // const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);
    real sItot;

    //Needed to compute currents
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));
    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);
    //Determine total current
    (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    // Irel/Ileak use the fitted arel/crel/Vleak instead of the myo constants
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;
    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;
    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (exponential update toward each gate's steady state)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g gates are rectified above -37 mV (may not increase)
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;
    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
/* ===== file: ZQ_CNN_MTCNN.h ===== */
#ifndef _ZQ_CNN_MTCNN_H_
#define _ZQ_CNN_MTCNN_H_
#pragma once
#include "ZQ_CNN_Net.h"
#include "ZQ_CNN_BBoxUtils.h"
#include <omp.h>
namespace ZQ
{
	// MTCNN face detector: cascaded P-net / R-net / O-net (plus an optional
	// L-net landmark refiner).  One net instance per worker thread is kept so
	// stages can run in parallel without sharing mutable net state.
	class ZQ_CNN_MTCNN
	{
	public:
		using string = std::string;

		// Defaults mirror the common MTCNN settings (min face 60 px,
		// pyramid factor 0.709, thresholds 0.6/0.7/0.7).
		ZQ_CNN_MTCNN()
		{
			min_size = 60;
			thresh[0] = 0.6; thresh[1] = 0.7; thresh[2] = 0.7;
			nms_thresh[0] = 0.6; nms_thresh[1] = 0.7; nms_thresh[2] = 0.7;
			width = 0; height = 0;
			factor = 0.709;
			pnet_overlap_thresh_count = 4;
			pnet_size = 12;
			pnet_stride = 2;
			special_handle_very_big_face = false;
			force_run_pnet_multithread = false;
			show_debug_info = false;
			limit_r_num = 0; limit_o_num = 0; limit_l_num = 0;
		}
		~ZQ_CNN_MTCNN()
		{
		}

	private:
#if __ARM_NEON
		const int BATCH_SIZE = 16;   // smaller batches on ARM NEON builds
#else
		const int BATCH_SIZE = 64;
#endif
		// one net per worker thread for each cascade stage
		std::vector<ZQ_CNN_Net> pnet, rnet, onet, lnet;
		bool has_lnet;               // whether the optional landmark net is loaded
		int thread_num;
		float thresh[3], nms_thresh[3];   // per-stage score / NMS thresholds (P,R,O)
		int min_size;                // minimum face size in pixels
		int width, height;           // expected input image size (set by SetPara)
		float factor;                // image-pyramid scale factor
		int pnet_overlap_thresh_count;
		int pnet_size;               // P-net receptive field (12 by default)
		int pnet_stride;
		int rnet_size;               // input sizes read back from the loaded nets
		int onet_size;
		int lnet_size;
		bool special_handle_very_big_face;
		bool do_landmark;
		float early_accept_thresh;
		float nms_thresh_per_scale;
		bool force_run_pnet_multithread;
		std::vector<float> scales;   // pyramid scales computed in SetPara
		std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> pnet_images;  // per-scale resized inputs
		ZQ_CNN_Tensor4D_NHW_C_Align128bit ori_input, rnet_image, onet_image;
		bool show_debug_info;
		int limit_r_num;             // caps on boxes fed to R/O/L stages (0 = no cap)
		int limit_o_num;
		int limit_l_num;

	public:
		void TurnOnShowDebugInfo() { show_debug_info = true; }
		void TurnOffShowDebugInfo() { show_debug_info = false; }

		// Caps the number of candidate boxes passed to the R/O/L stages
		// (0 disables the corresponding cap).
		void SetLimit(int limit_r = 0, int limit_o = 0, int limit_l = 0)
		{
			limit_r_num = limit_r; limit_o_num = limit_o; limit_l_num = limit_l;
		}

		// Loads all stage networks from files, one copy per thread.
		// thread_num < 1 additionally forces the multi-threaded P-net path.
		// On any load failure all nets are cleared and false is returned.
		bool Init(const string& pnet_param, const string& pnet_model, const string& rnet_param, const string& rnet_model,
			const string& onet_param, const string& onet_model, int thread_num = 1,
			bool has_lnet = false, const string& lnet_param = "", const std::string& lnet_model = "")
		{
			if (thread_num < 1)
				force_run_pnet_multithread = true;
			else
				force_run_pnet_multithread = false;
			thread_num = __max(1, thread_num);
			pnet.resize(thread_num);
			rnet.resize(thread_num);
			onet.resize(thread_num);
			this->has_lnet = has_lnet;
			if (has_lnet)
			{
				lnet.resize(thread_num);
			}
			bool ret = true;
			for (int i = 0; i < thread_num; i++)
			{
				ret = pnet[i].LoadFrom(pnet_param, pnet_model,true,1e-9, true)
					&& rnet[i].LoadFrom(rnet_param, rnet_model, true, 1e-9, true)
					&& onet[i].LoadFrom(onet_param, onet_model, true, 1e-9, true);
				if (has_lnet && ret)
					ret = lnet[i].LoadFrom(lnet_param, lnet_model, true, 1e-9, true);
				if (!ret) break;
			}
			if (!ret)
			{
				pnet.clear(); rnet.clear(); onet.clear();
				if (has_lnet) lnet.clear();
				this->thread_num = 0;
			}
			else
				this->thread_num = thread_num;
			if (show_debug_info)
			{
				printf("rnet = %.2f M, onet = %.2f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0), onet[0].GetNumOfMulAdd() / (1024.0*1024.0));
				if (has_lnet)
					printf("lnet = %.2f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0));
			}
			// cache each stage's expected input size (square: H is used)
			int C, H, W;
			rnet[0].GetInputDim(C, H, W);
			rnet_size = H;
			onet[0].GetInputDim(C, H, W);
			onet_size = H;
			if (has_lnet)
			{
				lnet[0].GetInputDim(C, H, W);
				lnet_size = H;
			}
			return ret;
		}

		// Same as Init but loads the networks from in-memory buffers.
		// NOTE(review): debug printf here uses %.1f while Init uses %.2f —
		// harmless, but inconsistent.
		bool InitFromBuffer(
			const char* pnet_param, __int64 pnet_param_len, const char* pnet_model, __int64 pnet_model_len,
			const char* rnet_param, __int64 rnet_param_len, const char* rnet_model, __int64 rnet_model_len,
			const char* onet_param, __int64 onet_param_len, const char* onet_model, __int64 onet_model_len,
			int thread_num = 1, bool has_lnet = false,
			const char* lnet_param = 0, __int64 lnet_param_len = 0, const char* lnet_model = 0, __int64 lnet_model_len = 0)
		{
			if (thread_num < 1)
				force_run_pnet_multithread = true;
			else
				force_run_pnet_multithread = false;
			thread_num = __max(1, thread_num);
			pnet.resize(thread_num);
			rnet.resize(thread_num);
			onet.resize(thread_num);
			this->has_lnet = has_lnet;
			if(has_lnet)
				lnet.resize(thread_num);
			bool ret = true;
			for (int i = 0; i < thread_num; i++)
			{
				ret = pnet[i].LoadFromBuffer(pnet_param, pnet_param_len,pnet_model,pnet_model_len, true, 1e-9, true) &&
					rnet[i].LoadFromBuffer(rnet_param, rnet_param_len, rnet_model, rnet_model_len, true, 1e-9, true) &&
					onet[i].LoadFromBuffer(onet_param, onet_param_len, onet_model, onet_model_len, true, 1e-9, true);
				if (has_lnet && ret)
					ret = lnet[i].LoadFromBuffer(lnet_param, lnet_param_len, lnet_model, lnet_model_len, true, 1e-9, true);
				if (!ret) break;
			}
			if (!ret)
			{
				pnet.clear(); rnet.clear(); onet.clear();
				if (has_lnet) lnet.clear();
				this->thread_num = 0;
			}
			else
				this->thread_num = thread_num;
			if (show_debug_info)
			{
				printf("rnet = %.1f M, onet = %.1f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0), onet[0].GetNumOfMulAdd() / (1024.0*1024.0));
				if (has_lnet)
					printf("lnet = %.1f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0));
			}
			int C, H, W;
			rnet[0].GetInputDim(C, H, W);
			rnet_size = H;
			onet[0].GetInputDim(C, H, W);
			onet_size = H;
			if (has_lnet)
			{
				lnet [0].GetInputDim (C, H, W);
				lnet_size = H;
			}
			return ret;
		}

		// Configures detection parameters and (re)builds the image-pyramid
		// scale list when the image size or scale factor changed.
		// NOTE(review): the clamped local scale_factor is compared against the
		// member `factor` but `factor` is never assigned from it, so the pyramid
		// loop below keeps using the old factor — confirm this is intended.
		void SetPara(int w, int h, int min_face_size = 60, float pthresh = 0.6, float rthresh = 0.7, float othresh = 0.7,
			float nms_pthresh = 0.6, float nms_rthresh = 0.7, float nms_othresh = 0.7, float scale_factor = 0.709,
			int pnet_overlap_thresh_count = 4, int pnet_size = 12, int pnet_stride = 2,
			bool special_handle_very_big_face = false, bool do_landmark = true, float early_accept_thresh = 1.00)
		{
			min_size = __max(pnet_size, min_face_size);
			thresh[0] = __max(0.1, pthresh);
			thresh[1] = __max(0.1, rthresh);
			thresh[2] = __max(0.1, othresh);
			nms_thresh[0] = __max(0.1, nms_pthresh);
			nms_thresh[1] = __max(0.1, nms_rthresh);
			nms_thresh[2] = __max(0.1, nms_othresh);
			scale_factor = __max(0.5, __min(0.97, scale_factor));
			this->pnet_overlap_thresh_count = __max(0, pnet_overlap_thresh_count);
			this->pnet_size = pnet_size;
			this->pnet_stride = pnet_stride;
			this->special_handle_very_big_face = special_handle_very_big_face;
			this->do_landmark = do_landmark;
			this->early_accept_thresh = early_accept_thresh;
			if (pnet_size == 20 && pnet_stride == 4)
				nms_thresh_per_scale = 0.45;
			else
				nms_thresh_per_scale = 0.495;
			if (width != w || height != h || factor != scale_factor)
			{
				scales.clear();
				pnet_images.clear();
				width = w;
				height = h;
				float minside = __min(width, height);
				int MIN_DET_SIZE = pnet_size;
				// standard MTCNN pyramid: shrink until the short side reaches
				// the P-net input size
				float m = (float)MIN_DET_SIZE / min_size;
				minside *= m;
				while (minside > MIN_DET_SIZE)
				{
					scales.push_back(m);
					minside *= factor;
					m *= factor;
				}
				minside = __min(width, height);
				int count = scales.size();
				// drop scales whose short side collapses to <= pnet_size
				for (int i = scales.size() - 1; i >= 0; i--)
				{
					if (ceil(scales[i] * minside) <= pnet_size)
					{
						count--;
					}
				}
				if (special_handle_very_big_face)
				{
					// densify the coarse end of the pyramid with extra scales
					// down to the P-net input size (for very large faces)
					if (count > 2)
						count--;
					scales.resize(count);
					if (count > 0)
					{
						float last_size = ceil(scales[count - 1] * minside);
						for (int tmp_size = last_size - 1; tmp_size >= pnet_size + 1; tmp_size -= 2)
						{
							scales.push_back((float)tmp_size / minside);
							count++;
						}
					}
					scales.push_back((float)pnet_size / minside);
					count++;
				}
				else
				{
					scales.push_back((float)pnet_size / minside);
					count++;
				}
				pnet_images.resize(count);
			}
		}

		// Detect faces in a BGR byte image; the image must match the size
		// passed to SetPara.  Converts to the internal tensor then delegates.
		bool Find(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& results)
		{
			double t1 = omp_get_wtime();
			if (width != _width || height != _height)
				return false;
			if (!ori_input.ConvertFromBGR(bgr_img, width, height, _widthStep))
				return false;
			double t2 = omp_get_wtime();
			if (show_debug_info)
				printf("convert cost: %.3f ms\n", 1000 * (t2 - t1));
			return Find(ori_input, results);
		}

		// 106-landmark variant of Find (requires the L-net).
		bool Find106(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox106>& results)
		{
			double t1 = omp_get_wtime();
			if (width != _width || height != _height)
				return false;
			if (!ori_input.ConvertFromBGR(bgr_img, width, height, _widthStep))
				return false;
			double t2 = omp_get_wtime();
			if (show_debug_info)
				printf("convert cost: %.3f ms\n", 1000 * (t2 - t1));
			return Find106(ori_input, results);
		}

		// Full cascade on a prepared tensor: P -> (cap) -> R -> (cap) -> O,
		// then optionally L for landmark refinement.
		bool Find(ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& results)
		{
			double t1 = omp_get_wtime();
			std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox;
			if (!_Pnet_stage(input, firstBbox))
				return false;
			//results = firstBbox;
			//return true;
			if (limit_r_num > 0)
			{
				_select(firstBbox, limit_r_num, input.GetW(), input.GetH());
			}
			double t2 = omp_get_wtime();
			if (!_Rnet_stage(input, firstBbox, secondBbox))
				return false;
			//results = secondBbox;
			//return true;
			if (limit_o_num > 0)
			{
				_select(secondBbox, limit_o_num, input.GetW(), input.GetH());
			}
			if (!has_lnet || !do_landmark)
			{
				double t3 = omp_get_wtime();
				if (!_Onet_stage(input, secondBbox, results))
					return false;
				double t4 = omp_get_wtime();
				if (show_debug_info)
				{
					printf("final found num: %d\n", (int)results.size());
					printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms)\n",
						1000 * (t4 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3));
				}
			}
			else
			{
				double t3 = omp_get_wtime();
				if (!_Onet_stage(input, secondBbox, thirdBbox))
					return false;
				if (limit_l_num > 0)
				{
					_select(thirdBbox, limit_l_num, input.GetW(), input.GetH());
				}
				double t4 = omp_get_wtime();
				if (!_Lnet_stage(input, thirdBbox, results))
					return false;
				double t5 = omp_get_wtime();
				if (show_debug_info)
				{
					printf("final found num: %d\n", (int)results.size());
					printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n",
						1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4));
				}
			}
			return true;
		}

		// Full cascade producing 106-point landmarks; fails (returns false)
		// when no L-net is loaded or landmarks are disabled.
		bool Find106(ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox106>& results)
		{
			double t1 = omp_get_wtime();
			std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox;
			if (!_Pnet_stage(input, firstBbox))
				return false;
			//results = firstBbox;
			//return true;
			if (limit_r_num > 0)
			{
				_select(firstBbox, limit_r_num, input.GetW(), input.GetH());
			}
			double t2 = omp_get_wtime();
			if (!_Rnet_stage(input, firstBbox, secondBbox))
				return false;
			//results = secondBbox;
			//return true;
			if (limit_o_num > 0)
			{
				_select(secondBbox, limit_o_num, input.GetW(), input.GetH());
			}
			if (!has_lnet || !do_landmark)
			{
				return false;
			}
			double t3 = omp_get_wtime();
			if (!_Onet_stage(input, secondBbox, thirdBbox))
				return false;
			if (limit_l_num > 0)
			{
				_select(thirdBbox, limit_l_num, input.GetW(), input.GetH());
			}
			double t4 = omp_get_wtime();
			if (!_Lnet106_stage(input, thirdBbox, results))
				return false;
			double t5 = omp_get_wtime();
			if (show_debug_info)
			{
				printf("final found num: %d\n", (int)results.size());
				printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n",
					1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4));
			}
			return true;
		}

	private:
		// Runs P-net once per pyramid scale on a single thread and collects,
		// per scale, the face-probability map (channel 1 of "prob1") into maps,
		// with dimensions mapH/mapW derived from the P-net stride/window.
		void _compute_Pnet_single_thread(ZQ_CNN_Tensor4D_NHW_C_Align128bit& input,
			std::vector<std::vector<float> >& maps, std::vector<int>& mapH, std::vector<int>& mapW)
		{
			int scale_num = 0;
			for (int i = 0; i < scales.size(); i++)
			{
				int changedH = (int)ceil(height*scales[i]);
				int changedW = (int)ceil(width*scales[i]);
				if (changedH < pnet_size || changedW < pnet_size)
					continue;
				scale_num++;
				mapH.push_back((changedH - pnet_size) / pnet_stride + 1);
				mapW.push_back((changedW - pnet_size) / pnet_stride + 1);
			}
			maps.resize(scale_num);
			for (int i = 0; i < scale_num; i++)
			{
				maps[i].resize(mapH[i] * mapW[i]);
			}
			for (int i = 0; i < scale_num; i++)
			{
				int changedH = (int)ceil(height*scales[i]);
				int changedW = (int)ceil(width*scales[i]);
				float cur_scale_x = (float)width / changedW;
				float cur_scale_y = (float)height / changedH;
				double t10 = omp_get_wtime();
				if (scales[i] != 1)
				{
					input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
				}
				double t11 = omp_get_wtime();
				// scale 1 runs directly on the input, avoiding a copy
				if (scales[i] != 1)
					pnet[0].Forward(pnet_images[i]);
				else
					pnet[0].Forward(input);
				double t12 = omp_get_wtime();
				if (show_debug_info)
					printf("Pnet [%d]: resolution [%dx%d], resize:%.3f ms, cost:%.3f ms\n",
						i, changedW, changedH, 1000 * (t11 - t10), 1000 * (t12 - t11));
				const ZQ_CNN_Tensor4D* score = pnet[0].GetBlobByName("prob1");
				//score p
				int scoreH = score->GetH();
				int scoreW = score->GetW();
				int scorePixStep = score->GetPixelStep();
				const float *p = score->GetFirstPixelPtr() + 1;
				for (int row = 0; row < scoreH; row++)
				{
					for (int col = 0; col < scoreW; col++)
					{
						if(row < mapH[i] && col < mapW[i])
							maps[i][row*mapW[i] + col] = *p;
						p += scorePixStep;
					}
				}
			}
		}

		// Multi-threaded P-net: resizes all pyramid levels (in parallel when
		// thread_num > 1), then tiles each level into overlapping blocks
		// (overlap = cellsize - stride so no window is lost at seams) and runs
		// P-net per block.  NOTE: definition continues past this chunk.
		void _compute_Pnet_multi_thread(ZQ_CNN_Tensor4D_NHW_C_Align128bit& input,
			std::vector<std::vector<float> >& maps, std::vector<int>& mapH, std::vector<int>& mapW)
		{
			if (thread_num <= 1)
			{
				for (int i = 0; i < scales.size(); i++)
				{
					int changedH = (int)ceil(height*scales[i]);
					int changedW = (int)ceil(width*scales[i]);
					if (changedH < pnet_size || changedW < pnet_size)
						continue;
					if (scales[i] != 1)
					{
						input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
					}
				}
			}
			else
			{
#pragma omp parallel for num_threads(thread_num) schedule(dynamic, 1)
				for (int i = 0; i < scales.size(); i++)
				{
					int changedH = (int)ceil(height*scales[i]);
					int changedW = (int)ceil(width*scales[i]);
					if (changedH < pnet_size || changedW < pnet_size)
						continue;
					if (scales[i] != 1)
					{
						input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
					}
				}
			}
			int scale_num = 0;
			for (int i = 0; i < scales.size(); i++)
			{
				int changedH = (int)ceil(height*scales[i]);
				int changedW = (int)ceil(width*scales[i]);
				if (changedH < pnet_size || changedW < pnet_size)
					continue;
				scale_num++;
				mapH.push_back((changedH - pnet_size) / pnet_stride + 1);
				mapW.push_back((changedW - pnet_size) / pnet_stride + 1);
			}
			maps.resize(scale_num);
			for (int i = 0; i < scale_num; i++)
			{
				maps[i].resize(mapH[i] * mapW[i]);
			}
			// build the per-block task list over all scales
			std::vector<int> task_rect_off_x;
			std::vector<int> task_rect_off_y;
			std::vector<int> task_rect_width;
			std::vector<int> task_rect_height;
			std::vector<float> task_scale;
			std::vector<int> task_scale_id;
			int stride = pnet_stride;
			const int block_size = 64 * stride;
			int cellsize = pnet_size;
			int border_size = cellsize - stride;
			int overlap_border_size = cellsize / stride;
			int jump_size = block_size - border_size;
			for (int i = 0; i < scales.size(); i++)
			{
				int changeH = (int)ceil(height*scales[i]);
				int changeW = (int)ceil(width*scales[i]);
				if (changeH < pnet_size || changeW < pnet_size)
					continue;
				int block_H_num = 0;
				int block_W_num = 0;
				int start = 0;
				while (start < changeH)
				{
					block_H_num++;
					if (start + block_size >= changeH)
						break;
					start += jump_size;
				}
				start = 0;
				while (start < changeW)
				{
					block_W_num++;
					if (start + block_size >= changeW)
						break;
					start += jump_size;
				}
				for (int s = 0; s < block_H_num; s++)
				{
					for (int t = 0; t < block_W_num; t++)
					{
						int rect_off_x = t * jump_size;
						int rect_off_y = s * jump_size;
						int rect_width = __min(changeW, rect_off_x + block_size) - rect_off_x;
						int rect_height = __min(changeH, rect_off_y + block_size) - rect_off_y;
						if (rect_width >= cellsize && rect_height >= cellsize)
						{
							task_rect_off_x.push_back(rect_off_x);
							task_rect_off_y.push_back(rect_off_y);
							task_rect_width.push_back(rect_width);
							task_rect_height.push_back(rect_height);
							task_scale.push_back(scales[i]);
							task_scale_id.push_back(i);
						}
					}
				}
			}
			//
			int task_num = task_scale.size();
			std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_pnet_images(thread_num);
			if (thread_num <= 1)
			{
				for (int i = 0; i < task_num; i++)
				{
					int thread_id = 0;// omp_get_thread_num();
					int scale_id = task_scale_id[i];
					float cur_scale = task_scale[i];
					int i_rect_off_x = task_rect_off_x[i];
					int i_rect_off_y = task_rect_off_y[i];
					int i_rect_width = task_rect_width[i];
					int i_rect_height = task_rect_height[i];
					if (scale_id == 0 && scales[0] == 1)
					{
						if (!input.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
							continue;
					}
					else
					{
						if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
							continue;
					}
					if (!pnet[thread_id].Forward(task_pnet_images[thread_id]))
						continue;
					const ZQ_CNN_Tensor4D* score = pnet[thread_id].GetBlobByName("prob1");
					int task_count = 0;
					//score p
					int scoreH = score->GetH();
					int scoreW = score->GetW();
					int scorePixStep = score->GetPixelStep();
					const float *p = score->GetFirstPixelPtr() + 1;
					ZQ_CNN_BBox bbox;
					ZQ_CNN_OrderScore order;
					for (int row =
0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { int real_row = row + i_rect_off_y / stride; int real_col = col + i_rect_off_x / stride; if (real_row < mapH[scale_id] && real_col < mapW[scale_id]) maps[scale_id][real_row*mapW[scale_id] + real_col] = *p; p += scorePixStep; } } } } else { #pragma omp parallel for num_threads(thread_num) for (int i = 0; i < task_num; i++) { int thread_id = omp_get_thread_num(); int scale_id = task_scale_id[i]; float cur_scale = task_scale[i]; int i_rect_off_x = task_rect_off_x[i]; int i_rect_off_y = task_rect_off_y[i]; int i_rect_width = task_rect_width[i]; int i_rect_height = task_rect_height[i]; if (scale_id == 0 && scales[0] == 1) { if (!input.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } else { if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } if (!pnet[thread_id].Forward(task_pnet_images[thread_id])) continue; const ZQ_CNN_Tensor4D* score = pnet[thread_id].GetBlobByName("prob1"); int task_count = 0; //score p int scoreH = score->GetH(); int scoreW = score->GetW(); int scorePixStep = score->GetPixelStep(); const float *p = score->GetFirstPixelPtr() + 1; ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { int real_row = row + i_rect_off_y / stride; int real_col = col + i_rect_off_x / stride; if (real_row < mapH[scale_id] && real_col < mapW[scale_id]) maps[scale_id][real_row*mapW[scale_id] + real_col] = *p; p += scorePixStep; } } } } } bool _Pnet_stage(ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& firstBbox) { if (thread_num <= 0) return false; double t1 = omp_get_wtime(); firstBbox.clear(); std::vector<std::vector<float> > maps; std::vector<int> mapH; std::vector<int> mapW; if (thread_num == 1 && !force_run_pnet_multithread) { pnet[0].TurnOffShowDebugInfo(); 
//pnet[0].TurnOnShowDebugInfo(); _compute_Pnet_single_thread(input, maps, mapH, mapW); } else { _compute_Pnet_multi_thread(input, maps, mapH, mapW); } ZQ_CNN_OrderScore order; std::vector<std::vector<ZQ_CNN_BBox> > bounding_boxes(scales.size()); std::vector<std::vector<ZQ_CNN_OrderScore> > bounding_scores(scales.size()); const int block_size = 32; int stride = pnet_stride; int cellsize = pnet_size; int border_size = cellsize / stride; for (int i = 0; i < maps.size(); i++) { double t13 = omp_get_wtime(); int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; float cur_scale_x = (float)width / changedW; float cur_scale_y = (float)height / changedH; int count = 0; //score p int scoreH = mapH[i]; int scoreW = mapW[i]; const float *p = &maps[i][0]; if (scoreW <= block_size && scoreH < block_size) { ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { if (*p > thresh[0]) { bbox.score = *p; order.score = *p; order.oriOrder = count; bbox.row1 = stride*row; bbox.col1 = stride*col; bbox.row2 = stride*row + cellsize; bbox.col2 = stride*col + cellsize; bbox.exist = true; bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bounding_boxes[i].push_back(bbox); bounding_scores[i].push_back(order); count++; } p ++; } } int before_count = bounding_boxes[i].size(); ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int after_count = bounding_boxes[i].size(); for (int j = 0; j < after_count; j++) { ZQ_CNN_BBox& bbox = bounding_boxes[i][j]; bbox.row1 = round(bbox.row1 *cur_scale_y); bbox.col1 = round(bbox.col1 *cur_scale_x); bbox.row2 = round(bbox.row2 *cur_scale_y); bbox.col2 = round(bbox.col2 *cur_scale_x); 
/* _Pnet_stage continued: large-map path.  The score map is split into a grid
   of block_H_num x block_W_num tiles (with a `border_size` overlap on the
   leading edge of interior tiles) so thresholding + NMS can run per tile,
   optionally in parallel; surviving boxes are then merged per scale and
   finally NMS'd globally across scales.
   FIXES in this revision:
   (1) The tile-end ternaries compared `bw`/`bh` against `block_num - 1`
       (total tile count) instead of `block_W_num - 1`/`block_H_num - 1`,
       so whenever block_num != block_W_num the last column/row of tiles
       ended at (bw+1)*width_per_block instead of scoreW (resp. scoreH),
       silently skipping the rightmost/bottom score-map cells.
   (2) `before_count`/`after_count` were accumulated unsynchronized inside
       the `omp parallel for` (data race); a reduction clause fixes that
       without changing the single-thread result. */
bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); } double t14 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count); } else { int before_count = 0, after_count = 0; int block_H_num = __max(1, scoreH / block_size); int block_W_num = __max(1, scoreW / block_size); int block_num = block_H_num*block_W_num; int width_per_block = scoreW / block_W_num; int height_per_block = scoreH / block_H_num; std::vector<std::vector<ZQ_CNN_BBox> > tmp_bounding_boxes(block_num); std::vector<std::vector<ZQ_CNN_OrderScore> > tmp_bounding_scores(block_num); std::vector<int> block_start_w(block_num), block_end_w(block_num); std::vector<int> block_start_h(block_num), block_end_h(block_num); for (int bh = 0; bh < block_H_num; bh++) { for (int bw = 0; bw < block_W_num; bw++) { int bb = bh * block_W_num + bw; block_start_w[bb] = (bw == 0) ? 0 : (bw*width_per_block - border_size); block_end_w[bb] = (bw == block_W_num - 1) ? scoreW : ((bw + 1)*width_per_block); /* FIX: was block_num - 1 */ block_start_h[bb] = (bh == 0) ? 0 : (bh*height_per_block - border_size); block_end_h[bb] = (bh == block_H_num - 1) ? /* FIX: was block_num - 1 */
scoreH : ((bh + 1)*height_per_block); } } int chunk_size = 1;// ceil((float)block_num / thread_num);
if (thread_num <= 1) { for (int bb = 0; bb < block_num; bb++) { ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; int count = 0; for (int row = block_start_h[bb]; row < block_end_h[bb]; row++) { p = &maps[i][0] + row*scoreW + block_start_w[bb]; for (int col = block_start_w[bb]; col < block_end_w[bb]; col++) { if (*p > thresh[0]) { bbox.score = *p; order.score = *p; order.oriOrder = count; bbox.row1 = stride*row; bbox.col1 = stride*col; bbox.row2 = stride*row + cellsize; bbox.col2 = stride*col + cellsize; bbox.exist = true; bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); tmp_bounding_boxes[bb].push_back(bbox); tmp_bounding_scores[bb].push_back(order); count++; } p++; } } int tmp_before_count = tmp_bounding_boxes[bb].size(); ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int tmp_after_count = tmp_bounding_boxes[bb].size(); before_count += tmp_before_count; after_count += tmp_after_count; } } else {
#pragma omp parallel for schedule(dynamic, chunk_size) num_threads(thread_num) reduction(+:before_count,after_count) /* FIX: race on the counters */
for (int bb = 0; bb < block_num; bb++) { ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; int count = 0; for (int row = block_start_h[bb]; row < block_end_h[bb]; row++) { const float* p = &maps[i][0] + row*scoreW + block_start_w[bb]; for (int col = block_start_w[bb]; col < block_end_w[bb]; col++) { if (*p > thresh[0]) { bbox.score = *p; order.score = *p; order.oriOrder = count; bbox.row1 = stride*row; bbox.col1 = stride*col; bbox.row2 = stride*row + cellsize; bbox.col2 = stride*col + cellsize; bbox.exist = true; bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bbox.area = (bbox.row2 -
bbox.row1)*(bbox.col2 - bbox.col1); tmp_bounding_boxes[bb].push_back(bbox); tmp_bounding_scores[bb].push_back(order); count++; } p++; } } int tmp_before_count = tmp_bounding_boxes[bb].size(); ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int tmp_after_count = tmp_bounding_boxes[bb].size(); before_count += tmp_before_count; after_count += tmp_after_count; } } count = 0; for (int bb = 0; bb < block_num; bb++) { std::vector<ZQ_CNN_BBox>::iterator it = tmp_bounding_boxes[bb].begin(); for (; it != tmp_bounding_boxes[bb].end(); it++) { if ((*it).exist) { bounding_boxes[i].push_back(*it); order.score = (*it).score; order.oriOrder = count; bounding_scores[i].push_back(order); count++; } } }
//ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", 0);
after_count = bounding_boxes[i].size(); for (int j = 0; j < after_count; j++) { ZQ_CNN_BBox& bbox = bounding_boxes[i][j]; bbox.row1 = round(bbox.row1 *cur_scale_y); bbox.col1 = round(bbox.col1 *cur_scale_x); bbox.row2 = round(bbox.row2 *cur_scale_y); bbox.col2 = round(bbox.col2 *cur_scale_x); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); } double t14 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count); } } std::vector<ZQ_CNN_OrderScore> firstOrderScore; int count = 0; for (int i = 0; i < scales.size(); i++) { std::vector<ZQ_CNN_BBox>::iterator it = bounding_boxes[i].begin(); for (; it != bounding_boxes[i].end(); it++) { if ((*it).exist) { firstBbox.push_back(*it); order.score = (*it).score; order.oriOrder = count; firstOrderScore.push_back(order); count++; } } }
//the first stage's nms
if (count < 1) return false; double t15 = omp_get_wtime(); ZQ_CNN_BBoxUtils::_nms(firstBbox, firstOrderScore, nms_thresh[0], "Union", 0, 1); ZQ_CNN_BBoxUtils::_refine_and_square_bbox(firstBbox, width, height,true); double t16 =
/* End of _Pnet_stage (debug timing + return), then stage 2.
   _Rnet_stage: crops every surviving stage-1 box from the input, batches the
   crops into at most BATCH_SIZE per task, runs Rnet per batch (one net
   instance per OpenMP thread), keeps boxes whose "prob1" face score exceeds
   thresh[1] (storing the 4 regression offsets from "conv5-2"), then NMS
   ("Min" mode) + refine/square the merged result into secondBbox. */
omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms\n", 1000 * (t16 - t15)); if (show_debug_info) printf("first stage candidate count: %d\n", count); double t3 = omp_get_wtime(); if (show_debug_info) printf("stage 1: cost %.3f ms\n", 1000 * (t3 - t1)); return true; } bool _Rnet_stage(const ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& firstBbox, std::vector<ZQ_CNN_BBox>& secondBbox) { double t3 = omp_get_wtime(); secondBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it = firstBbox.begin(); std::vector<ZQ_CNN_OrderScore> secondScore; std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int r_count = 0; for (; it != firstBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); r_count++; secondBbox.push_back(*it); } } } int batch_size = BATCH_SIZE; int per_num = ceil((float)r_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)r_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_rnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox> > task_secondBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(r_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num);
task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_secondBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_secondBbox[i][j] = secondBbox[st_id + j]; } } }
/* NOTE(review): `task_src_off_x.size() == 0` tests the outer vector (always
   need_thread_num) — the Onet stage below also checks
   `task_src_off_x[pp].size() == 0`; presumably that was intended here too.
   Harmless only if ResizeBilinearRect fails cleanly on empty rect lists. */
if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } rnet[0].Forward(task_rnet_images[pp]); const ZQ_CNN_Tensor4D* score = rnet[0].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = rnet[0].GetBlobByName("conv5-2"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int task_count = 0; for (int i = 0; i < task_secondBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[1]) { for (int j = 0; j < 4; j++) task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_secondBbox[pp][i].exist = false; } } if (task_count < 1) { task_secondBbox[pp].clear(); continue; } for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--) { if (!task_secondBbox[pp][i].exist) task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i); } } } else {
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if
(!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } rnet[thread_id].Forward(task_rnet_images[pp]); const ZQ_CNN_Tensor4D* score = rnet[thread_id].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = rnet[thread_id].GetBlobByName("conv5-2"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int task_count = 0; for (int i = 0; i < task_secondBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[1]) { for (int j = 0; j < 4; j++) task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_secondBbox[pp][i].exist = false; } } if (task_count < 1) { task_secondBbox[pp].clear(); continue; } for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--) { if (!task_secondBbox[pp][i].exist) task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i); } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_secondBbox[i].size(); } secondBbox.resize(count); secondScore.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_secondBbox[i].size(); j++) { secondBbox[id] = task_secondBbox[i][j]; secondScore[id].score = secondBbox[id].score; secondScore[id].oriOrder = id; id++; } }
//ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Union");
ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Min"); ZQ_CNN_BBoxUtils::_refine_and_square_bbox(secondBbox, width, height, true); count = secondBbox.size(); double t4 = omp_get_wtime(); if (show_debug_info) printf("run Rnet [%d] times, candidate after nms: %d \n", r_count,
/* End of _Rnet_stage, then stage 3.
   _Onet_stage: same batched crop/forward scheme as _Rnet_stage but with Onet;
   additionally reads 5-point landmarks from "conv6-3" (x in ppoint[0..4],
   y in ppoint[5..9], scaled into the candidate box).  When landmarks are not
   requested, boxes scoring above early_accept_thresh skip Onet entirely and
   are merged back in before the final "Min" NMS. */
count); if (show_debug_info) printf("stage 2: cost %.3f ms\n", 1000 * (t4 - t3)); return true; } bool _Onet_stage(const ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& secondBbox, std::vector<ZQ_CNN_BBox>& thirdBbox) { double t4 = omp_get_wtime(); thirdBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it = secondBbox.begin(); std::vector<ZQ_CNN_OrderScore> thirdScore; std::vector<ZQ_CNN_BBox> early_accept_thirdBbox; std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int o_count = 0; for (; it != secondBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { if (!do_landmark && it->score > early_accept_thresh) { early_accept_thirdBbox.push_back(*it); } else { src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); o_count++; thirdBbox.push_back(*it); } } } } int batch_size = BATCH_SIZE; int per_num = ceil((float)o_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)o_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_onet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox> > task_thirdBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(o_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num);
task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_thirdBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_thirdBbox[i][j] = thirdBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0 || task_src_off_x[pp].size() == 0) continue; if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); onet[0].Forward(task_onet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* score = onet[0].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = onet[0].GetBlobByName("conv6-2"); const ZQ_CNN_Tensor4D* keyPoint = onet[0].GetBlobByName("conv6-3"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); const float* keyPoint_ptr = 0; if (keyPoint != 0) keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int keyPoint_sliceStep = 0; if (keyPoint != 0) keyPoint_sliceStep = keyPoint->GetSliceStep(); int task_count = 0; ZQ_CNN_OrderScore order; for (int i = 0; i < task_thirdBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[2]) { for (int j = 0; j < 4; j++) task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; if (keyPoint != 0) { for (int num = 0; num < 5; num++) { task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 + (task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 + (task_thirdBbox[pp][i].row2 -
task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_thirdBbox[pp][i].exist = false; } } if (task_count < 1) { task_thirdBbox[pp].clear(); continue; } for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--) { if (!task_thirdBbox[pp][i].exist) task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i); } } } else {
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0 || task_src_off_x[pp].size() == 0) continue; if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); onet[thread_id].Forward(task_onet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* score = onet[thread_id].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D* location = onet[thread_id].GetBlobByName("conv6-2"); const ZQ_CNN_Tensor4D* keyPoint = onet[thread_id].GetBlobByName("conv6-3"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); const float* keyPoint_ptr = 0; if (keyPoint != 0) keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int keyPoint_sliceStep = 0; if (keyPoint != 0) keyPoint_sliceStep = keyPoint->GetSliceStep(); int task_count = 0; ZQ_CNN_OrderScore order; for (int i = 0; i < task_thirdBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[2]) { for (int j = 0; j < 4; j++) task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; if (keyPoint != 0) { for (int num = 0; num < 5; num++) { task_thirdBbox[pp][i].ppoint[num] =
task_thirdBbox[pp][i].col1 + (task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 + (task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_thirdBbox[pp][i].exist = false; } } if (task_count < 1) { task_thirdBbox[pp].clear(); continue; } for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--) { if (!task_thirdBbox[pp][i].exist) task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i); } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_thirdBbox[i].size(); } thirdBbox.resize(count); thirdScore.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_thirdBbox[i].size(); j++) { thirdBbox[id] = task_thirdBbox[i][j]; thirdScore[id].score = task_thirdBbox[i][j].score; thirdScore[id].oriOrder = id; id++; } } ZQ_CNN_BBoxUtils::_refine_and_square_bbox(thirdBbox, width, height, false); ZQ_CNN_OrderScore order; for (int i = 0; i < early_accept_thirdBbox.size(); i++) { order.score = early_accept_thirdBbox[i].score; order.oriOrder = count++; thirdScore.push_back(order); thirdBbox.push_back(early_accept_thirdBbox[i]); } ZQ_CNN_BBoxUtils::_nms(thirdBbox, thirdScore, nms_thresh[2], "Min"); double t5 = omp_get_wtime(); if (show_debug_info) printf("run Onet [%d] times, candidate before nms: %d \n", o_count, count); if (show_debug_info) printf("stage 3: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } bool _Lnet_stage(const ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox>& fourthBbox) { double t4 = omp_get_wtime(); fourthBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin(); std::vector<int> src_off_x, src_off_y, 
src_rect_w, src_rect_h; int l_count = 0; for (; it != thirdBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { l_count++; fourthBbox.push_back(*it); } } } std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox; ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height); for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); } int batch_size = BATCH_SIZE; int per_num = ceil((float)l_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)l_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_lnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox> > task_fourthBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(l_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_fourthBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; 
task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_fourthBbox[i][j] = copy_fourthBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[0].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* keyPoint = lnet[0].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < 5; num++) { task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic,1) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[thread_id].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* keyPoint = lnet[thread_id].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < 5; num++) { task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - 
task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_fourthBbox[i].size(); } fourthBbox.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_fourthBbox[i].size(); j++) { memcpy(fourthBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 10); id++; } } double t5 = omp_get_wtime(); if (show_debug_info) printf("run Lnet [%d] times \n", l_count); if (show_debug_info) printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4)); return true; }
/* 106-point variant of the landmark stage: same batching as _Lnet_stage but
   Lnet's "conv6-3" holds GetC()/2 (x,y) pairs stored interleaved in ppoint.
   Certain landmark index ranges (eye/eyebrow/mouth contour per the index
   tests below — TODO confirm against the model's landmark map) carry
   commented-out damping factors; both branches currently compute the same
   expression.  resultBbox geometry is filled from the pre-square boxes;
   ppoints are copied in at the end from the task results. */
bool _Lnet106_stage(const ZQ_CNN_Tensor4D_NHW_C_Align128bit& input, std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox106>& resultBbox) { double t4 = omp_get_wtime(); std::vector<ZQ_CNN_BBox> fourthBbox; std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin(); std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int l_count = 0; for (; it != thirdBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { l_count++; fourthBbox.push_back(*it); } } } std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox; ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height); for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); } int batch_size = BATCH_SIZE; int per_num = ceil((float)l_count / thread_num); int
need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)l_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_lnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox106> > task_fourthBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(l_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_fourthBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_fourthBbox[i][j].col1 = copy_fourthBbox[st_id + j].col1; task_fourthBbox[i][j].col2 = copy_fourthBbox[st_id + j].col2; task_fourthBbox[i][j].row1 = copy_fourthBbox[st_id + j].row1; task_fourthBbox[i][j].row2 = copy_fourthBbox[st_id + j].row2; task_fourthBbox[i][j].area = copy_fourthBbox[st_id + j].area; task_fourthBbox[i][j].score = copy_fourthBbox[st_id + j].score; task_fourthBbox[i][j].exist = copy_fourthBbox[st_id + j].exist; } } } resultBbox.resize(l_count); for (int i = 0; i < l_count; i++) { resultBbox[i].col1 = fourthBbox[i].col1; resultBbox[i].col2 = fourthBbox[i].col2; resultBbox[i].row1 = fourthBbox[i].row1; resultBbox[i].row2 = fourthBbox[i].row2; resultBbox[i].score = fourthBbox[i].score; resultBbox[i].exist = fourthBbox[i].exist; resultBbox[i].area = fourthBbox[i].area; } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if
(task_src_off_x[pp].size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[0].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* keyPoint = lnet[0].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keypoint_num = keyPoint->GetC() / 2; int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < keypoint_num; num++) { if ((num >= 33 && num < 43) || (num >= 64 && num < 72) || (num >= 84 && num < 104)) { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]/**0.25*/; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]/**0.25*/; } else { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]; } } } } } else {
#pragma omp parallel for num_threads(thread_num)
for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[thread_id].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D* keyPoint =
lnet[thread_id].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keypoint_num = keyPoint->GetC() / 2; int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < keypoint_num; num++) { if ((num >= 33 && num < 43) || (num >= 64 && num < 72) || (num >= 84 && num < 104)) { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]/**0.5*/; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]/**0.5*/; } else { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]; } } } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_fourthBbox[i].size(); } resultBbox.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_fourthBbox[i].size(); j++) { memcpy(resultBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 212); id++; } } double t5 = omp_get_wtime(); if (show_debug_info) printf("run Lnet [%d] times \n", l_count); if (show_debug_info) printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } void _select(std::vector<ZQ_CNN_BBox>& bbox, int limit_num, int width, int height) { int in_num = bbox.size(); if (limit_num >= in_num) return; bbox.resize(limit_num); } }; } #endif
kdGroupFinder_omp.c
// Initialization // #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <time.h> #include <assert.h> #include <sys/time.h> #include <omp.h> #include "nrutil.h" #include "kdtree.h" #include "groups.h" struct galaxy *GAL; int NGAL; int OUTPUT=0; /* Local functions */ void find_satellites(int icen, void *kd); float radial_probability(float mass, float dr, float rad, float ang_rad); float fluxlim_correction(float z); void groupfind(void); /* Variables for determining * if a galaxy is a satellite */ float BPROB=10; float BPROB_RED = 5, BPROB_XRED=0; float BPROB_BLUE = 15, BPROB_XBLUE=0; /* Variables for weighting the * central galaxies of the blue galaxies */ float WCEN_MASS = 10.5, WCEN_SIG = 0.5, WCEN_MASSR = 10.5, WCEN_SIGR = 1.0, WCEN_NORMR = 0.5, WCEN_NORM = 0.5; float PROPX_WEIGHT_RED = 1000.0, PROPX_WEIGHT_BLUE = 1000.0; float PROPX_SLOPE_RED = 0, PROPX_SLOPE_BLUE = 0; float PROPX2_WEIGHT_RED = 1000.0, PROPX2_WEIGHT_BLUE = 1000.0; float MINREDSHIFT; float MAXREDSHIFT; float GALAXY_DENSITY; float FRAC_AREA; int FLUXLIM, COLOR; int STELLAR_MASS; int ARGC; char **ARGV; int RECENTERING=0; int SECOND_PARAMETER=0; int main(int argc, char **argv) { double t0, t1; int istart,istep; int i; if(argc<5) { fprintf(stderr,"kdGroupFinder inputfile zmin zmax frac_area [fluxlim] [color] [wcenvalues 1-6] [Bsat_values 1-4] [wchi_values 1-4]> out\n"); exit(0); } ARGC = argc; ARGV = argv; MINREDSHIFT = atof(argv[2]); MAXREDSHIFT = atof(argv[3]); FRAC_AREA = atof(argv[4]); STELLAR_MASS = 0; if(argc>5) FLUXLIM = atoi(argv[5]); if(FLUXLIM<0) { FLUXLIM=0; STELLAR_MASS=1; } if(argc>6) COLOR = atoi(argv[6]); if(argc>7) { WCEN_MASS = atof(argv[7]); WCEN_SIG = atof(argv[8]); WCEN_MASSR = atof(argv[9]); WCEN_SIGR = atof(argv[10]); WCEN_NORM = atof(argv[11]); WCEN_NORMR = atof(argv[12]); } if(argc>13) { BPROB_RED = atof(argv[13]); BPROB_XRED = atof(argv[14]); BPROB_BLUE = atof(argv[15]); BPROB_XBLUE = atof(argv[16]); } if(argc>17) { SECOND_PARAMETER=1; 
PROPX_WEIGHT_BLUE = atof(argv[17]); PROPX_WEIGHT_RED = atof(argv[18]); PROPX_SLOPE_BLUE = atof(argv[19]); PROPX_SLOPE_RED = atof(argv[20]); } /* if(argc>19) { SECOND_PARAMETER=2; PROPX2_WEIGHT_BLUE = atof(argv[19]); PROPX2_WEIGHT_RED = atof(argv[20]); } */ if(argc>21) { STELLAR_MASS = atoi(argv[21]); } fprintf(stderr,"input> FLUXLIM= %d, STELLAR_MASS= %d \n",FLUXLIM,STELLAR_MASS); fprintf(stderr,"input> %f %f %f %d\n",MINREDSHIFT, MAXREDSHIFT, FRAC_AREA, FLUXLIM); fprintf(stderr,"input> %f %f %f %f %f %f\n",WCEN_MASS, WCEN_SIG, WCEN_MASSR, WCEN_SIGR, WCEN_NORM, WCEN_NORMR); fprintf(stderr,"input> SECOND_PARAMETER= %d\n",SECOND_PARAMETER); fprintf(stderr,"input> %f %f %f %f\n",BPROB_RED,BPROB_XRED,BPROB_BLUE,BPROB_XBLUE); fprintf(stderr,"input> %f %f %f %f\n",PROPX_WEIGHT_BLUE,PROPX_WEIGHT_RED, PROPX_SLOPE_BLUE,PROPX_SLOPE_RED); if(STELLAR_MASS) fprintf(stderr,"NB! STELLAR_MASS=1\n"); OUTPUT = 1; groupfind(); OUTPUT = 0; //NB: nothing with the halo files exit(0); lsat_model(); tabulate_hods(); populate_simulation_omp(-1,0,0); //lsat_model_scatter(); t0 = omp_get_wtime(); for(i=0;i<10;i+=1) { populate_simulation_omp(i/2,i%2,1); } /* #pragma omp parallel private(i,istart,istep) { istart = omp_get_thread_num(); istep = omp_get_num_threads(); for(i=istart;i<10;i+=istep) { populate_simulation_omp(i/2,i%2,istart); } } */ t1 = omp_get_wtime(); fprintf(stderr,"popsim> %.2f sec\n",t1-t0); } void groupfind() { FILE *fp; char aa[1000]; int i, i1, niter, MAX_ITER=5, j, ngrp_prev, icen_new; float frac_area, zmin, zmax, nsat_tot, weight, wx; int fluxlim = 0, colors = 1; double galden, pt[3], t0,t1,t3,t4; long IDUM1 = -555; static int *permanent_id, *itmp, *flag; static float volume, *xtmp, *lumshift; static void *kd; static int first_call=1, ngrp; if(first_call) { colors = COLOR; first_call = 0; fp = openfile(ARGV[1]); NGAL = filesize(fp); fprintf(stderr,"Allocating space for [%d] galaxies\n",NGAL); GAL = calloc(NGAL, sizeof(struct galaxy)); flag = ivector(0,NGAL-1); fluxlim = 
FLUXLIM; zmin = MINREDSHIFT; zmax = MAXREDSHIFT; // calculate the volume of the sample volume = 4./3.*PI*(pow(distance_redshift(zmax),3.0))*FRAC_AREA; volume = volume - 4./3.*PI*(pow(distance_redshift(zmin),3.0))*FRAC_AREA; galden = 0; for(i=0;i<NGAL;++i) { fscanf(fp,"%f %f %f %f",&GAL[i].ra,&GAL[i].dec,&GAL[i].redshift,&GAL[i].mstellar); GAL[i].ra *= PI/180.; GAL[i].dec *= PI/180.; GAL[i].id = i; GAL[i].rco = distance_redshift(GAL[i].redshift); // check if the stellar mass is in log if(GAL[i].mstellar<100) GAL[i].mstellar = pow(10.0,GAL[i].mstellar); // check to see if we're doing a fluxlimited sample if(fluxlim) fscanf(fp,"%f",&GAL[i].vmax); else GAL[i].vmax = volume; // check to see if we're using colors if(colors) fscanf(fp,"%f",&GAL[i].color); if(SECOND_PARAMETER) fscanf(fp,"%f",&GAL[i].propx); if(SECOND_PARAMETER==2) fscanf(fp,"%f",&GAL[i].propx2); fgets(aa,1000,fp); galden += 1/GAL[i].vmax; } fclose(fp); fprintf(stderr,"Done reading in from [%s]\n",ARGV[1]); fprintf(stderr,"Volume= %e L_box= %f\n",volume, pow(volume, THIRD)); fprintf(stderr,"Number density= %e %e\n",NGAL/volume,galden); GALAXY_DENSITY = NGAL/volume; // first sort by stellar mass xtmp = vector(1,NGAL); itmp = ivector(1,NGAL); permanent_id = ivector(1,NGAL); lumshift = vector(0,NGAL-1); for(i=1;i<=NGAL;++i) { // just for kicks, give each galaxy a random luminosity lumshift[i-1] = pow(10.0,gasdev(&IDUM1)*0.0); xtmp[i] = -(GAL[i-1].mstellar*lumshift[i-1]); itmp[i] = i-1; } fprintf(stderr,"sorting galaxies...\n"); sort2(NGAL, xtmp, itmp); fprintf(stderr,"done sorting galaxies.\n"); // do the inverse-abundance matching density2host_halo(0.01); fprintf(stderr,"Starting inverse-sham...\n"); galden = 0; // reset the sham counters if(fluxlim) density2host_halo_zbins3(-1,0); //density2host_halo_zbins(-1); for(i1=1;i1<=NGAL;++i1) { i= itmp[i1]; GAL[i].grp_rank = i1; galden += 1/GAL[i].vmax; if(fluxlim==1) //GAL[i].mass = density2host_halo_zbins(GAL[i].redshift); GAL[i].mass = 
density2host_halo_zbins3(GAL[i].redshift,GAL[i].vmax); else GAL[i].mass = density2host_halo(galden); GAL[i].rad = pow(3*GAL[i].mass/(4.*PI*DELTA_HALO*RHO_CRIT*OMEGA_M),THIRD); GAL[i].theta = GAL[i].rad/GAL[i].rco; GAL[i].sigmav = sqrt(BIG_G*GAL[i].mass/2.0/GAL[i].rad*(1+GAL[i].redshift)); GAL[i].psat = 0; j = i; GAL[j].x = GAL[j].rco * cos(GAL[j].ra) * cos(GAL[j].dec); GAL[j].y = GAL[j].rco * sin(GAL[j].ra) * cos(GAL[j].dec); GAL[j].z = GAL[j].rco * sin(GAL[j].dec); } fprintf(stderr,"Done inverse-sham.\n"); // assume that NGAL=NGROUP at first ngrp = NGAL; } // let's create a 3D KD tree fprintf(stderr,"Building KD-tree...\n"); kd = kd_create(3); for(i = 1; i <= NGAL; ++i){ j = i; permanent_id[j] = j; pt[0] = GAL[j].x; pt[1] = GAL[j].y; pt[2] = GAL[j].z; assert( kd_insert(kd, pt, (void*)&permanent_id[j]) == 0); } fprintf(stderr,"Done building KD-tree. %d\n",ngrp); // test the FOF group finder //test_fof(kd); // now let's go to the center finder //test_centering(kd); // now start the group-finding iteratin for(niter=1;niter<=MAX_ITER;++niter) { t3 = omp_get_wtime(); // first, reset the psat values for(j=0;j<NGAL;++j) { GAL[j].igrp = -1; GAL[j].psat = 0; GAL[j].nsat = 0; GAL[j].mtot = GAL[j].mstellar; weight = 1.0; if(SECOND_PARAMETER) { if(GAL[j].color<0.8) { wx = PROPX_WEIGHT_BLUE + PROPX_SLOPE_BLUE*(log10(GAL[j].mstellar)-9.5); weight = exp(GAL[j].propx/wx); } if(GAL[j].color>0.8) { wx = PROPX_WEIGHT_RED + PROPX_SLOPE_RED*(log10(GAL[j].mstellar)-9.5); weight = exp(GAL[j].propx/wx); } } if(SECOND_PARAMETER==2) { if(GAL[j].color<0.8) weight *= exp(GAL[j].propx2/PROPX2_WEIGHT_BLUE); if(GAL[j].color>0.8) weight *= exp(GAL[j].propx2/PROPX2_WEIGHT_RED); } GAL[j].mtot*=weight; if(GAL[j].color<0.8) weight = 1/pow(10.0,0.5*(1+erf((log10(GAL[j].mstellar)-WCEN_MASS)/WCEN_SIG))*WCEN_NORM); else weight = 1/pow(10.0,0.5*(1+erf((log10(GAL[j].mstellar)-WCEN_MASSR)/WCEN_SIGR))*WCEN_NORMR); //GAL[j].mtot*=weight; GAL[j].weight = weight; flag[j] = 1; } // find the satellites for each 
halo, in order of group mass ngrp_prev = ngrp; ngrp = 0; t0 = omp_get_wtime(); #pragma omp parallel for private(i1,i) for(i1=1;i1<=ngrp_prev;++i1) { i = itmp[i1]; flag[i] = 0; find_satellites(i,kd); } for(i1=1;i1<=ngrp_prev;++i1) { i = itmp[i1]; if(GAL[i].psat<0.5) { GAL[i].igrp = i; ngrp++; GAL[i].mtot *= GAL[i].weight; xtmp[ngrp] = -GAL[i].mtot; itmp[ngrp] = i; GAL[i].listid = ngrp; if(fluxlim) xtmp[ngrp] *= fluxlim_correction(GAL[i].redshift); } } t1 = omp_get_wtime(); // go back and check objects are newly-exposed centrals #pragma omp parallel for private(j) for(j=0;j<NGAL;++j) { if(flag[j] && GAL[j].psat<0.5) { find_satellites(j,kd); } } for(j=0;j<NGAL;++j) { if(flag[j] && GAL[j].psat<0.5) { ngrp++; GAL[j].igrp = j; GAL[j].mtot *= GAL[j].weight; xtmp[ngrp] = -GAL[j].mtot; itmp[ngrp] = j; GAL[j].listid = ngrp; if(fluxlim) xtmp[ngrp] *= fluxlim_correction(GAL[j].redshift); } } if(RECENTERING && niter!=MAX_ITER) { for(j=1;j<=ngrp;++j) { i = itmp[j]; if(GAL[i].mass>5e12 && GAL[i].psat<0.5) { icen_new = group_center(i,kd); if(icen_new==-1) { printf("ZERO %.1f %e\n",GAL[i].nsat,GAL[i].mass, GAL[i].psat); exit(0); } if(icen_new != i) { // transfer the halo values //printf("REC %d %d %d %d\n",niter, i, icen_new, j); //fflush(stdout); itmp[j] = icen_new; GAL[i].psat=1; GAL[i].igrp = icen_new; //need to swap all of them, fyi... 
GAL[icen_new].psat =0; GAL[icen_new].mtot = GAL[i].mtot; GAL[icen_new].nsat = GAL[i].nsat; GAL[i].nsat = 0; } } } } // sort groups by their total stellar mass sort2(ngrp,xtmp,itmp); // reassign the halo masses nsat_tot = galden = 0; // reset the sham counters if(fluxlim) density2host_halo_zbins3(-1,0); //density2host_halo_zbins(-1); for(j=1;j<=ngrp;++j) { GAL[i].grp_rank = j; i= itmp[j]; galden += 1/GAL[i].vmax; if(fluxlim==1) //GAL[i].mass = density2host_halo_zbins(GAL[i].redshift); GAL[i].mass = density2host_halo_zbins3(GAL[i].redshift,GAL[i].vmax); else GAL[i].mass = density2host_halo(galden); GAL[i].rad = pow(3*GAL[i].mass/(4.*PI*DELTA_HALO*RHO_CRIT*OMEGA_M),THIRD); GAL[i].theta = GAL[i].rad/GAL[i].rco; GAL[i].sigmav = sqrt(BIG_G*GAL[i].mass/2.0/GAL[i].rad*(1+GAL[i].redshift)); nsat_tot += GAL[i].nsat; } //density2host_halo_zbins3(1000,1); t4 = omp_get_wtime(); //for the satellites, set their host halo mass for(j=0;j<NGAL;++j) if(GAL[j].psat>0.5) GAL[j].mass = GAL[GAL[j].igrp].mass; fprintf(stderr,"iter %d ngroups=%d fsat=%f (kdtime=%.2f %.2f)\n", niter,ngrp,nsat_tot/NGAL,t1-t0,t4-t3); } /* Output to disk the final results */ if(OUTPUT) { for(i=0;i<NGAL;++i) { printf("%d %f %f %f %e %e %f %e %e %e %d %e\n", i, GAL[i].ra*180/PI, GAL[i].dec*180/PI,GAL[i].redshift, GAL[i].mstellar, GAL[i].vmax, GAL[i].psat, GAL[i].mass, GAL[i].nsat, GAL[i].mtot, GAL[i].igrp, GAL[i].weight); } fflush(stdout); } /* let's free up the memory of the kdtree */ kd_free(kd); } /* Distance-redshift relation */ float func_dr1(float z) { return pow(OMEGA_M*(1+z)*(1+z)*(1+z)+(1-OMEGA_M),-0.5); } float distance_redshift(float z) { float x; if(z<=0)return 0; x= c_on_H0*qromo(func_dr1,0.0,z,midpnt); return x; } /* Here is the main code to find satellites for a given central galaxy */ void find_satellites(int icen, void *kd) { int j, k; float dx, dy, dz, theta, prob_ang, vol_corr, prob_rad, grp_lum, p0, range; float cenDist, bprob; void *set; int *pch; double cen[3]; double sat[3]; // check if 
this galaxy has already been given to a group if(GAL[icen].psat>0.5)return; // Use the k-d tree kd to identify the nearest galaxies to the central. cen[0] = GAL[icen].x; cen[1] = GAL[icen].y; cen[2] = GAL[icen].z; // Nearest neighbour search should go out to about 4*sigma, the velocity dispersion of the SHAMed halo. // find all galaxies in 3D that are within 4sigma of the velocity dispersion range = 4*GAL[icen].sigmav/100.0*(1+GAL[icen].redshift)/ sqrt(OMEGA_M*pow(1+GAL[icen].redshift,3.0) + 1-OMEGA_M); set = kd_nearest_range(kd, cen, range); // Set now contains the nearest neighbours within a distance range. Grab their info. while( !kd_res_end(set)) { // Get index value of the current neighbor pch = (int*)kd_res_item(set, sat); j = *pch; kd_res_next(set); //printf("%d %d %f %f %f %f\n",j,icen,GAL[icen].x, GAL[j].x, range,sat[0]); // Skip if target galaxy is the same as the central (obviously). if(j == icen)continue; // skip if the object is more massive than the icen if(GAL[j].mstellar>=GAL[icen].mstellar)continue; // Skip if already assigned to a central. //if(GAL[j].psat>0.5)continue; // UNLESS current group has priority if(GAL[j].psat>0.5 && GAL[icen].grp_rank>GAL[GAL[j].igrp].grp_rank)continue; // check if the galaxy is outside the angular radius of the halo dz = fabs(GAL[icen].redshift - GAL[j].redshift)*SPEED_OF_LIGHT; theta = angular_separation(GAL[icen].ra,GAL[icen].dec,GAL[j].ra,GAL[j].dec); if(theta > GAL[icen].theta){ continue; } // Now determine the probability of being a satellite //(both projected onto the sky, and along the line of sight). 
prob_ang = radial_probability(GAL[icen].mass, theta, GAL[icen].rad, GAL[icen].theta); prob_rad = exp(-dz*dz/(2*GAL[icen].sigmav*GAL[icen].sigmav)) *SPEED_OF_LIGHT/(RT2PI*GAL[icen].sigmav); // set the background level if(GAL[j].color>0.8) bprob = BPROB_RED + (log10(GAL[j].mstellar)-9.5)*BPROB_XRED; else bprob = BPROB_BLUE + (log10(GAL[j].mstellar)-9.5)*BPROB_XBLUE; // let's put a lower limit of the prob if(bprob<0.001) bprob = 0.001; // combine them into the total probability p0 = (1 - 1/(1 + prob_ang * prob_rad / bprob)); if(isnan(p0))p0 = 1; //??? if(p0>GAL[j].psat)GAL[j].psat = p0; if(p0<0.5)continue; // this is considered a member of the group // NB if this was previously a member of other (lower-rank) // group, remove it from that. if(GAL[j].igrp>=0) { GAL[GAL[j].igrp].nsat--; GAL[GAL[j].igrp].mtot-=GAL[j].mstellar; } GAL[j].psat = p0; GAL[j].igrp = icen; GAL[icen].mtot += GAL[j].mstellar; GAL[icen].nsat++; } //exit(0); // Correct for boundary conditions if(!FLUXLIM) { dz = SPEED_OF_LIGHT* fabs(GAL[icen].redshift - MINREDSHIFT); vol_corr = 1-(0.5*erfc(dz/(ROOT2*GAL[icen].sigmav))); GAL[icen].nsat /= vol_corr; GAL[icen].mtot /= vol_corr; dz = SPEED_OF_LIGHT* fabs(GAL[icen].redshift - MAXREDSHIFT); vol_corr = 1-(0.5*erfc(dz/(ROOT2*GAL[j].sigmav))); GAL[icen].nsat /= vol_corr; GAL[icen].mtot /= vol_corr; } } /* angular separation between two points in ra/dec */ float angular_separation(float a1, float d1, float a2, float d2) { float cd1,cd2,sd1,sd2,ca1a2,sa1a2; return atan((sqrt(cos(d2)*cos(d2)*sin(a2-a1)*sin(a2-a1) + pow(cos(d1)*sin(d2) - sin(d1)*cos(d2)*cos(a2-a1),2.0)))/ (sin(d1)*sin(d2) + cos(d1)*cos(d2)*cos(a2-a1))); } /* Probability assuming a projected NFW profile */ float radial_probability(float mass, float dr, float rad, float ang_rad) { float c, x, rs, delta, f; dr = dr*rad/ang_rad; c = 10.0*pow(mass/1.0E+14,-0.11); rs = rad/c; x = dr/rs; if(x<1) f = 1/(x*x-1)*(1-log((1+sqrt(1-x*x))/x)/(sqrt(1-x*x))); if(x==1) f = 1.0/3.0; if(x>1) f = 
1/(x*x-1)*(1-atan(sqrt(x*x-1))/sqrt(x*x-1)); delta = DELTA_HALO/3.0*c*c*c/(log(1+c)-c/(1+c)); return 1.0/c_on_H0*2*rs*delta*f; } /* This is calibrated from the MXXL BGS mock, * from ratio of luminosity density in redshift * bins relative to total 1/Vmax-weighted luminosity * density. (SLightly different than Yang et al). * * luminosity_correction.py */ float fluxlim_correction(float z) { return pow(10.0,pow(z/0.18,2.8)*0.5); // rho_lum(z) for SDSS (r=17.77; MXXL) return 1; //no correction return pow(10.0,pow(z/0.16,2.5)*0.6); // SDSS (sham mock) return pow(10.0,pow(z/0.40,4.0)*0.4); // from rho_lum(z) BGS }
camera3d.h
#ifndef CAMERA3D_H_ #define CAMERA3D_H_ #include "vector3d.h" #include "scene3d.h" #include <cassert> #include <iostream> #include <fstream> #include <memory> class camera3d { public: vector3d origin; float fov; int width; int height; camera3d() : xray(1, 0, 0) , yray(0, 1, 0) , zray(0, 0, 1) , fov(120) , width(640) , height(480) {} void look_at(const vector3d &point) { zray = point - origin; zray.normalize(); yray = vector3d(0, 1, 0); xray = zray * yray; xray.normalize(); yray = xray * zray; yray.normalize(); const double eps = 1e-7; assert(fabs(abs(xray) - 1) < eps); assert(fabs(abs(yray) - 1) < eps); assert(fabs(abs(zray) - 1) < eps); assert(fabs(dot_product(xray, yray)) < eps); assert(fabs(dot_product(xray, zray)) < eps); assert(fabs(dot_product(yray, zray)) < eps); } void render_to_file(const scene3d &scene, const char *path); private: vector3d xray; vector3d yray; vector3d zray; }; void camera3d::render_to_file(const scene3d &scene, const char *path) { const double pi = 3.1415926535897932384626433832795; const double focal_length_inv = 2 * tan(fov / 2) / width; std::unique_ptr<uint8_t[]> data(new uint8_t[width * height * 3]); vector3d dx = xray * focal_length_inv; vector3d dy = yray * focal_length_inv; #pragma omp parallel for for (int i = 0; i < height; ++i) { uint8_t *row = data.get() + i * (width * 3); ray3d ray; ray.origin = origin; vector3d direction = zray + dy * (i - (height - 1) * 0.5) - dx * ((width - 1) * 0.5); for (int j = 0; j < width; ++j) { ray.direction = direction; ray.direction.normalize(); color3d color = scene.trace(ray); int alpha = color.a + 1; row[0] = color.r * alpha >> 8; row[1] = color.g * alpha >> 8; row[2] = color.b * alpha >> 8; row += 3; direction += dx; } } std::ofstream fout(path, std::ios::out | std::ios::binary); fout << "P6" << std::endl; fout << width << ' ' << height << std::endl; fout << 255 << std::endl; fout.write((char*)data.get(), width * height * 3); } #endif
kmeans13.c
/*
   Description: This program executes the K-Means algorithm for random vectors
                of arbitrary number and dimensions
   Author: Georgios Evangelou (1046900)
   Year: 5
   Parallel Programming in Machine Learning Problems
   Electrical and Computer Engineering Department, University of Patras

   System Specifications:
      CPU: AMD Ryzen 2600 (6 cores/12 threads, @3.8 GHz, 6786.23 bogomips)
      GPU: Nvidia GTX 1050 (dual-fan, overclocked)
      RAM: 8GB (dual-channel, @2666 MHz)

   Version Notes:
      Compiles with: gcc kmeans13.c -o kmeans13 -lm -fopt-info -fopenmp -O3
      Inherits all settings of the previous version unless stated otherwise
      Executes the algorithm for 100000 vectors of 1000 dimensions and 100 classes
      Needs ~5 seconds to reach 16 repetitions with all optimizations and schedule(static)
*/

// *******************************************************************
#pragma GCC optimize("O3","unroll-loops","omit-frame-pointer","inline", "unsafe-math-optimizations") //Apply O3 and extra optimizations
#pragma GCC option("arch=native","tune=native","no-zero-upper") //Adapt to the current system
#pragma GCC target("avx") //Enable AVX
// *******************************************************************

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>

// ***************************************************
#define N 100000            // number of vectors
#define Nv 1000             // dimensions per vector
#define Nc 100              // number of classes (centers)
#define THRESHOLD 0.000001  // relative improvement below which we stop
#define MAX_REPETITIONS 16
// ***************************************************

float Vectors[N][Nv];  // N vectors of Nv dimensions
float Centers[Nc][Nv]; // Nc vectors of Nv dimensions
int Class_of_Vec[N];   // Class of each Vector

// ***************************************************
// Print vectors
// ***************************************************
void printVectors(void) {
    int i, j;
    printf("\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
    for (i = 0; i < N; i++) {
        printf("--------------------\n");
        printf(" Vector #%d is:\n", i);
        for (j = 0; j < Nv; j++) printf(" %f\n", Vectors[i][j]);
    }
    printf("--------------------\n");
    printf("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n\n");
}

// ***************************************************
// Print centers
// ***************************************************
void printCenters(void) {
    int i, j;
    printf("\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
    for (i = 0; i < Nc; i++) {
        printf("--------------------\n");
        printf(" Center #%d is:\n", i);
        for (j = 0; j < Nv; j++) printf(" %f\n", Centers[i][j]);
    }
    printf("--------------------\n");
    printf("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n\n");
}

// ***************************************************
// Print the class of each vector
// ***************************************************
void printClasses(void) {
    int i;
    printf("\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
    for (i = 0; i < N; i++) {
        printf("--------------------\n");
        printf(" Class of Vector #%d is:\n", i);
        printf(" %d\n", Class_of_Vec[i]);
    }
    printf("--------------------\n");
    printf("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n\n");
}

// ****************************************************
// Returns 1 if Vec differs from every center in Centers[0..maxIndex-1]
// ****************************************************
int notVectorInCenters(float Vec[Nv], int maxIndex) {
    for (int c = 0; c < maxIndex; c++) {
        int flag = 1;
        for (int i = 0; i < Nv; i++) {
            if (Vec[i] != Centers[c][i]) {
                // A differing dimension means Vec != center c; stop checking it.
                flag = 0;
                break;
            }
        }
        if (flag) // Vec is identical to center c...
            return 0; // ...so it is unsuitable to become a new center.
    }
    return 1;
}

// ****************************************************
// Picks a new center when the last one has no members
// ****************************************************
void pickSubstituteCenter(int indexOfCenterToChange) {
    int currentVec = 0;
    // Searching for a vector that is not a center, so as to mark it as one
    printf("> Now searching for a substitute center...\n");
    do {
        printf(">> Now examining vec:%d\n", currentVec);
        if (notVectorInCenters(Vectors[currentVec], Nc)) {
            printf(">>> Current vec is not in existing centers\n");
            for (int i = 0; i < Nv; i++)
                Centers[indexOfCenterToChange][i] = Vectors[currentVec][i];
            printf(">>> Substituted old center with current vector\n");
            return; // If a substitute center is found, stop this function
        }
        printf(">>> WARNING: If the center was substituted, this line must not be present\n");
        currentVec++; // else continue searching
    } while (currentVec < N);
    printf("\n");
    return;
}

// ****************************************************
// Chooses the first unique Nc vectors as class centers
// ****************************************************
void initCenters2() {
    int currentCenter = 0, currentVec = 0;
    // FIX: the original do-while only bounded currentCenter, so input with
    // fewer than Nc unique vectors would read past Vectors[N-1].
    while (currentCenter < Nc && currentVec < N) {
        if (notVectorInCenters(Vectors[currentVec], currentCenter)) {
            for (int i = 0; i < Nv; i++)
                Centers[currentCenter][i] = Vectors[currentVec][i];
            currentCenter++;
        }
        currentVec++;
    }
    if (currentCenter < Nc)
        printf("WARNING: only %d unique vectors available for %d centers\n", currentCenter, Nc);
}

// *************************************************************************
// Assigns each vector to its closest center and returns the sum of
// distances between all vectors and their closest center
// *************************************************************************
float estimateClasses() {
    float tot_min_distances = 0;
    #pragma omp parallel for reduction(+:tot_min_distances) schedule(static)
    for (int w = 0; w < N; w++) {
        float min_dist = 1e30;
        int temp_class = -1;
        for (int i = 0; i < Nc; i++) {
            float dist = 0;
            // If <reduction> is omitted, the compiler protects from math error
            // and does not perform SIMD
            #pragma omp simd reduction(+:dist)
            for (int j = 0; j < Nv; j++) // squared distance between Vec w and Center i
                dist += (Vectors[w][j] - Centers[i][j]) * (Vectors[w][j] - Centers[i][j]);
            if (dist < min_dist) {
                temp_class = i;
                min_dist = dist;
            }
        }
        Class_of_Vec[w] = temp_class;           // Update the current vector's class
        tot_min_distances += sqrt(min_dist);    // Increase the sum of distances
    }
    return tot_min_distances;
}

// ***************************************************
// Find the new centers (mean of each class's members)
// ***************************************************
void estimateCenters() {
    int Centers_matchings[Nc] = {0};
    int needToRecalculateCenters = 0;

    // Zero all center vectors
    for (int i = 0; i < Nc; i++)
        for (int j = 0; j < Nv; j++)
            Centers[i][j] = 0;

    // Add each vector's values to its corresponding center
    for (int w = 0; w < N; w++) {
        Centers_matchings[Class_of_Vec[w]]++;
        for (int j = 0; j < Nv; j++)
            Centers[Class_of_Vec[w]][j] += Vectors[w][j];
    }

    for (int i = 0; i < Nc; i++) {
        if (Centers_matchings[i] != 0) {
            for (int j = 0; j < Nv; j++)
                Centers[i][j] /= Centers_matchings[i];
        } else {
            // Empty class: substitute its center and redo the averaging.
            printf("\nWARNING: Center %d has no members.\n", i);
            pickSubstituteCenter(i);
            needToRecalculateCenters = 1;
            break;
        }
    }
    if (needToRecalculateCenters == 1)
        estimateCenters();
}

// ***************************************************
// Initializing the vectors with random values
// ***************************************************
void SetVec(void) {
    for (int i = 0; i < N; i++)
        for (int j = 0; j < Nv; j++)
            Vectors[i][j] = (1.0 * rand()) / RAND_MAX;
}

// ***************************************************
// The main program
// ***************************************************
int main(int argc, const char* argv[]) {
    int repetitions = 0;
    float totDist, prevDist, diff;

    printf("--------------------------------------------------------------------------------------------------\n");
    printf("This program executes the K-Means algorithm for random vectors of arbitrary number and dimensions.\n");
    printf("Current configuration has %d Vectors, %d Classes and %d Elements per vector.\n", N, Nc, Nv);
    printf("--------------------------------------------------------------------------------------------------\n");

    printf("Now initializing vectors...\n");
    SetVec();
    printf("Now initializing centers...\n");
    initCenters2();

    totDist = 1.0e30;
    printf("Now running the main algorithm...\n\n");
    do {
        repetitions++;
        prevDist = totDist;
        totDist = estimateClasses();
        estimateCenters();
        diff = (prevDist - totDist) / totDist; // relative improvement
        printf(">> REPETITION: %3d || ", repetitions);
        printf("DISTANCE IMPROVEMENT: %.6f \n", diff);
    } while ((diff > THRESHOLD) && (repetitions < MAX_REPETITIONS));

    printf("\n\nProcess finished!\n");
    printf("Total repetitions were: %d\n", repetitions);
    return 0;
}
//**********************************************************************************************************
round_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"

#include <math.h>

/* Element-wise round from input_tensor into output_tensor (fp32).
 * Returns 0 on success, -1 for unsupported ranks (> 4 dims). */
int ref_round_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    /* dims size < 4: flat element-wise loop */
    if (input_tensor->dim_num < 4)
    {
        float* input_data = (float*)input_tensor->data;
        float* out_data = (float*)output_tensor->data;
        int total_size = input_tensor->elem_num;

        for (int i = 0; i < total_size; i++)
        {
            /* FIX: original did `input_data[i] = round(out_data[i])`,
             * rounding the (uninitialized) output into the input. */
            out_data[i] = round(input_data[i]);
        }

        return 0;
    }
    /* dims size == 4 (NCHW): parallelize over channels.
     * NOTE(review): indexing ignores dims[0]; assumes batch size 1 -- TODO confirm. */
    else if (input_tensor->dim_num == 4)
    {
        int w = input_tensor->dims[3];
        int h = input_tensor->dims[2]; /* FIX: read h from input dims like w/channels (was output dims) */
        int channels = input_tensor->dims[1];
        int size = h * w;
        int c_step = h * w;

        float* input_data = (float*)input_tensor->data;
        float* out_data = (float*)output_tensor->data;

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            float* src = input_data + c_step * q;
            float* dst = out_data + c_step * q;

            for (int i = 0; i < size; i++)
            {
                dst[i] = round(src[i]);
            }
        }

        return 0;
    }

    return -1;
}

/* Node lifecycle hooks: round needs no per-node state. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    // exec_node->inplace_map[0] = 0;
    // exec_node->inplace_map[1] = 0;
    // exec_node->inplace_map_num = 1;
    return 0;
}

static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    // exec_node->inplace_map_num = 0;
    return 0;
}

static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Fetch the node's input/output tensors and run the fp32 kernel. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;
    struct tensor* output_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    // inplace inference
    // if(input_tensor->data != output_tensor->data)
    // {
    //     TLOG_ERR("input and output are not the same mem\n");
    //     set_tengine_errno(EFAULT);
    //     return -1;
    // }

    int ret = ref_round_fp32(input_tensor, output_tensor, exec_graph->num_thread);
    if (ret != 0)
        return -1;

    return 0;
}

static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    return OPS_SCORE_CANDO;
}

static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

int register_round_ref_op()
{
    return register_builtin_node_ops(OP_ROUND, &hcl_node_ops);
}

int unregister_round_ref_op()
{
    return unregister_builtin_node_ops(OP_ROUND, &hcl_node_ops);
}
general_basis_get_vec.h
#ifndef _GENERAL_BASIS_GET_VEC_H
#define _GENERAL_BASIS_GET_VEC_H

// NOTE(review): leading-underscore + uppercase guard names are reserved
// identifiers in C++; consider renaming in a future cleanup.

#include "general_basis_core.h"
#include "numpy/ndarraytypes.h"
#include "misc.h"

namespace basis_general {

// Accumulate c * sign * in[] into out[] for a complex-valued output vector.
// Always succeeds (the complex phase can be represented exactly).
template<class T>
bool inline update_out_dense(std::complex<double> c, int sign, npy_intp n_vec,const std::complex<T> *in, std::complex<T> *out){
	for(npy_intp i=0;i<n_vec;i++){
		out[i] += T(sign) * std::complex<T>(c) * in[i];
	}
	return true;
}

// Real-valued overload: only valid when the phase c is (numerically) real.
// Returns false if |Im(c)| exceeds the tolerance, signalling that the caller
// asked for a complex expansion into a real output array.
template<class T>
bool inline update_out_dense(std::complex<double> c, int sign, npy_intp n_vec,const T *in, T *out){
	if(std::abs(c.imag())>1.1e-15){
		return false;
	}
	else{
		T re = c.real();
		for(npy_intp i=0;i<n_vec;i++){
			out[i] += T(sign) * re * in[i];
		}
		return true;
	}
}

// Recursively expand one representative state s into all states of its symmetry
// orbit, depositing the phase-weighted coefficients into the dense output.
// Recursion is over the nt symmetry transformations; at each depth the state is
// repeatedly mapped (B->map_state) while the phase c is multiplied by cc.
// NOTE: s, sign and c are deliberately passed/updated by value or reference so
// each loop iteration sees the cumulative transformation -- order matters here.
// Output rows are indexed as (Ns_full - s - 1), i.e. states in descending order.
template<class I,class T>
bool get_vec_rep(general_basis_core<I> *B, I s, int &sign, const int nt, const npy_intp n_vec, const npy_intp Ns_full, const T in[], std::complex<double> c, T out[], const int depth)
{
	bool err = true;

	// no symmetries: single direct scatter
	if(nt<=0){
		const npy_intp full = (Ns_full - s - 1)*n_vec;
		err = update_out_dense(c,sign,n_vec,in,&out[full]);
		return err;
	}

	int per = B->pers[depth];                       // periodicity of this symmetry
	double q = (2.0*M_PI*B->qs[depth])/per;         // Bloch momentum / quantum number phase
	std::complex<double> cc = std::exp(std::complex<double>(0,-q));

	if(depth < nt-1){
		// intermediate depth: recurse over the remaining symmetries
		for(int j=0;j<per && err;j++){
			err = get_vec_rep(B,s,sign,nt,n_vec,Ns_full,in,c,out,depth+1);
			c *= cc;
			s = B->map_state(s,depth,sign);
		}
		return err;
	}
	else{
		// last depth: scatter directly, avoiding one more recursion level
		for(int j=0;j<per && err;j++){
			const npy_intp full = (Ns_full - s - 1)*n_vec;
			err = update_out_dense(c,sign,n_vec,in,&out[full]);
			c *= cc;
			s = B->map_state(s,depth,sign);
		}
		return err;
	}
}

// Same as get_vec_rep, but for particle-conserving bases: the row index of a
// state is found by binary search in the sorted basis_pcon[] array rather than
// computed arithmetically.
template<class I,class T>
bool get_vec_rep_pcon(general_basis_core<I> *B, I s, int &sign, const int nt, const npy_intp n_vec, const I basis_pcon[], const npy_intp Ns_full, const T in[], std::complex<double> c, T out[], const int depth)
{
	bool err = true;

	if(nt<=0){
		const npy_intp full = binary_search(Ns_full,basis_pcon,s)*n_vec;
		err = update_out_dense(c,sign,n_vec,in,&out[full]);
		return err;
	}

	int per = B->pers[depth];
	double q = (2.0*M_PI*B->qs[depth])/per;
	std::complex<double> cc = std::exp(std::complex<double>(0,-q));

	if(depth < nt-1){
		for(int j=0;j<per && err;j++){
			err = get_vec_rep_pcon(B,s,sign,nt,n_vec,basis_pcon,Ns_full,in,c,out,depth+1);
			c *= cc;
			s = B->map_state(s,depth,sign);
		}
		return err;
	}
	else{
		for(int j=0;j<per && err;j++){
			const npy_intp full = binary_search(Ns_full,basis_pcon,s)*n_vec;
			err = update_out_dense(c,sign,n_vec,in,&out[full]);
			c *= cc;
			s = B->map_state(s,depth,sign);
		}
		return err;
	}
}

// Driver: expand every representative in basis[] (particle-conserving case)
// into the full dense output.  Each representative k is normalized by
// 1/sqrt(n[k]*norm), norm being the product of all symmetry periodicities.
// NOTE(review): `err` is read by all threads without synchronization in the
// `if(!err) continue;` early-out; only the (monotone true->false) write is
// guarded by the critical section -- presumably an accepted benign race here.
template<class I,class J,class T>
bool get_vec_general_pcon_dense(general_basis_core<I> *B, const I basis[], const J n[], const npy_intp n_vec, const npy_intp Ns, const npy_intp Ns_full, const I basis_pcon[], const T in[], T out[])
{
	bool err = true;
	const int nt = B->get_nt();
	// chunk size keeps ~100 chunks per thread for dynamic load balancing
	const npy_intp chunk = std::max(Ns/(100*omp_get_max_threads()),(npy_intp)1);
	double norm = 1.0;
	for(int i=0;i<nt;i++){
		norm *= B->pers[i];
	}

	#pragma omp parallel for schedule(dynamic,chunk) firstprivate(norm)
	for(npy_intp k=0;k<Ns;k++){
		if(!err){continue;}
		std::complex<double> c = 1.0/std::sqrt(n[k]*norm);
		int sign = 1;
		bool local_err = get_vec_rep_pcon(B,basis[k],sign,nt,n_vec,basis_pcon,Ns_full,&in[k*n_vec],c,out,0);
		if(!local_err){
			#pragma omp critical
			err = local_err;
		}
	}

	return err;
}

// Driver for the non-particle-conserving case; identical structure to
// get_vec_general_pcon_dense but uses the arithmetic row index via get_vec_rep.
template<class I,class J,class T>
bool get_vec_general_dense(general_basis_core<I> *B, const I basis[], const J n[], const npy_intp n_vec, const npy_intp Ns, const npy_intp Ns_full, const T in[], T out[])
{
	bool err = true;
	const int nt = B->get_nt();
	const npy_intp chunk = std::max(Ns/(100*omp_get_max_threads()),(npy_intp)1);
	double norm = 1.0;
	for(int i=0;i<nt;i++){
		norm *= B->pers[i];
	}

	#pragma omp parallel for schedule(dynamic,chunk) firstprivate(norm)
	for(npy_intp k=0;k<Ns;k++){
		if(!err){continue;}
		std::complex<double> c = 1.0/std::sqrt(n[k]*norm);
		int sign = 1;
		bool local_err = get_vec_rep(B,basis[k],sign,nt,n_vec,Ns_full,&in[k*n_vec],c,out,0);
		if(!local_err){
			#pragma omp critical
			err = local_err;
		}
	}

	return err;
}

}

#endif
DRB058-jacobikernel-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
Two parallel for loops within one single parallel region,
combined with private() and reduction().
*/
/* NOTE(review): this file is a DataRaceBench "orig-no" benchmark: the code
 * below is intentionally written this way (including the nested
 * `parallel for` pragmas, which most OpenMP runtimes serialize because
 * nested parallelism is disabled by default) and contains NO data race.
 * Do not "fix" the pragmas -- they are the test case. */

#include <stdio.h>
#include <math.h>

#define MSIZE 200
/* Grid dimensions and solver parameters (global, shared by both kernels). */
int n=MSIZE, m=MSIZE, mits=1000;
double tol=0.0000000001, relax = 1.0, alpha = 0.0543;
/* u: solution, f: right-hand side, uold: previous-iteration copy of u. */
double u[MSIZE][MSIZE], f[MSIZE][MSIZE], uold[MSIZE][MSIZE];
double dx, dy;

/* Fill u with zeros and f with the RHS of the Helmholtz-like problem on
 * the [-1,1] x [-1,1] grid. */
void initialize ()
{
  int i, j, xx, yy;

  dx = 2.0 / (n - 1);
  dy = 2.0 / (m - 1);

/* Initialize initial condition and RHS */
#pragma omp parallel for private(i ,j ,xx ,yy )
  for (i = 0; i < n; i++)
#pragma omp parallel for private(j ,xx ,yy )
    for (j = 0; j < m; j++)
      {
        /* xx/yy are truncated to int on purpose in this benchmark. */
        xx = (int) (-1.0 + dx * (i - 1));       /* -1 < x < 1 */
        yy = (int) (-1.0 + dy * (j - 1));       /* -1 < y < 1 */
        u[i][j] = 0.0;
        f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)
          - 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);
      }
}

/* Jacobi relaxation: each sweep copies u into uold, then updates the
 * interior of u from uold only, so concurrent iterations never conflict.
 * The squared residual is accumulated with reduction(+:error). */
void jacobi ()
{
  double omega;
  int i, j, k;
  double error, resid, ax, ay, b;

  omega = relax;
/* Initialize coefficients */
  dx = 2.0 / (n - 1);
  dy = 2.0 / (m - 1);

  ax = 1.0 / (dx * dx);         /* X-direction coef */
  ay = 1.0 / (dy * dy);         /* Y-direction coef */
  b = -2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha;       /* Central coeff */

  error = 10.0 * tol;
  k = 1;

  while (k <= mits)
    {
      error = 0.0;

/* Copy new solution into old */
#pragma omp parallel for private(j )
      for (i = 0; i < n; i++)
#pragma omp parallel for
        for (j = 0; j < m; j++)
          uold[i][j] = u[i][j];

/* Update interior points; reads only uold, writes only u[i][j]. */
#pragma omp parallel for private(j ,resid ) reduction(+:error)
      for (i = 1; i < (n - 1); i++)
#pragma omp parallel for private(resid ) reduction(+:error)
        for (j = 1; j < (m - 1); j++)
          {
            resid = (ax * (uold[i - 1][j] + uold[i + 1][j])
                     + ay * (uold[i][j - 1] + uold[i][j + 1]) + b * uold[i][j] - f[i][j]) / b;

            u[i][j] = uold[i][j] - omega * resid;
            error = error + resid * resid;
          }
/* Error check */
      k = k + 1;
      error = sqrt (error) / (n * m);
    }                           /* End iteration loop */

  printf ("Total Number of Iterations:%d\n", k);
  printf ("Residual:%E\n", error);
}

int main()
{
  initialize();
  jacobi();
  return 0;
}
GB_binop__minus_int64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): all real logic lives in the #include'd template files; this
// file only instantiates them for the MINUS operator on int64_t via the
// GB_* macros below.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__minus_int64)
// A.*B function (eWiseMult):       GB (_AemultB_08__minus_int64)
// A.*B function (eWiseMult):       GB (_AemultB_02__minus_int64)
// A.*B function (eWiseMult):       GB (_AemultB_04__minus_int64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__minus_int64)
// A*D function (colscale):         GB (_AxD__minus_int64)
// D*A function (rowscale):         GB (_DxB__minus_int64)
// C+=B function (dense accum):     GB (_Cdense_accumB__minus_int64)
// C+=b function (dense accum):     GB (_Cdense_accumb__minus_int64)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__minus_int64)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__minus_int64)
// C=scalar+B                       GB (_bind1st__minus_int64)
// C=scalar+B'                      GB (_bind1st_tran__minus_int64)
// C=A+scalar                       GB (_bind2nd__minus_int64)
// C=A'+scalar                      GB (_bind2nd_tran__minus_int64)

// C type:   int64_t
// A type:   int64_t
// A pattern? 0
// B type:   int64_t
// B pattern? 0

// BinaryOp: cij = (aij - bij)

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x - y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_INT64 || GxB_NO_MINUS_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__minus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__minus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__minus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__minus_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable; the template block above always returns.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__minus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__minus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__minus_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read when is_eWiseUnion is true
    int64_t alpha_scalar ;
    int64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__minus_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__minus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__minus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__minus_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__minus_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t   x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // Bb is the bitmap; skip entries not present
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__minus_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t   y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x - aij) ;                       \
}

GrB_Info GB (_bind1st_tran__minus_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code after this point
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij - y) ;                       \
}

GrB_Info GB (_bind2nd_tran__minus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
displacement_lagrangemultiplier_contact_criteria.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H) #define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H /* System includes */ /* External includes */ /* Project includes */ #include "utilities/table_stream_utility.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" #include "utilities/color_utilities.h" #include "utilities/constraint_utilities.h" namespace Kratos { ///@addtogroup ContactStructuralMechanicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@name Kratos Classes ///@{ /** * @class DisplacementLagrangeMultiplierContactCriteria * @ingroup ContactStructuralMechanicsApplication * @brief Convergence criteria for contact problems * @details This class implements a convergence control based on nodal displacement and * lagrange multiplier values. The error is evaluated separately for each of them, and * relative and absolute tolerances for both must be specified. 
* @author Vicente Mataix Ferrandiz */ template< class TSparseSpace, class TDenseSpace > class DisplacementLagrangeMultiplierContactCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ /// Pointer definition of DisplacementLagrangeMultiplierContactCriteria KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierContactCriteria ); /// Local Flags KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT ); KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT ); KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED ); /// The base class definition (and it subclasses) typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; /// The sparse space used typedef TSparseSpace SparseSpaceType; /// The r_table stream definition TODO: Replace by logger typedef TableStreamUtility::Pointer TablePrinterPointerType; /// The index type definition typedef std::size_t IndexType; /// The key type definition typedef std::size_t KeyType; /// The epsilon tolerance definition static constexpr double Tolerance = std::numeric_limits<double>::epsilon(); ///@} ///@name Life Cycle ///@{ /// Constructor. 
/** * @param DispRatioTolerance Relative tolerance for displacement error * @param DispAbsTolerance Absolute tolerance for displacement error * @param LMRatioTolerance Relative tolerance for lagrange multiplier error * @param LMAbsTolerance Absolute tolerance for lagrange multiplier error * @param EnsureContact To check if the contact is lost * @param pTable The pointer to the output r_table * @param PrintingOutput If the output is going to be printed in a txt file */ explicit DisplacementLagrangeMultiplierContactCriteria( const TDataType DispRatioTolerance, const TDataType DispAbsTolerance, const TDataType LMRatioTolerance, const TDataType LMAbsTolerance, const bool EnsureContact = false, const bool PrintingOutput = false ) : BaseType() { // Set local flags mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT, EnsureContact); mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT, PrintingOutput); mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED, false); // The displacement solution mDispRatioTolerance = DispRatioTolerance; mDispAbsTolerance = DispAbsTolerance; // The contact solution mLMRatioTolerance = LMRatioTolerance; mLMAbsTolerance = LMAbsTolerance; } /** * @brief Default constructor (parameters) * @param ThisParameters The configuration parameters */ explicit DisplacementLagrangeMultiplierContactCriteria( Parameters ThisParameters = Parameters(R"({})")) : BaseType() { // The default parameters Parameters default_parameters = Parameters(R"( { "ensure_contact" : false, "print_convergence_criterion" : false, "displacement_relative_tolerance" : 1.0e-4, "displacement_absolute_tolerance" : 1.0e-9, "contact_displacement_relative_tolerance" : 1.0e-4, "contact_displacement_absolute_tolerance" : 1.0e-9 })" ); ThisParameters.ValidateAndAssignDefaults(default_parameters); // The displacement solution mDispRatioTolerance = ThisParameters["displacement_relative_tolerance"].GetDouble(); 
mDispAbsTolerance = ThisParameters["displacement_absolute_tolerance"].GetDouble(); // The contact solution mLMRatioTolerance = ThisParameters["contact_displacement_relative_tolerance"].GetDouble(); mLMAbsTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble(); // Set local flags mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED, false); } // Copy constructor. DisplacementLagrangeMultiplierContactCriteria( DisplacementLagrangeMultiplierContactCriteria const& rOther ) :BaseType(rOther) ,mOptions(rOther.mOptions) ,mDispRatioTolerance(rOther.mDispRatioTolerance) ,mDispAbsTolerance(rOther.mDispAbsTolerance) ,mLMRatioTolerance(rOther.mLMRatioTolerance) ,mLMAbsTolerance(rOther.mLMAbsTolerance) { } /// Destructor. ~DisplacementLagrangeMultiplierContactCriteria() override = default; ///@} ///@name Operators ///@{ /** * @brief Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the contact problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { if (SparseSpaceType::Size(rDx) != 0) { //if we are solving for something // Initialize TDataType disp_solution_norm = 0.0, lm_solution_norm = 0.0, disp_increase_norm = 0.0, lm_increase_norm = 0.0; IndexType disp_dof_num(0),lm_dof_num(0); // First iterator const auto it_dof_begin = rDofSet.begin(); // Auxiliar values std::size_t dof_id = 0; TDataType dof_value = 0.0, dof_incr = 0.0; // Loop over Dofs #pragma omp parallel for reduction(+:disp_solution_norm,lm_solution_norm,disp_increase_norm,lm_increase_norm,disp_dof_num,lm_dof_num,dof_id,dof_value,dof_incr) for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) { auto it_dof = it_dof_begin + i; dof_id = it_dof->EquationId(); if (mActiveDofs[dof_id]) { dof_value = it_dof->GetSolutionStepValue(0); dof_incr = rDx[dof_id]; const auto curr_var = it_dof->GetVariable(); if ((curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) { lm_solution_norm += dof_value * dof_value; lm_increase_norm += dof_incr * dof_incr; lm_dof_num++; } else { disp_solution_norm += dof_value * dof_value; disp_increase_norm += dof_incr * dof_incr; disp_dof_num++; } } } if(disp_increase_norm < Tolerance) disp_increase_norm = 1.0; if(lm_increase_norm < Tolerance) lm_increase_norm = 1.0; if(disp_solution_norm < Tolerance) disp_solution_norm = 1.0; KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT) && 
lm_solution_norm < Tolerance) << "WARNING::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl; const TDataType disp_ratio = std::sqrt(disp_increase_norm/disp_solution_norm); const TDataType lm_ratio = lm_solution_norm > Tolerance ? std::sqrt(lm_increase_norm/lm_solution_norm) : 0.0; const TDataType disp_abs = std::sqrt(disp_increase_norm)/static_cast<TDataType>(disp_dof_num); const TDataType lm_abs = std::sqrt(lm_increase_norm)/static_cast<TDataType>(lm_dof_num); // The process info of the model part ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // We print the results // TODO: Replace for the new log if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { std::cout.precision(4); TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); r_table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance << lm_ratio << mLMRatioTolerance << lm_abs << mLMAbsTolerance; } else { std::cout.precision(4); if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) { KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("DoF ONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT(" LAGRANGE MUL:\tRATIO = ") << lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl; } else { KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "DoF ONVERGENCE CHECK" << "\tSTEP: " << 
r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDISPLACEMENT: RATIO = " << disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << " LAGRANGE MUL:\tRATIO = " << lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl; } } } // We check if converged const bool disp_converged = (disp_ratio <= mDispRatioTolerance || disp_abs <= mDispAbsTolerance); const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT) && lm_solution_norm < Tolerance) ? true : (lm_ratio <= mLMRatioTolerance || lm_abs <= mLMAbsTolerance); if (disp_converged && lm_converged) { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FGRN(" Achieved")); else r_table << "Achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDoF convergence is achieved" << std::endl; } } return true; } else { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FRED(" 
Not achieved")); else r_table << "Not achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDoF convergence is not achieved" << std::endl; } } return false; } } else // In this case all the displacements are imposed! return true; } /** * @brief This function initialize the convergence criteria * @param rModelPart Reference to the ModelPart containing the contact problem. (unused) */ void Initialize( ModelPart& rModelPart ) override { BaseType::mConvergenceCriteriaIsInitialized = true; ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); r_table.AddColumn("DP RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); r_table.AddColumn("LM RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); r_table.AddColumn("CONVERGENCE", 15); mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED, true); } } /** * @brief This function initializes the solution step * @param rModelPart Reference to the ModelPart containing the contact problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void InitializeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // Filling mActiveDofs when MPC exist ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet); } ///@} ///@name Operations ///@{ ///@} ///@name Acces ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ Flags mOptions; /// Local flags TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement TDataType mLMRatioTolerance; /// The ratio threshold for the norm of the LM TDataType mLMAbsTolerance; /// The absolute value threshold for the norm of the LM std::vector<bool> mActiveDofs; /// This vector contains the dofs that are active ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Unaccessible methods ///@{ ///@} }; // Kratos DisplacementLagrangeMultiplierContactCriteria ///@name Local flags creation ///@{ /// Local Flags template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, 
TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::NOT_ENSURE_CONTACT(Kratos::Flags::Create(0, false)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::NOT_PRINTING_OUTPUT(Kratos::Flags::Create(1, false)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::NOT_TABLE_IS_INITIALIZED(Kratos::Flags::Create(2, false)); } #endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H */
ordered-1.c
/* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-ompexp" } */
/* LLVM LOCAL test not applicable */
/* { dg-require-fdump "" } */

/* Compiler test: verifies that the ompexp pass lowers each
   '#pragma omp ordered' region into a GOMP_ordered_start / GOMP_ordered_end
   runtime-call pair, both for a single-statement body and for a
   compound-statement body.  Do not restructure the code: the dg-final
   scan counts below depend on exactly two ordered regions being present.  */

extern void bar(int);

void foo (void)
{
  /* Ordered region whose body is a single statement.  */
  #pragma omp ordered
    bar(0);

  /* Ordered region whose body is a compound statement.  */
  #pragma omp ordered
  {
    bar(1);
    bar(2);
  }
}

/* Two ordered regions must expand to exactly two start/end call pairs.  */
/* { dg-final { scan-tree-dump-times "GOMP_ordered_start" 2 "ompexp" } } */
/* { dg-final { scan-tree-dump-times "GOMP_ordered_end" 2 "ompexp" } } */
/* { dg-final { cleanup-tree-dump "ompexp" } } */
common.c
#include "common.h"

// Vertices of a grid graph are stored column-major: vertex = column*height + row.
// Edge endpoints may be "disabled" by an invertible negative encoding
// (see BE_EDGE / BE_NOEDGE); a non-negative endpoint is a live edge.

// True iff endpoint e is live (non-negative).
bool IS_EDGE(const int e)
{
  return (e >= 0)? true : false;
}

// Decode a disabled endpoint back to its live value: e -> -(e+1).
static void BE_EDGE(int *e)
{
  *e = ((*e) + 1) * (-1);
}

// Encode a live endpoint as disabled: e -> -e-1 (always negative).
static void BE_NOEDGE(int *e)
{
  *e = -1 * (*e) - 1;
}

// Toggle an endpoint between the live and disabled encodings.
// BE_EDGE and BE_NOEDGE are mutual inverses, so REVERSE is an involution.
void REVERSE(int *e)
{
  if(IS_EDGE(*e)) BE_NOEDGE(e);
  else BE_EDGE(e);
}

// Print v as 64 bits, most-significant bit first, with a ',' after every
// 8 bits.  NOTE(review): a trailing ',' is also emitted after the final
// byte (sum==64 triggers the separator) -- confirm that is intended.
void printb(const uint64_t v)
{
  uint64_t mask = 0x1ULL << (sizeof(v) * CHAR_BIT - 1);
  int sum = 0;
  do{
    putchar(mask & v ? '1' : '0');
    sum++;
    if(sum%8==0) putchar(',');
  } while (mask >>= 1);
}

// Build per-node adjacency lists and degrees from the edge list.
// Both endpoints of an edge must agree on liveness (aborts via ERROR()
// otherwise); disabled edges are skipped.  Aborts if any node's degree
// exceeds max_degree.
void create_adjacency(const int nodes, const int lines, const int max_degree,
		      const int edge[lines][2], int adjacency[nodes][max_degree], int *degree)
{
  for(int i=0;i<nodes;i++)
    degree[i] = 0;

  for(int i=0;i<lines;i++){
    int n1 = edge[i][0];
    int n2 = edge[i][1];
    if(IS_EDGE(n1) != IS_EDGE(n2))
      ERROR("uga %d %d\n", n1, n2);
    if(IS_EDGE(n1)){
      adjacency[n1][degree[n1]++] = n2;
      adjacency[n2][degree[n2]++] = n1;
    }
  }

  // For debug
  for(int i=0;i<nodes;i++)
    if(degree[i] > max_degree)
      ERROR("uga (%d)\n", degree[i]);
}

// True iff edges (e00,e01) and (e10,e11) share at least one vertex.
bool has_duplicated_vertex(const int e00, const int e01, const int e10, const int e11)
{
  return (e00 == e10 || e01 == e11 || e00 == e11 || e01 == e10);
}

// True iff the two undirected edges join exactly the same vertex pair
// (in either orientation).
bool has_duplicated_edge(const int e00, const int e01, const int e10, const int e11)
{
  return (e00 == e10 && e01 == e11) || (e00 == e11 && e01 == e10);
}

// Uniform random integer in [0, max-1].
// NOTE(review): scales random() by RAND_MAX, which strictly speaking is the
// range of rand(); on glibc both ranges are 2^31-1 so this works -- confirm
// on other C libraries.
int getRandom(const int max)
{
  return (int)(random()*((double)max)/(1.0+RAND_MAX));
}

// Manhattan (L1) distance between vertices v and w in the column-major grid.
int DISTANCE(const int v, const int w, const int height)
{
  int w0 = WIDTH (v, height);
  int h0 = HEIGHT(v, height);
  int w1 = WIDTH (w, height);
  int h1 = HEIGHT(w, height);
  return abs(w0 - w1) + abs(h0 - h1);
}

// Column (x coordinate) of vertex v in the column-major layout.
int WIDTH(const int v, const int height)
{
  return v/height;
}

// Row (y coordinate) of vertex v in the column-major layout.
int HEIGHT(const int v, const int height)
{
  return v%height;
}

// Rotate vertex v on the height x width grid by `degree` degrees and return
// the rotated vertex id.  groups==2 permits only 180; groups==4 permits
// 90/180/270.  Any other combination aborts via ERROR().
// NOTE(review): the 90- and 270-degree formulas multiply the new column by
// `height`, which presumably assumes the grid is square when groups==4 --
// confirm against the callers.
int ROTATE(const int v, const int height, const int width, const int groups, const int degree)
{
  if(groups != 2 && groups != 4)
    ERROR("Invalid groups\n");

  int w = WIDTH (v, height);
  int h = HEIGHT(v, height);
  if(groups == 2){
    if(degree != 180)
      ERROR("Invalid degree\n");
    // degree == 180
    return (width-w-1)*height + (height-h-1);
  }
  else{ // groups == 4
    if(degree != 90 && degree != 180 && degree != 270)
      ERROR("Invalid degree\n");
    if(degree == 90)
      return h*height + (height-w-1);
    else if(degree == 180)
      return (height-w-1)*height + (height-h-1);
    else
      return (height-h-1)*height + w; // degree == 270
  }
}

// Verify that the edge list is rotationally symmetric: each edge in the
// base section (first lines/groups entries) must reappear, rotated, in the
// corresponding later section -- or lie on the symmetry diagonal
// (endpoint coordinates summing to width-1 / height-1).  Prints the first
// offending pair and returns false on failure.
// NOTE(review): parameter `based_height` is unused here -- confirm whether
// it can be dropped from the signature.
bool check_symmetric_edge(const int lines, const int edge[lines][2], const int height,
			  const int width, const int based_height, const int groups)
{
  assert(lines%groups == 0);
  int tmp_edge[2], based_lines = lines / groups;
  if(groups == 2){
    for(int i=0;i<based_lines;i++){
      if(!IS_EDGE(edge[i][0])) continue;
      // Rotate the base edge by 180 degrees and compare with section 1.
      for(int j=0;j<2;j++)
	tmp_edge[j] = ROTATE(edge[i][j], height, width, groups, 180);
      if(!has_duplicated_edge(edge[based_lines+i][0], edge[based_lines+i][1], tmp_edge[0], tmp_edge[1]))
	if(!( WIDTH (edge[based_lines+i][0], height) + WIDTH (edge[based_lines+i][1], height) == (width-1)
	      && HEIGHT(edge[based_lines+i][0], height) + HEIGHT(edge[based_lines+i][1], height) == (height-1))){
	  printf("i=%d: %d,%d - %d,%d %d,%d - %d,%d\n", i,
		 WIDTH(edge[based_lines+i][0], height), HEIGHT(edge[based_lines+i][0], height),
		 WIDTH(edge[based_lines+i][1], height), HEIGHT(edge[based_lines+i][1], height),
		 WIDTH(tmp_edge[0], height), HEIGHT(tmp_edge[0], height),
		 WIDTH(tmp_edge[1], height), HEIGHT(tmp_edge[1], height));
	  return false;
	}
    }
  }
  else if(groups == 4){
    // 90 degrees
    for(int i=0;i<based_lines;i++){
      if(!IS_EDGE(edge[i][0])) continue;
      for(int j=0;j<2;j++)
	tmp_edge[j] = ROTATE(edge[i][j], height, width, groups, 90);
      if(!has_duplicated_edge(tmp_edge[0], tmp_edge[1], edge[based_lines+i][0], edge[based_lines+i][1])){
	if(!( WIDTH (edge[based_lines+i][0], height) + WIDTH (edge[based_lines+i][1], height) == (width-1)
	      && HEIGHT(edge[based_lines+i][0], height) + HEIGHT(edge[based_lines+i][1], height) == (height-1))){
	  printf("A i=%d: %d,%d-%d,%d %d,%d-%d,%d\n", i,
		 WIDTH(edge[based_lines+i][0], height), HEIGHT(edge[based_lines+i][0], height),
		 WIDTH(edge[based_lines+i][1], height), HEIGHT(edge[based_lines+i][1], height),
		 WIDTH(tmp_edge[0], height), HEIGHT(tmp_edge[0], height),
		 WIDTH(tmp_edge[1], height), HEIGHT(tmp_edge[1], height));
	  return false;
	}
      }
      // 180 degrees
      for(int j=0;j<2;j++)
	tmp_edge[j] = ROTATE(edge[i][j], height, width, groups, 180);
      if(!has_duplicated_edge(tmp_edge[0], tmp_edge[1], edge[based_lines*2+i][0], edge[based_lines*2+i][1])){
	if(!( WIDTH (edge[based_lines*2+i][0], height) + WIDTH (edge[based_lines*2+i][1], height) == (width-1)
	      && HEIGHT(edge[based_lines*2+i][0], height) + HEIGHT(edge[based_lines*2+i][1], height) == (height-1))){
	  printf("B i=%d: %d,%d-%d,%d %d,%d-%d,%d\n", i,
		 WIDTH(edge[based_lines*2+i][0], height), HEIGHT(edge[based_lines*2+i][0], height),
		 WIDTH(edge[based_lines*2+i][1], height), HEIGHT(edge[based_lines*2+i][1], height),
		 WIDTH(tmp_edge[0], height), HEIGHT(tmp_edge[0], height),
		 WIDTH(tmp_edge[1], height), HEIGHT(tmp_edge[1], height));
	  return false;
	}
      }
      // 270 degrees
      for(int j=0;j<2;j++)
	tmp_edge[j] = ROTATE(edge[i][j], height, width, groups, 270);
      if(!has_duplicated_edge(tmp_edge[0], tmp_edge[1], edge[based_lines*3+i][0], edge[based_lines*3+i][1])){
	if(!( WIDTH (edge[based_lines*3+i][0], height) + WIDTH (edge[based_lines*3+i][1], height) == (width-1)
	      && HEIGHT(edge[based_lines*3+i][0], height) + HEIGHT(edge[based_lines*3+i][1], height) == (height-1))){
	  printf("C i=%d: %d,%d-%d,%d %d,%d-%d,%d\n", i,
		 WIDTH(edge[based_lines*3+i][0], height), HEIGHT(edge[based_lines*3+i][0], height),
		 WIDTH(edge[based_lines*3+i][1], height), HEIGHT(edge[based_lines*3+i][1], height),
		 WIDTH(tmp_edge[0], height), HEIGHT(tmp_edge[0], height),
		 WIDTH(tmp_edge[1], height), HEIGHT(tmp_edge[1], height));
	  return false;
	}
      }
    }
  }
  return true;
}

// Print every edge as "x0,y0 x1,y1"; edge is a flat array of 2*lines ints.
void output_edge(const int lines, const int edge[lines*2], const int height)
{
  for(int i=0;i<lines;i++)
    printf("%d,%d %d,%d\n",
	   WIDTH(edge[i*2], height), HEIGHT(edge[i*2], height),
	   WIDTH(edge[i*2+1], height), HEIGHT(edge[i*2+1], height));
}

// Copy n ints from buf2 into buf1 (non-aliasing; parallelized under OpenMP).
void copy_edge(int *restrict buf1, const int *restrict buf2, const int n)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for(int i=0;i<n;i++)
    buf1[i] = buf2[i];
}

// Exchange the values pointed to by a and b.
void swap(int *a, int *b)
{
  int tmp = *a;
  *a = *b;
  *b = tmp;
}

// Return false (after printing the offenders) if any edge is a self-loop.
// NOTE(review): the parallel loop writes `flag` from multiple threads
// without atomics; all writers store the same value (false), which is
// presumably relied upon -- confirm this is acceptable for the target
// compilers.
bool check_loop(const int lines, const int edge[lines][2])
{
  timer_start(TIMER_CHECK);
  bool flag = true;
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for(int i=0;i<lines;i++)
    if(edge[i][0] == edge[i][1])
      flag = false;
  timer_stop(TIMER_CHECK);

  if(flag == false){
    for(int i=0;i<lines;i++)
      if(edge[i][0] == edge[i][1]){
	printf("%d: %d %d <--\n", i, edge[i][0], edge[i][1]);
      }
      else{
	printf("%d: %d %d\n", i, edge[i][0], edge[i][1]);
      }
  }
  return flag;
}

// Return false (after printing the pair) if any two edges in the list join
// the same vertex pair.  O(lines^2) all-pairs scan.
bool check_duplicate_all_edge(const int lines, const int edge[lines][2])
{
  timer_start(TIMER_CHECK);
  bool flag = true;
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for(int i=0;i<lines;i++)
    for(int j=i+1;j<lines;j++)
      if(has_duplicated_edge(edge[i][0], edge[i][1], edge[j][0], edge[j][1])){
	printf("%d %d %d %d\n", edge[i][0], edge[i][1], edge[j][0], edge[j][1]);
	flag = false;
      }
  timer_stop(TIMER_CHECK);
  return flag;
}

// Return false if any of the first g_opt candidate edges duplicates one of
// the remaining (groups-1)*g_opt rotated candidates.
bool check_duplicate_tmp_edge(const int g_opt, const int groups, int tmp_edge[groups*g_opt][2])
{
  timer_start(TIMER_CHECK);
  bool flag = true;
  for(int i=0;i<g_opt;i++){
    int tmp[2] = {tmp_edge[i][0], tmp_edge[i][1]};
    for(int j=g_opt;j<groups*g_opt;j++)
      if(has_duplicated_edge(tmp[0], tmp[1], tmp_edge[j][0], tmp_edge[j][1]))
	flag = false;
  }
  timer_stop(TIMER_CHECK);
  return flag;
}

// Return false if any candidate edge in tmp_edge duplicates a current edge,
// excluding the line(s) being replaced (tmp_line, taken modulo the base
// section length).  Work is strided over MPI ranks and the per-rank results
// are combined with a logical-AND Allreduce, so all ranks must call this.
// NOTE(review): `rank` and `procs` are presumably the MPI rank/size globals
// from common.h -- confirm.  If g_opt is neither D_2G_OPT nor D_1G_OPT no
// check is performed and true is returned.
bool check_duplicate_current_edge(const int lines, const int edge[lines][2], const int tmp_lines,
				  const int tmp_edge[tmp_lines][2], const int tmp_line[2],
				  const int groups, const int g_opt, const bool is_center)
{
  timer_start(TIMER_CHECK);
  int based_lines = lines/groups;
  bool flag = true;

  if(g_opt == D_2G_OPT){
    // Two base lines are being replaced; skip both when scanning.
    int tmp_line0 = tmp_line[0]%based_lines;
    int tmp_line1 = tmp_line[1]%based_lines;
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for(int i=rank;i<based_lines;i+=procs)
      if(i != tmp_line0 && i != tmp_line1)
	for(int j=0;j<tmp_lines;j++)
	  if(has_duplicated_edge(edge[i][0], edge[i][1], tmp_edge[j][0], tmp_edge[j][1]))
	    flag = false;
  }
  else if(g_opt == D_1G_OPT){
    int tmp_line0 = tmp_line[0]%based_lines;
    if(! is_center){
      // Only the base section needs scanning.
#ifdef _OPENMP
#pragma omp parallel for
#endif
      for(int i=rank;i<based_lines;i+=procs)
	if(i != tmp_line0)
	  for(int j=0;j<tmp_lines;j++)
	    if(has_duplicated_edge(edge[i][0], edge[i][1], tmp_edge[j][0], tmp_edge[j][1]))
	      flag = false;
    }
    else{
      // Center edge: scan every section, skipping the replaced base line
      // in each (i modulo based_lines).
#ifdef _OPENMP
#pragma omp parallel for
#endif
      for(int i=rank;i<lines;i+=procs)
	if(i%based_lines != tmp_line0)
	  for(int j=0;j<tmp_lines;j++)
	    if(has_duplicated_edge(edge[i][0], edge[i][1], tmp_edge[j][0], tmp_edge[j][1]))
	      flag = false;
    }
  }
  MPI_Allreduce(MPI_IN_PLACE, &flag, 1, MPI_C_BOOL, MPI_LAND, MPI_COMM_WORLD);
  timer_stop(TIMER_CHECK);
  return flag;
}

// Build a permutation hash mapping each grid vertex to its index in the
// rotation-grouped ordering: base vertices get [0, based_nodes), their
// 90/180/270-degree images get successive based_nodes-sized ranges.
// NOTE(review): for groups 2 and 4 the base region is taken as the lower
// half of each column (based_height = height/2) -- confirm this matches the
// generator's layout.
void create_rotate_hash(const int nodes, const int height, const int width,
			const int groups, int *rotate_hash)
{
  int based_nodes = nodes / groups;
  if(groups == 1){
    for(int i=0;i<based_nodes;i++)
      rotate_hash[i] = i;
  }
  else if(groups == 2){
    int based_height = height / 2;
    for(int i=0;i<based_nodes;i++){
      int j = (i/based_height) * height + (i%based_height);
      rotate_hash[j] = i;
      rotate_hash[ROTATE(j, height, width, groups, 180)] = i + based_nodes;
    }
  }
  else{
    int based_height = height / 2;
    for(int i=0;i<based_nodes;i++){
      int j = (i/based_height) * height + (i%based_height);
      rotate_hash[j] = i;
      rotate_hash[ROTATE(j, height, width, groups,  90)] = i + based_nodes;
      rotate_hash[ROTATE(j, height, width, groups, 180)] = i + based_nodes * 2;
      rotate_hash[ROTATE(j, height, width, groups, 270)] = i + based_nodes * 3;
    }
  }
}
pzgstrf.c
/*! \file Copyright (c) 2003, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from U.S. Dept. of Energy) All rights reserved. The source code is distributed under BSD license, see the file License.txt at the top-level directory. */ /*! @file * \brief Performs LU factorization in parallel * * <pre> * -- Distributed SuperLU routine (version 6.1) -- * Lawrence Berkeley National Lab, Univ. of California Berkeley. * October 1, 2014 * * Modified: * September 1, 1999 * Feburary 7, 2001 use MPI_Isend/MPI_Irecv * October 15, 2008 latency-reducing panel factorization * July 12, 2011 static scheduling and arbitrary look-ahead * March 13, 2013 change NTAGS to MPI_TAG_UB value * September 24, 2015 replace xLAMCH by xMACH, using C99 standard. * December 31, 2015 rename xMACH to xMACH_DIST. * September 30, 2017 optimization for Intel Knights Landing (KNL) node . * June 1, 2018 add parallel AWPM pivoting; add back arrive_at_ublock() * February 8, 2019 version 6.1.1 * * Sketch of the algorithm * * ======================= * * The following relations hold: * * A_kk = L_kk * U_kk * * L_ik = Aik * U_kk^(-1) * * U_kj = L_kk^(-1) * A_kj * * ---------------------------------- * | | | * ----|----------------------------- * | | \ U_kk| | * | | \ | U_kj | * | |L_kk \ | || | * ----|-------|---------||---------- * | | | \/ | * | | | | * | | | | * | | | | * | | L_ik ==> A_ij | * | | | | * | | | | * | | | | * ---------------------------------- * * Handle the first block of columns separately. * * Factor diagonal and subdiagonal blocks and test for exact * singularity. ( pzgstrf2(0), one column at a time ) * * Compute block row of U * * Update trailing matrix * * Loop over the remaining blocks of columns. 
* mycol = MYCOL( iam, grid ); * myrow = MYROW( iam, grid ); * N = nsupers; * For (k = 1; k < N; ++k) { * krow = PROW( k, grid ); * kcol = PCOL( k, grid ); * Pkk = PNUM( krow, kcol, grid ); * * * Factor diagonal and subdiagonal blocks and test for exact * singularity. * if ( mycol == kcol ) { * pzgstrf2(k), one column at a time * } * * * Parallel triangular solve * if ( iam == Pkk ) multicast L_k,k to this process row; * if ( myrow == krow && mycol != kcol ) { * Recv L_k,k from process Pkk; * for (j = k+1; j < N; ++j) * if ( PCOL( j, grid ) == mycol && A_k,j != 0 ) * U_k,j = L_k,k \ A_k,j; * } * * * Parallel rank-k update * if ( myrow == krow ) multicast U_k,k+1:N to this process column; * if ( mycol == kcol ) multicast L_k+1:N,k to this process row; * if ( myrow != krow ) { * Pkj = PNUM( krow, mycol, grid ); * Recv U_k,k+1:N from process Pkj; * } * if ( mycol != kcol ) { * Pik = PNUM( myrow, kcol, grid ); * Recv L_k+1:N,k from process Pik; * } * for (j = k+1; k < N; ++k) { * for (i = k+1; i < N; ++i) * if ( myrow == PROW( i, grid ) && mycol == PCOL( j, grid ) * && L_i,k != 0 && U_k,j != 0 ) * A_i,j = A_i,j - L_i,k * U_k,j; * } * } * * </pre> */ #include <math.h> /*#include "mkl.h"*/ #include "superlu_zdefs.h" #ifdef GPU_ACC #include "cublas_utils.h" /*#include "cublas_zgemm.h"*/ // #define NUM_CUDA_STREAMS 16 // #define NUM_CUDA_STREAMS 16 #endif /* Various defininations */ /* Name : SUPERNODE_PROFILE Purpose : For SuperNode Level profiling of various measurements such as gigaflop/sec obtained,bandwidth achieved: Overhead : Low */ // #define SUPERNODE_PROFILE /* Name : BAELINE Purpose : baseline to compare performance against Overhead : NA : this won't be used for running experiments */ // #define BASELINE /* Name : PHI_FRAMEWORK Purpose : To simulate and test algorithm used for offloading Phi Overhead : NA : this won't be used for running experiments */ #define PHI_FRAMEWORK #if 0 #define CACHELINE 64 /* bytes, Xeon Phi KNL */ #else #define CACHELINE 0 /* not 
worry about false sharing of different threads */ #endif //#define GEMM_PADLEN 1 #define GEMM_PADLEN 8 #define PZGSTRF2 pzgstrf2_trsm #define PZGSTRS2 pzgstrs2_omp extern void PZGSTRF2 (superlu_dist_options_t *, int_t, int_t, double, Glu_persist_t *, gridinfo_t *, LocalLU_t *, MPI_Request *, int, SuperLUStat_t *, int *); #ifdef _CRAY extern void PZGSTRS2 (int_t, int_t, Glu_persist_t *, gridinfo_t *, LocalLU_t *, SuperLUStat_t *, _fcd, _fcd, _fcd); #else extern void PZGSTRS2 (int_t, int_t, Glu_persist_t *, gridinfo_t *, LocalLU_t *, SuperLUStat_t *); #endif #ifdef ISORT extern void isort (int_t N, int_t * ARRAY1, int_t * ARRAY2); extern void isort1 (int_t N, int_t * ARRAY); #else int superlu_sort_perm (const void *arg1, const void *arg2) { const int_t *val1 = (const int_t *) arg1; const int_t *val2 = (const int_t *) arg2; return (*val2 < *val1); } #endif /************************************************************************/ #include "zscatter.c" /************************************************************************/ /*! \brief * * <pre> * Purpose * ======= * * PZGSTRF performs the LU factorization in parallel. * * Arguments * ========= * * options (input) superlu_dist_options_t* * The structure defines the input parameters to control * how the LU decomposition will be performed. * The following field should be defined: * o ReplaceTinyPivot (yes_no_t) * Specifies whether to replace the tiny diagonals by * sqrt(epsilon)*norm(A) during LU factorization. * * m (input) int * Number of rows in the matrix. * * n (input) int * Number of columns in the matrix. * * anorm (input) double * The norm of the original matrix A, or the scaled A if * equilibration was done. * * LUstruct (input/output) LUstruct_t* * The data structures to store the distributed L and U factors. 
* The following fields should be defined: * * o Glu_persist (input) Glu_persist_t* * Global data structure (xsup, supno) replicated on all processes, * describing the supernode partition in the factored matrices * L and U: * xsup[s] is the leading column of the s-th supernode, * supno[i] is the supernode number to which column i belongs. * * o Llu (input/output) LocalLU_t* * The distributed data structures to store L and U factors. * See superlu_ddefs.h for the definition of 'LocalLU_t'. * * grid (input) gridinfo_t* * The 2D process mesh. It contains the MPI communicator, the number * of process rows (NPROW), the number of process columns (NPCOL), * and my process rank. It is an input argument to all the * parallel routines. * Grid can be initialized by subroutine SUPERLU_GRIDINIT. * See superlu_ddefs.h for the definition of 'gridinfo_t'. * * stat (output) SuperLUStat_t* * Record the statistics on runtime and floating-point operation count. * See util.h for the definition of 'SuperLUStat_t'. * * info (output) int* * = 0: successful exit * < 0: if info = -i, the i-th argument had an illegal value * > 0: if info = i, U(i,i) is exactly zero. The factorization has * been completed, but the factor U is exactly singular, * and division by zero will occur if it is used to solve a * system of equations. 
* </pre> */ int_t pzgstrf(superlu_dist_options_t * options, int m, int n, double anorm, LUstruct_t * LUstruct, gridinfo_t * grid, SuperLUStat_t * stat, int *info) { #ifdef _CRAY _fcd ftcs = _cptofcd ("N", strlen ("N")); _fcd ftcs1 = _cptofcd ("L", strlen ("L")); _fcd ftcs2 = _cptofcd ("N", strlen ("N")); _fcd ftcs3 = _cptofcd ("U", strlen ("U")); #endif doublecomplex zero = {0.0, 0.0}; doublecomplex alpha = {1.0, 0.0}, beta = {0.0, 0.0}; int_t *xsup; int_t *lsub, *lsub1, *usub, *Usub_buf; int_t **Lsub_buf_2, **Usub_buf_2; doublecomplex **Lval_buf_2, **Uval_buf_2; /* pointers to starts of bufs */ doublecomplex *lusup, *lusup1, *uval, *Uval_buf; /* pointer to current buf */ int_t fnz, i, ib, ijb, ilst, it, iukp, jb, jj, klst, knsupc, lb, lib, ldv, ljb, lptr, lptr0, lptrj, luptr, luptr0, luptrj, nlb, nub, nsupc, rel, rukp, il, iu; int_t Pc, Pr; int iam, kcol, krow, yourcol, mycol, myrow, pi, pj; int j, k, lk, nsupers; /* k - current panel to work on */ int k0; /* counter of the next supernode to be factored */ int kk, kk0, kk1, kk2, jj0; /* panels in the look-ahead window */ int iukp0, rukp0, flag0, flag1; int nsupr, nbrow, segsize; int msg0, msg2; int_t **Ufstnz_br_ptr, **Lrowind_bc_ptr; doublecomplex **Unzval_br_ptr, **Lnzval_bc_ptr; int_t *index; doublecomplex *nzval; doublecomplex *ucol; int *indirect, *indirect2; int_t *tempi; doublecomplex *tempu, *tempv, *tempr; /* doublecomplex *tempv2d, *tempU2d; Sherry */ int iinfo; int *ToRecv, *ToSendD, **ToSendR; Glu_persist_t *Glu_persist = LUstruct->Glu_persist; LocalLU_t *Llu = LUstruct->Llu; superlu_scope_t *scp; float s_eps; double thresh; /*int full;*/ int ldt, ldu, lead_zero, ncols, ncb, nrb, p, pr, pc, nblocks; int_t *etree_supno_l, *etree_supno, *blocks, *blockr, *Ublock, *Urows, *Lblock, *Lrows, *perm_u, *sf_block, *sf_block_l, *nnodes_l, *nnodes_u, *edag_supno_l, *recvbuf, **edag_supno; float edag_supno_l_bytes; #ifdef ISORT int_t *iperm_u; #endif int *msgcnt; /* Count the size of the message xfer'd in each 
buffer: * 0 : transferred in Lsub_buf[] * 1 : transferred in Lval_buf[] * 2 : transferred in Usub_buf[] * 3 : transferred in Uval_buf[] */ int **msgcnts, **msgcntsU; /* counts in the look-ahead window */ int *factored; /* factored[j] == 0 : L col panel j is factorized. */ int *factoredU; /* factoredU[i] == 1 : U row panel i is factorized. */ int nnodes, *sendcnts, *sdispls, *recvcnts, *rdispls, *srows, *rrows; etree_node *head, *tail, *ptr; int *num_child; int num_look_aheads, look_id; int *look_ahead; /* global look_ahead table */ int_t *perm_c_supno, *iperm_c_supno; /* perm_c_supno[k] = j means at the k-th step of elimination, * the j-th supernode is chosen. */ MPI_Request *recv_req, **recv_reqs, **send_reqs, **send_reqs_u, **recv_reqs_u; MPI_Request *send_req, *U_diag_blk_send_req = NULL; MPI_Status status; void *attr_val; int flag; /* The following variables are used to pad GEMM dimensions so that each is a multiple of vector length (8 doubles for KNL) */ int gemm_m_pad = GEMM_PADLEN, gemm_k_pad = GEMM_PADLEN, gemm_n_pad = GEMM_PADLEN; int gemm_padding = 0; int iword = sizeof (int_t); int dword = sizeof (doublecomplex); /* For measuring load imbalence in omp threads */ double omp_load_imblc = 0.0; double *omp_loop_time; double schur_flop_timer = 0.0; double pdgstrf2_timer = 0.0; double pdgstrs2_timer = 0.0; double lookaheadupdatetimer = 0.0; double InitTimer = 0.0; /* including compute schedule, malloc */ double tt_start, tt_end; /* #if !defined( GPU_ACC ) */ /* Counters for memory operations and timings */ double scatter_mem_op_counter = 0.0; double scatter_mem_op_timer = 0.0; double scatterL_mem_op_counter = 0.0; double scatterL_mem_op_timer = 0.0; double scatterU_mem_op_counter = 0.0; double scatterU_mem_op_timer = 0.0; /* Counters for flops/gather/scatter and timings */ double GatherLTimer = 0.0; double LookAheadRowSepMOP = 0.0; double GatherUTimer = 0.0; double GatherMOP = 0.0; double LookAheadGEMMTimer = 0.0; double LookAheadGEMMFlOp = 0.0; double 
LookAheadScatterTimer = 0.0; double LookAheadScatterMOP = 0.0; double RemainGEMMTimer = 0.0; double RemainGEMM_flops = 0.0; double RemainScatterTimer = 0.0; double NetSchurUpTimer = 0.0; double schur_flop_counter = 0.0; /* #endif */ #if ( PRNTlevel>= 1) /* count GEMM max dimensions */ int gemm_max_m = 0, gemm_max_n = 0, gemm_max_k = 0; #endif #if ( DEBUGlevel>=2 ) int_t num_copy = 0, num_update = 0; #endif #if ( PRNTlevel==3 ) int zero_msg = 0, total_msg = 0; #endif #if ( PROFlevel>=1 ) double t1, t2; float msg_vol = 0, msg_cnt = 0; double comm_wait_time = 0.0; /* Record GEMM dimensions and times */ FILE *fopen(), *fgemm; int gemm_count = 0; typedef struct { int m, n, k; double microseconds; } gemm_profile; gemm_profile *gemm_stats; #endif /* Test the input parameters. */ *info = 0; if (m < 0) *info = -2; else if (n < 0) *info = -3; if (*info) { pxerr_dist ("pzgstrf", grid, -*info); return (-1); } /* Quick return if possible. */ if (m == 0 || n == 0) return 0; double tt1 = SuperLU_timer_ (); /* * Initialization. 
*/ iam = grid->iam; Pc = grid->npcol; Pr = grid->nprow; myrow = MYROW (iam, grid); mycol = MYCOL (iam, grid); nsupers = Glu_persist->supno[n - 1] + 1; xsup = Glu_persist->xsup; s_eps = smach_dist("Epsilon"); thresh = s_eps * anorm; MPI_Comm_get_attr (MPI_COMM_WORLD, MPI_TAG_UB, &attr_val, &flag); if (!flag) { fprintf (stderr, "Could not get TAG_UB\n"); return (-1); } int tag_ub = *(int *) attr_val; #if ( PRNTlevel>=1 ) if (!iam) { printf ("MPI tag upper bound = %d\n", tag_ub); fflush(stdout); } #endif #if ( DEBUGlevel>=1 ) if (s_eps == 0.0) printf (" ***** warning s_eps = %e *****\n", s_eps); CHECK_MALLOC (iam, "Enter pdgstrf()"); #endif #if (PROFlevel >= 1 ) gemm_stats = (gemm_profile *) SUPERLU_MALLOC(nsupers * sizeof(gemm_profile)); if (iam == 0) fgemm = fopen("dgemm_mnk.dat", "w"); int *prof_sendR = intCalloc_dist(nsupers); #endif stat->ops[FACT] = 0.0; stat->current_buffer = 0.0; stat->peak_buffer = 0.0; stat->gpu_buffer = 0.0; /* make sure the range of look-ahead window [0, MAX_LOOKAHEADS-1] */ num_look_aheads = SUPERLU_MAX(0, SUPERLU_MIN(options->num_lookaheads, MAX_LOOKAHEADS - 1)); if (Pr * Pc > 1) { if (!(U_diag_blk_send_req = (MPI_Request *) SUPERLU_MALLOC (Pr * sizeof (MPI_Request)))) ABORT ("Malloc fails for U_diag_blk_send_req[]."); /* flag no outstanding Isend */ U_diag_blk_send_req[myrow] = MPI_REQUEST_NULL; /* used 0 before */ /* allocating buffers for look-ahead */ i = Llu->bufmax[0]; if (i != 0) { if ( !(Llu->Lsub_buf_2[0] = intMalloc_dist ((num_look_aheads + 1) * ((size_t) i))) ) ABORT ("Malloc fails for Lsub_buf."); tempi = Llu->Lsub_buf_2[0]; for (jj = 0; jj < num_look_aheads; jj++) Llu->Lsub_buf_2[jj+1] = tempi + i*(jj+1); /* vectorize */ //Llu->Lsub_buf_2[jj + 1] = Llu->Lsub_buf_2[jj] + i; } i = Llu->bufmax[1]; if (i != 0) { if (!(Llu->Lval_buf_2[0] = doublecomplexMalloc_dist ((num_look_aheads + 1) * ((size_t) i)))) ABORT ("Malloc fails for Lval_buf[]."); tempr = Llu->Lval_buf_2[0]; for (jj = 0; jj < num_look_aheads; jj++) 
Llu->Lval_buf_2[jj+1] = tempr + i*(jj+1); /* vectorize */ //Llu->Lval_buf_2[jj + 1] = Llu->Lval_buf_2[jj] + i; } i = Llu->bufmax[2]; if (i != 0) { if (!(Llu->Usub_buf_2[0] = intMalloc_dist ((num_look_aheads + 1) * i))) ABORT ("Malloc fails for Usub_buf_2[]."); tempi = Llu->Usub_buf_2[0]; for (jj = 0; jj < num_look_aheads; jj++) Llu->Usub_buf_2[jj+1] = tempi + i*(jj+1); /* vectorize */ //Llu->Usub_buf_2[jj + 1] = Llu->Usub_buf_2[jj] + i; } i = Llu->bufmax[3]; if (i != 0) { if (!(Llu->Uval_buf_2[0] = doublecomplexMalloc_dist ((num_look_aheads + 1) * i))) ABORT ("Malloc fails for Uval_buf_2[]."); tempr = Llu->Uval_buf_2[0]; for (jj = 0; jj < num_look_aheads; jj++) Llu->Uval_buf_2[jj+1] = tempr + i*(jj+1); /* vectorize */ //Llu->Uval_buf_2[jj + 1] = Llu->Uval_buf_2[jj] + i; } } log_memory( (Llu->bufmax[0] + Llu->bufmax[2]) * (num_look_aheads + 1) * iword + (Llu->bufmax[1] + Llu->bufmax[3]) * (num_look_aheads + 1) * dword, stat ); /* creating pointers to the look-ahead buffers */ if (! (Lsub_buf_2 = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (int_t *)))) ABORT ("Malloc fails for Lsub_buf_2[]."); if (! (Lval_buf_2 = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (doublecomplex *)))) ABORT ("Malloc fails for Lval_buf_2[]."); if (! (Usub_buf_2 = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (int_t *)))) ABORT ("Malloc fails for Uval_buf_2[]."); if (! 
(Uval_buf_2 = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (doublecomplex *)))) ABORT ("Malloc fails for buf_2[]."); for (i = 0; i <= num_look_aheads; i++) { Lval_buf_2[i] = Llu->Lval_buf_2[i]; Lsub_buf_2[i] = Llu->Lsub_buf_2[i]; Uval_buf_2[i] = Llu->Uval_buf_2[i]; Usub_buf_2[i] = Llu->Usub_buf_2[i]; } if (!(msgcnts = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (int *)))) ABORT ("Malloc fails for msgcnts[]."); if (!(msgcntsU = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (int *)))) ABORT ("Malloc fails for msgcntsU[]."); for (i = 0; i <= num_look_aheads; i++) { if (!(msgcnts[i] = SUPERLU_MALLOC (4 * sizeof (int)))) ABORT ("Malloc fails for msgcnts[]."); if (!(msgcntsU[i] = SUPERLU_MALLOC (4 * sizeof (int)))) ABORT ("Malloc fails for msgcntsU[]."); } if (! (recv_reqs_u = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (MPI_Request *)))) ABORT ("Malloc fails for recv_reqs_u[]."); if (! (send_reqs_u = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (MPI_Request *)))) ABORT ("Malloc fails for send_reqs_u[]."); if (! (send_reqs = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (MPI_Request *)))) ABORT ("Malloc fails for send_reqs_u[]."); if (! 
(recv_reqs = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (MPI_Request *)))) ABORT ("Malloc fails for recv_reqs[]."); for (i = 0; i <= num_look_aheads; i++) { if (!(recv_reqs_u[i] = (MPI_Request *) SUPERLU_MALLOC (2 * sizeof (MPI_Request)))) ABORT ("Malloc fails for recv_req_u[i]."); if (!(send_reqs_u[i] = (MPI_Request *) SUPERLU_MALLOC (2 * Pr * sizeof (MPI_Request)))) ABORT ("Malloc fails for send_req_u[i]."); if (!(send_reqs[i] = (MPI_Request *) SUPERLU_MALLOC (2 * Pc * sizeof (MPI_Request)))) ABORT ("Malloc fails for send_reqs[i]."); if (!(recv_reqs[i] = (MPI_Request *) SUPERLU_MALLOC (4 * sizeof (MPI_Request)))) ABORT ("Malloc fails for recv_req[]."); send_reqs[i][0] = send_reqs[i][1] = MPI_REQUEST_NULL; recv_reqs[i][0] = recv_reqs[i][1] = MPI_REQUEST_NULL; } if (!(factored = SUPERLU_MALLOC (nsupers * sizeof (int_t)))) ABORT ("Malloc fails for factored[]."); if (!(factoredU = SUPERLU_MALLOC (nsupers * sizeof (int_t)))) ABORT ("Malloc fails for factoredU[]."); for (i = 0; i < nsupers; i++) factored[i] = factoredU[i] = -1; log_memory(2 * nsupers * iword, stat); int num_threads = 1; #ifdef _OPENMP #pragma omp parallel default(shared) #pragma omp master { //if (omp_get_thread_num () == 0) num_threads = omp_get_num_threads (); } #endif #if 0 omp_loop_time = (double *) _mm_malloc (sizeof (double) * num_threads,64); #else omp_loop_time = (double *) doubleMalloc_dist(num_threads); #endif #if ( PRNTlevel>=1 ) if(!iam) { printf(".. 
Starting with %d OpenMP threads \n", num_threads ); fflush(stdout); } #endif nblocks = 0; ncb = nsupers / Pc; /* number of column blocks, horizontal */ nrb = nsupers / Pr; /* number of row blocks, vertical */ /* in order to have dynamic scheduling */ int *full_u_cols; int *blk_ldu; #if 0 full_u_cols = (int_t *) _mm_malloc (sizeof (int_t) * ncb,64); blk_ldu = (int_t *) _mm_malloc (sizeof (int_t) * ncb,64); #else full_u_cols = SUPERLU_MALLOC(ncb * sizeof(int)); blk_ldu = SUPERLU_MALLOC(ncb * sizeof(int)); #endif log_memory(2 * ncb * iword, stat); #if 0 /* Sherry: not used? */ /* This bunch is used for static scheduling */ pair *full_col_count = (pair *) _mm_malloc (sizeof (pair) * ncb,64); int_t *count_cols, *sum_cols, *partition; count_cols = (int_t *) _mm_malloc (sizeof (int_t) * num_threads,64); sum_cols = (int_t *) _mm_malloc (sizeof (int_t) * num_threads,64); partition = (int_t *) _mm_malloc (sizeof (int_t) * num_threads * ncb,64); int_t ldp = ncb; #endif /* ################################################################## * Compute a good static schedule based on the factorization task graph. * ################################################################## */ perm_c_supno = SUPERLU_MALLOC (2 * nsupers * sizeof (int_t)); iperm_c_supno = perm_c_supno + nsupers; static_schedule(options, m, n, LUstruct, grid, stat, perm_c_supno, iperm_c_supno, info); #if ( DEBUGlevel >= 2 ) PrintInt10("schedule:perm_c_supno", nsupers, perm_c_supno); /* Turn off static schedule */ printf("[%d] .. 
Turn off static schedule for debugging ..\n", iam); for (i = 0; i < nsupers; ++i) perm_c_supno[i] = iperm_c_supno[i] = i; #endif /* ################################################################## */ /* constructing look-ahead table to indicate the last dependency */ int *look_ahead_l; /* Sherry: add comment on look_ahead_l[] */ stat->num_look_aheads = num_look_aheads; look_ahead_l = SUPERLU_MALLOC (nsupers * sizeof (int)); look_ahead = SUPERLU_MALLOC (nsupers * sizeof (int)); for (lb = 0; lb < nsupers; lb++) look_ahead_l[lb] = -1; /* vectorized */ log_memory(3 * nsupers * iword, stat); /* Sherry: omp parallel? not worth doing, due to concurrent write to look_ahead_l[jb] */ for (lb = 0; lb < nrb; ++lb) { /* go through U-factor */ ib = lb * Pr + myrow; index = Llu->Ufstnz_br_ptr[lb]; if (index) { /* Not an empty row */ k = BR_HEADER; for (j = 0; j < index[0]; ++j) { jb = index[k]; /* global block number */ if (jb != ib) look_ahead_l[jb] = SUPERLU_MAX (iperm_c_supno[ib], look_ahead_l[jb]); k += UB_DESCRIPTOR + SuperSize (index[k]); } } } if (myrow < nsupers % grid->nprow) { /* leftover block rows */ ib = nrb * Pr + myrow; index = Llu->Ufstnz_br_ptr[nrb]; if (index) { /* Not an empty row */ k = BR_HEADER; for (j = 0; j < index[0]; ++j) { jb = index[k]; if (jb != ib) look_ahead_l[jb] = SUPERLU_MAX (iperm_c_supno[ib], look_ahead_l[jb]); k += UB_DESCRIPTOR + SuperSize (index[k]); } } } if (options->SymPattern == NO) { /* Sherry: omp parallel? 
not worth doing, due to concurrent write to look_ahead_l[jb] */ for (lb = 0; lb < ncb; lb++) { /* go through L-factor */ ib = lb * Pc + mycol; index = Llu->Lrowind_bc_ptr[lb]; if (index) { k = BC_HEADER; for (j = 0; j < index[0]; j++) { jb = index[k]; if (jb != ib) look_ahead_l[jb] = SUPERLU_MAX (iperm_c_supno[ib], look_ahead_l[jb]); k += LB_DESCRIPTOR + index[k + 1]; } } } if (mycol < nsupers % grid->npcol) { /* leftover block columns */ ib = ncb * Pc + mycol; index = Llu->Lrowind_bc_ptr[ncb]; if (index) { k = BC_HEADER; for (j = 0; j < index[0]; j++) { jb = index[k]; if (jb != ib) look_ahead_l[jb] = SUPERLU_MAX (iperm_c_supno[ib], look_ahead_l[jb]); k += LB_DESCRIPTOR + index[k + 1]; } } } } MPI_Allreduce (look_ahead_l, look_ahead, nsupers, MPI_INT, MPI_MAX, grid->comm); SUPERLU_FREE (look_ahead_l); #ifdef ISORT iperm_u = SUPERLU_MALLOC (nsupers * sizeof (int_t)); perm_u = SUPERLU_MALLOC (nsupers * sizeof (int_t)); #else perm_u = SUPERLU_MALLOC (2 * nsupers * sizeof (int_t)); #endif log_memory(nsupers * iword, stat); k = sp_ienv_dist (3); /* max supernode size */ #if 0 if ( !(Llu->ujrow = doubleMalloc_dist(k*(k+1)/2)) ) ABORT("Malloc fails for ujrow[]."); #else /* Instead of half storage, we'll do full storage */ if (!(Llu->ujrow = doublecomplexCalloc_dist (k * k))) ABORT ("Malloc fails for ujrow[]."); #endif log_memory(k * k * iword, stat); #if ( PRNTlevel>=1 ) if (!iam) { printf (".. thresh = s_eps %e * anorm %e = %e\n", s_eps, anorm, thresh); printf (".. 
Buffer size: Lsub %ld\tLval %ld\tUsub %ld\tUval %ld\tLDA %ld\n", (long int) Llu->bufmax[0], (long int) Llu->bufmax[1], (long int) Llu->bufmax[2], (long int) Llu->bufmax[3], (long int) Llu->bufmax[4]); fflush(stdout); } #endif Lrowind_bc_ptr = Llu->Lrowind_bc_ptr; Lnzval_bc_ptr = Llu->Lnzval_bc_ptr; Ufstnz_br_ptr = Llu->Ufstnz_br_ptr; Unzval_br_ptr = Llu->Unzval_br_ptr; ToRecv = Llu->ToRecv; ToSendD = Llu->ToSendD; ToSendR = Llu->ToSendR; ldt = sp_ienv_dist (3); /* Size of maximum supernode */ k = CEILING (nsupers, Pr); /* Number of local block rows */ /* Following code is for finding maximum row dimension of all L panels */ int local_max_row_size = 0; int max_row_size; #if 0 #if defined _OPENMP // Sherry: parallel reduction -- seems slower? #pragma omp parallel for reduction(max :local_max_row_size) private(lk,lsub) #endif #endif for (int i = mycol; i < nsupers; i += Pc) { /* grab my local columns */ //int tpc = PCOL (i, grid); lk = LBj (i, grid); lsub = Lrowind_bc_ptr[lk]; if (lsub != NULL) { if (lsub[1] > local_max_row_size) local_max_row_size = lsub[1]; } } /* Max row size is global reduction within a row */ MPI_Allreduce (&local_max_row_size, &max_row_size, 1, MPI_INT, MPI_MAX, (grid->rscp.comm)); /* Buffer size is max of look-ahead window */ /* int_t buffer_size = SUPERLU_MAX (max_row_size * num_threads * ldt, get_max_buffer_size ()); */ #ifdef GPU_ACC int cublas_nb = get_cublas_nb(); int nstreams = get_num_cuda_streams (); int buffer_size = SUPERLU_MAX(max_row_size*nstreams*cublas_nb,get_max_buffer_size()); /* array holding last column blk for each partition, used in SchCompUdt--CUDA.c */ #if 0 int *stream_end_col = (int_t *) _mm_malloc (sizeof (int_t) * nstreams,64); #else int *stream_end_col = SUPERLU_MALLOC( nstreams * sizeof(int) ); #endif #else /* not to use GPU */ int Threads_per_process = get_thread_per_process(); int buffer_size = SUPERLU_MAX(max_row_size*Threads_per_process*ldt,get_max_buffer_size()); #endif /* end ifdef GPU_ACC */ int_t max_ncols = 
0; #if 0 /* symmetric assumption -- using L's supernode to estimate. */ /* Note that in following expression 8 can be anything as long as its not too big */ int bigu_size = 8 * sp_ienv_dist (3) * (max_row_size); #else int_t bigu_size = estimate_bigu_size( nsupers, Ufstnz_br_ptr, Glu_persist, grid, perm_u, &max_ncols ); #endif /* +16 to avoid cache line false sharing */ // int_t bigv_size = SUPERLU_MAX(max_row_size * (bigu_size / ldt), int_t bigv_size = SUPERLU_MAX(max_row_size * max_ncols, (ldt*ldt + CACHELINE / dword) * num_threads); /* bigU and bigV are either on CPU or on GPU, not both. */ doublecomplex* bigU; /* for storing entire U(k,:) panel, prepare for GEMM. bigU has the same size either on CPU or on CPU. */ doublecomplex* bigV; /* for storing GEMM output matrix, i.e. update matrix. bigV is large to hold the aggregate GEMM output.*/ bigU = NULL; bigV = NULL; #if ( PRNTlevel>=1 ) if(!iam) { printf("\t.. GEMM buffer size: max_row_size X max_ncols = %d x " IFMT "\n", max_row_size, max_ncols); printf(".. BIG U size " IFMT "\t BIG V size " IFMT "\n", bigu_size, bigv_size); fflush(stdout); } #endif #ifdef GPU_ACC if ( checkCuda(cudaHostAlloc((void**)&bigU, bigu_size * sizeof(doublecomplex), cudaHostAllocDefault)) ) ABORT("Malloc fails for zgemm buffer U "); bigv_size = buffer_size; #if ( PRNTlevel>=1 ) if (!iam) printf("[%d] .. 
BIG V bigv_size %d, using buffer_size %d (on GPU)\n", iam, bigv_size, buffer_size); #endif if ( checkCuda(cudaHostAlloc((void**)&bigV, bigv_size * sizeof(doublecomplex) ,cudaHostAllocDefault)) ) ABORT("Malloc fails for zgemm buffer V"); DisplayHeader(); #if ( PRNTlevel>=1 ) printf(" Starting with %d Cuda Streams \n",nstreams ); #endif cublasHandle_t *handle; handle = (cublasHandle_t *) SUPERLU_MALLOC(sizeof(cublasHandle_t)*nstreams); for(int i = 0; i < nstreams; i++) handle[i] = create_handle(); // creating streams cudaStream_t *streams; streams = (cudaStream_t *) SUPERLU_MALLOC(sizeof(cudaStream_t)*nstreams); for (int i = 0; i < nstreams; ++i) checkCuda( cudaStreamCreate(&streams[i]) ); // allocating data in device doublecomplex *dA, *dB, *dC; cudaError_t cudaStat; #if 0 // cudaStat = cudaMalloc( (void**)&dA, m*k*sizeof(double)); // HOw much should be the size of dA? // for time being just making it // cudaStat = cudaMalloc( (void**)&dA, ((max_row_size*sp_ienv_dist(3)))* sizeof(double)); #endif cudaStat = cudaMalloc( (void**)&dA, max_row_size*sp_ienv_dist(3)* sizeof(doublecomplex)); if (cudaStat!= cudaSuccess) { fprintf(stderr, "!!!! Error in allocating A in the device %ld \n",m*k*sizeof(doublecomplex) ); return 1; } // size of B should be max_supernode_size*buffer cudaStat = cudaMalloc((void**)&dB, bigu_size * sizeof(doublecomplex)); if (cudaStat!= cudaSuccess) { fprintf(stderr, "!!!! Error in allocating B in the device %ld \n",n*k*sizeof(doublecomplex)); return 1; } cudaStat = cudaMalloc((void**)&dC, buffer_size* sizeof(doublecomplex) ); if (cudaStat!= cudaSuccess) { fprintf(stderr, "!!!! 
Error in allocating C in the device \n" ); return 1; } stat->gpu_buffer += ( max_row_size * sp_ienv_dist(3) + bigu_size + buffer_size ) * dword; #else /* not CUDA */ // for GEMM padding 0 j = bigu_size / ldt; bigu_size += (gemm_k_pad * (j + ldt + gemm_n_pad)); bigv_size += (gemm_m_pad * (j + max_row_size + gemm_n_pad)); //#ifdef __INTEL_COMPILER // bigU = _mm_malloc(bigu_size * sizeof(doublecomplex), 1<<12); // align at 4K page // bigV = _mm_malloc(bigv_size * sizeof(doublecomplex), 1<<12); //#else if ( !(bigU = doublecomplexMalloc_dist(bigu_size)) ) ABORT ("Malloc fails for zgemm U buffer"); //Maximum size of bigU= sqrt(buffsize) ? // int bigv_size = 8 * ldt * ldt * num_threads; if ( !(bigV = doublecomplexMalloc_dist(bigv_size)) ) ABORT ("Malloc failed for zgemm V buffer"); //#endif #endif /* end ifdef GPU_ACC */ log_memory((bigv_size + bigu_size) * dword, stat); // mlock(bigU,(bigu_size) * sizeof (double)); #if ( PRNTlevel>=1 ) if(!iam) { printf (" Max row size is %d \n", max_row_size); printf (" Threads per process %d \n", num_threads); fflush(stdout); } #endif #if 0 /* Sherry */ if (!(tempv2d = doublecomplexCalloc_dist (2 * ((size_t) ldt) * ldt))) ABORT ("Calloc fails for tempv2d[]."); tempU2d = tempv2d + ldt * ldt; #endif /* Sherry: (ldt + 16), avoid cache line false sharing. 
KNL cacheline size = 64 bytes = 16 int */ iinfo = ldt + CACHELINE / sizeof(int); if (!(indirect = SUPERLU_MALLOC (iinfo * num_threads * sizeof(int)))) ABORT ("Malloc fails for indirect[]."); if (!(indirect2 = SUPERLU_MALLOC (iinfo * num_threads * sizeof(int)))) ABORT ("Malloc fails for indirect[]."); log_memory(2 * ldt*ldt * dword + 2 * iinfo * num_threads * iword, stat); int_t *lookAheadFullRow,*lookAheadStRow,*lookAhead_lptr,*lookAhead_ib, *RemainStRow,*Remain_lptr,*Remain_ib; lookAheadFullRow = intMalloc_dist( (num_look_aheads+1) ); lookAheadStRow = intMalloc_dist( (num_look_aheads+1) ); lookAhead_lptr = intMalloc_dist( (num_look_aheads+1) ); lookAhead_ib = intMalloc_dist( (num_look_aheads+1) ); int_t mrb = (nsupers + Pr - 1) / Pr; int_t mcb = (nsupers + Pc - 1) / Pc; RemainStRow = intMalloc_dist(mrb); #if 0 Remain_lptr = (int *) _mm_malloc(sizeof(int)*mrb,1); #else Remain_lptr = intMalloc_dist(mrb); #endif // mlock(Remain_lptr, sizeof(int)*mrb ); Remain_ib = intMalloc_dist(mrb); Remain_info_t *Remain_info; #if 0 Remain_info = (Remain_info_t *) _mm_malloc(mrb*sizeof(Remain_info_t),64); #else Remain_info = (Remain_info_t *) SUPERLU_MALLOC(mrb*sizeof(Remain_info_t)); #endif doublecomplex *lookAhead_L_buff, *Remain_L_buff; /* Stores entire L-panel */ Ublock_info_t *Ublock_info; ldt = sp_ienv_dist (3); /* max supernode size */ /* The following is quite loose */ lookAhead_L_buff = doublecomplexMalloc_dist(ldt*ldt* (num_look_aheads+1) ); #if 0 Remain_L_buff = (doublecomplex *) _mm_malloc( sizeof(doublecomplex)*(Llu->bufmax[1]),64); Ublock_info = (Ublock_info_t *) _mm_malloc(mcb*sizeof(Ublock_info_t),64); int * Ublock_info_iukp = (int *) _mm_malloc(mcb*sizeof(int),64); int * Ublock_info_rukp = (int *) _mm_malloc(mcb*sizeof(int),64); int * Ublock_info_jb = (int *) _mm_malloc(mcb*sizeof(int),64); #else j = gemm_m_pad * (ldt + max_row_size + gemm_k_pad); Remain_L_buff = doublecomplexMalloc_dist(Llu->bufmax[1] + j); /* This is loose */ Ublock_info = (Ublock_info_t *) 
SUPERLU_MALLOC(mcb*sizeof(Ublock_info_t)); int *Ublock_info_iukp = (int *) SUPERLU_MALLOC(mcb*sizeof(int)); int *Ublock_info_rukp = (int *) SUPERLU_MALLOC(mcb*sizeof(int)); int *Ublock_info_jb = (int *) SUPERLU_MALLOC(mcb*sizeof(int)); #endif long long alloc_mem = 3 * mrb * iword + mrb * sizeof(Remain_info_t) + ldt * ldt * (num_look_aheads+1) * dword + Llu->bufmax[1] * dword ; log_memory(alloc_mem, stat); InitTimer = SuperLU_timer_() - tt1; double pxgstrfTimer = SuperLU_timer_(); /* ################################################################## ** Handle first block column separately to start the pipeline. ** ################################################################## */ look_id = 0; msgcnt = msgcnts[0]; /* Lsub[0] to be transferred */ send_req = send_reqs[0]; recv_req = recv_reqs[0]; k0 = 0; k = perm_c_supno[0]; kcol = PCOL (k, grid); krow = PROW (k, grid); if (mycol == kcol) { double ttt1 = SuperLU_timer_(); /* panel factorization */ PZGSTRF2 (options, k0, k, thresh, Glu_persist, grid, Llu, U_diag_blk_send_req, tag_ub, stat, info); pdgstrf2_timer += SuperLU_timer_()-ttt1; scp = &grid->rscp; /* The scope of process row. */ /* Multicasts numeric values of L(:,0) to process rows. */ lk = LBj (k, grid); /* Local block number. 
*/ lsub = Lrowind_bc_ptr[lk]; lusup = Lnzval_bc_ptr[lk]; if (lsub) { /* number of entries in Lsub_buf[] to be transferred */ msgcnt[0] = lsub[1] + BC_HEADER + lsub[0] * LB_DESCRIPTOR; /* number of entries in Lval_buf[] to be transferred */ msgcnt[1] = lsub[1] * SuperSize (k); } else { msgcnt[0] = msgcnt[1] = 0; } for (pj = 0; pj < Pc; ++pj) { if (ToSendR[lk][pj] != EMPTY) { #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Isend (lsub, msgcnt[0], mpi_int_t, pj, SLU_MPI_TAG (0, 0) /* 0 */, scp->comm, &send_req[pj]); MPI_Isend (lusup, msgcnt[1], SuperLU_MPI_DOUBLE_COMPLEX, pj, SLU_MPI_TAG (1, 0) /* 1 */, scp->comm, &send_req[pj + Pc]); #if ( DEBUGlevel>=2 ) printf ("[%d] first block cloumn Send L(:,%4d): lsub %4d, lusup %4d to Pc %2d\n", iam, 0, msgcnt[0], msgcnt[1], pj); #endif #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_RIGHT] += t2; ++prof_sendR[lk]; msg_cnt += 2; msg_vol += msgcnt[0] * iword + msgcnt[1] * dword; #endif } /* end if */ } /* end for pj ... */ } else { /* Post immediate receives. */ if (ToRecv[k] >= 1) { /* Recv block column L(:,0). */ scp = &grid->rscp; /* The scope of process row. */ #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Irecv (Lsub_buf_2[0], Llu->bufmax[0], mpi_int_t, kcol, SLU_MPI_TAG (0, 0) /* 0 */ , scp->comm, &recv_req[0]); MPI_Irecv (Lval_buf_2[0], Llu->bufmax[1], SuperLU_MPI_DOUBLE_COMPLEX, kcol, SLU_MPI_TAG (1, 0) /* 1 */ , scp->comm, &recv_req[1]); #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_RIGHT] += t2; #endif } } /* end if mycol == 0 */ factored[k] = 0; /* flag column k as factored. */ /* post receive of first U-row */ if (myrow != krow) { if (ToRecv[k] == 2) { /* Recv block row U(k,:). */ scp = &grid->cscp; /* The scope of process column. 
*/ Usub_buf = Llu->Usub_buf_2[0]; Uval_buf = Llu->Uval_buf_2[0]; #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Irecv (Usub_buf, Llu->bufmax[2], mpi_int_t, krow, SLU_MPI_TAG (2, 0) /* 2%tag_ub */ , scp->comm, &recv_reqs_u[0][0]); MPI_Irecv (Uval_buf, Llu->bufmax[3], SuperLU_MPI_DOUBLE_COMPLEX, krow, SLU_MPI_TAG (3, 0) /* 3%tag_ub */ , scp->comm, &recv_reqs_u[0][1]); #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_DOWN] += t2; #endif } } /* ################################################################## **** MAIN LOOP **** ################################################################## */ for (k0 = 0; k0 < nsupers; ++k0) { k = perm_c_supno[k0]; /* ============================================ * * ======= look-ahead the new L columns ======= * * ============================================ */ /* tt1 = SuperLU_timer_(); */ if (k0 == 0) { /* look-ahead all the columns in the window */ kk1 = k0 + 1; kk2 = SUPERLU_MIN (k0 + num_look_aheads, nsupers - 1); } else { /* look-ahead one new column after the current window */ kk1 = k0 + num_look_aheads; kk2 = SUPERLU_MIN (kk1, nsupers - 1); } for (kk0 = kk1; kk0 <= kk2; kk0++) { /* loop through look-ahead window in L */ kk = perm_c_supno[kk0]; /* use the ordering from static schedule */ look_id = kk0 % (1 + num_look_aheads); /* which column in window */ if (look_ahead[kk] < k0) { /* does not depend on current column k */ kcol = PCOL (kk, grid); if (mycol == kcol) { /* I own this panel */ /* Panel factorization -- Factor diagonal and subdiagonal L blocks and test for exact singularity. */ factored[kk] = 0; /* flag column kk as factored */ double ttt1 = SuperLU_timer_(); PZGSTRF2 (options, kk0, kk, thresh, Glu_persist, grid, Llu, U_diag_blk_send_req, tag_ub, stat, info); pdgstrf2_timer += SuperLU_timer_() - ttt1; /* Multicasts numeric values of L(:,kk) to process rows. 
*/ /* ttt1 = SuperLU_timer_(); */ msgcnt = msgcnts[look_id]; /* point to the proper count array */ send_req = send_reqs[look_id]; lk = LBj (kk, grid); /* Local block number in L. */ lsub1 = Lrowind_bc_ptr[lk]; if (lsub1) { msgcnt[0] = lsub1[1] + BC_HEADER + lsub1[0] * LB_DESCRIPTOR; /* size of metadata */ msgcnt[1] = lsub1[1] * SuperSize (kk); /* Lval_buf[] size */ } else { msgcnt[0] = 0; msgcnt[1] = 0; } scp = &grid->rscp; /* The scope of process row. */ for (pj = 0; pj < Pc; ++pj) { if (ToSendR[lk][pj] != EMPTY) { lusup1 = Lnzval_bc_ptr[lk]; #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Isend (lsub1, msgcnt[0], mpi_int_t, pj, SLU_MPI_TAG (0, kk0), /* (4*kk0)%tag_ub */ scp->comm, &send_req[pj]); MPI_Isend (lusup1, msgcnt[1], SuperLU_MPI_DOUBLE_COMPLEX, pj, SLU_MPI_TAG (1, kk0), /* (4*kk0+1)%tag_ub */ scp->comm, &send_req[pj + Pc]); #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_RIGHT] += t2; ++prof_sendR[lk]; #endif #if ( DEBUGlevel>=2 ) printf ("[%d] -1- Send L(:,%4d): #lsub1 %4d, #lusup1 %4d right to Pj %2d\n", iam, kk, msgcnt[0], msgcnt[1], pj); #endif } } /* stat->time9 += SuperLU_timer_() - ttt1; */ } else { /* Post Recv of block column L(:,kk). */ /* double ttt1 = SuperLU_timer_(); */ if (ToRecv[kk] >= 1) { scp = &grid->rscp; /* The scope of process row. 
*/ recv_req = recv_reqs[look_id]; #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Irecv (Lsub_buf_2[look_id], Llu->bufmax[0], mpi_int_t, kcol, SLU_MPI_TAG (0, kk0), /* (4*kk0)%tag_ub */ scp->comm, &recv_req[0]); MPI_Irecv (Lval_buf_2[look_id], Llu->bufmax[1], SuperLU_MPI_DOUBLE_COMPLEX, kcol, SLU_MPI_TAG (1, kk0), /* (4*kk0+1)%tag_ub */ scp->comm, &recv_req[1]); #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_RIGHT] += t2; #endif } /* stat->time10 += SuperLU_timer_() - ttt1; */ } /* end if mycol == Pc(kk) */ } /* end if look-ahead in L panels */ /* Pre-post irecv for U-row look-ahead */ krow = PROW (kk, grid); if (myrow != krow) { if (ToRecv[kk] == 2) { /* post iRecv block row U(kk,:). */ scp = &grid->cscp; /* The scope of process column. */ Usub_buf = Llu->Usub_buf_2[look_id]; Uval_buf = Llu->Uval_buf_2[look_id]; #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Irecv (Usub_buf, Llu->bufmax[2], mpi_int_t, krow, SLU_MPI_TAG (2, kk0) /* (4*kk0+2)%tag_ub */ , scp->comm, &recv_reqs_u[look_id][0]); MPI_Irecv (Uval_buf, Llu->bufmax[3], SuperLU_MPI_DOUBLE_COMPLEX, krow, SLU_MPI_TAG (3, kk0) /* (4*kk0+3)%tag_ub */ , scp->comm, &recv_reqs_u[look_id][1]); #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_DOWN] += t2; #endif } } } /* end for each column in look-ahead window for L panels */ /* stat->time4 += SuperLU_timer_()-tt1; */ /* ================================= * * ==== look-ahead the U rows === * * ================================= */ kk1 = k0; kk2 = SUPERLU_MIN (k0 + num_look_aheads, nsupers - 1); for (kk0 = kk1; kk0 < kk2; kk0++) { kk = perm_c_supno[kk0]; /* order determined from static schedule */ if (factoredU[kk0] != 1 && look_ahead[kk] < k0) { /* does not depend on current column k */ kcol = PCOL (kk, grid); krow = PROW (kk, grid); lk = LBj (kk, grid); /* Local block number across row. NOT USED?? 
-- Sherry */ look_id = kk0 % (1 + num_look_aheads); msgcnt = msgcntsU[look_id]; recv_req = recv_reqs[look_id]; /* ================================================= * * Check if diagonal block has been received * * for panel factorization of U in look-ahead window * * ================================================= */ if (mycol == kcol) { /* I own this column panel, no need to receive L */ flag0 = flag1 = 1; msgcnt[0] = msgcnt[1] = -1; /* No need to transfer Lsub, nor Lval */ } else { /* Check to receive L(:,kk) from the left */ flag0 = flag1 = 0; if ( ToRecv[kk] >= 1 ) { #if ( PROFlevel>=1 ) TIC (t1); #endif if ( recv_req[0] != MPI_REQUEST_NULL ) { MPI_Test (&recv_req[0], &flag0, &status); if ( flag0 ) { MPI_Get_count (&status, mpi_int_t, &msgcnt[0]); recv_req[0] = MPI_REQUEST_NULL; } } else flag0 = 1; if ( recv_req[1] != MPI_REQUEST_NULL ) { MPI_Test (&recv_req[1], &flag1, &status); if ( flag1 ) { MPI_Get_count (&status, mpi_int_t, &msgcnt[1]); recv_req[1] = MPI_REQUEST_NULL; } } else flag1 = 1; #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_RIGHT] += t2; #endif } else { msgcnt[0] = 0; } } if (flag0 && flag1) { /* L(:,kk) is ready */ /* tt1 = SuperLU_timer_(); */ scp = &grid->cscp; /* The scope of process column. */ if (myrow == krow) { factoredU[kk0] = 1; /* Parallel triangular solve across process row *krow* -- U(k,j) = L(k,k) \ A(k,j). */ double ttt2 = SuperLU_timer_(); #ifdef _OPENMP /* #pragma omp parallel */ /* Sherry -- parallel done inside pzgstrs2 */ #endif { PZGSTRS2 (kk0, kk, Glu_persist, grid, Llu, stat); } pdgstrs2_timer += SuperLU_timer_()-ttt2; /* stat->time8 += SuperLU_timer_()-ttt2; */ /* Multicasts U(kk,:) to process columns. 
*/ lk = LBi (kk, grid); usub = Ufstnz_br_ptr[lk]; uval = Unzval_br_ptr[lk]; if (usub) { msgcnt[2] = usub[2]; /* metadata size */ msgcnt[3] = usub[1]; /* Uval[] size */ } else { msgcnt[2] = msgcnt[3] = 0; } if (ToSendD[lk] == YES) { for (pi = 0; pi < Pr; ++pi) { if (pi != myrow) { #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Isend (usub, msgcnt[2], mpi_int_t, pi, SLU_MPI_TAG (2, kk0), /* (4*kk0+2)%tag_ub */ scp->comm, &send_reqs_u[look_id][pi]); MPI_Isend (uval, msgcnt[3], SuperLU_MPI_DOUBLE_COMPLEX, pi, SLU_MPI_TAG (3, kk0), /* (4*kk0+3)%tag_ub */ scp->comm, &send_reqs_u[look_id][pi + Pr]); #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; msg_cnt += 2; msg_vol += msgcnt[2] * iword + msgcnt[3] * dword; #endif #if ( DEBUGlevel>=2 ) printf ("[%d] Send U(%4d,:) to Pr %2d\n", iam, k, pi); #endif } /* if pi ... */ } /* for pi ... */ } /* if ToSendD ... */ /* stat->time2 += SuperLU_timer_()-tt1; */ } /* end if myrow == krow */ } /* end if flag0 & flag1 ... */ } /* end if factoredU[] ... */ } /* end for kk0 ... */ /* ============================================== * * == start processing the current row of U(k,:) * * ============================================== */ knsupc = SuperSize (k); krow = PROW (k, grid); kcol = PCOL (k, grid); /* tt1 = SuperLU_timer_(); */ look_id = k0 % (1 + num_look_aheads); recv_req = recv_reqs[look_id]; send_req = send_reqs[look_id]; msgcnt = msgcnts[look_id]; Usub_buf = Llu->Usub_buf_2[look_id]; Uval_buf = Llu->Uval_buf_2[look_id]; if (mycol == kcol) { lk = LBj (k, grid); /* Local block number in L */ #if ( PROFlevel>=1 ) TIC(t1); #endif for (pj = 0; pj < Pc; ++pj) { /* Wait for Isend to complete before using lsub/lusup buffer. 
*/ if (ToSendR[lk][pj] != EMPTY) { MPI_Wait (&send_req[pj], &status); MPI_Wait (&send_req[pj + Pc], &status); } } #if ( PROFlevel>=1 ) TOC(t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_RIGHT] += t2; #endif lsub = Lrowind_bc_ptr[lk]; lusup = Lnzval_bc_ptr[lk]; } else { if (ToRecv[k] >= 1) { /* Recv block column L(:,k). */ scp = &grid->rscp; /* The scope of process row. */ /* ============================================= * * Waiting for L(:,kk) for outer-product uptate * * if iam in U(kk,:), then the diagonal block * * did not reach in time for panel factorization * * of U(k,:). * * ============================================= */ #if ( PROFlevel>=1 ) TIC (t1); #endif if (recv_req[0] != MPI_REQUEST_NULL) { MPI_Wait (&recv_req[0], &status); MPI_Get_count (&status, mpi_int_t, &msgcnt[0]); recv_req[0] = MPI_REQUEST_NULL; } else { msgcnt[0] = msgcntsU[look_id][0]; #if (DEBUGlevel>=2) printf("\t[%d] k=%d, look_id=%d, recv_req[0] == MPI_REQUEST_NULL, msgcnt[0] = %d\n", iam, k, look_id, msgcnt[0]); #endif } if (recv_req[1] != MPI_REQUEST_NULL) { MPI_Wait (&recv_req[1], &status); MPI_Get_count (&status, SuperLU_MPI_DOUBLE_COMPLEX, &msgcnt[1]); recv_req[1] = MPI_REQUEST_NULL; } else { msgcnt[1] = msgcntsU[look_id][1]; #if (DEBUGlevel>=2) printf("\t[%d] k=%d, look_id=%d, recv_req[1] == MPI_REQUEST_NULL, msgcnt[1] = %d\n", iam, k, look_id, msgcnt[1]); #endif } #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_RIGHT] += t2; #endif #if ( DEBUGlevel>=2 ) printf("[%d] Recv L(:,%4d): #lsub %4d, #lusup %4d from Pc %2d\n", iam, k, msgcnt[0], msgcnt[1], kcol); fflush (stdout); #endif #if ( PRNTlevel==3 ) ++total_msg; if (!msgcnt[0]) ++zero_msg; #endif } else { msgcnt[0] = 0; } lsub = Lsub_buf_2[look_id]; lusup = Lval_buf_2[look_id]; } /* else if mycol = Pc(k) */ /* stat->time1 += SuperLU_timer_()-tt1; */ scp = &grid->cscp; /* The scope of process column. 
*/ /* tt1 = SuperLU_timer_(); */ if (myrow == krow) { /* I own U(k,:) */ lk = LBi (k, grid); usub = Ufstnz_br_ptr[lk]; uval = Unzval_br_ptr[lk]; if (factoredU[k0] == -1) { /* Parallel triangular solve across process row *krow* -- U(k,j) = L(k,k) \ A(k,j). */ double ttt2 = SuperLU_timer_(); #ifdef _OPENMP /* #pragma omp parallel */ /* Sherry -- parallel done inside pzgstrs2 */ #endif { PZGSTRS2 (k0, k, Glu_persist, grid, Llu, stat); } pdgstrs2_timer += SuperLU_timer_() - ttt2; /* Sherry -- need to set factoredU[k0] = 1; ?? */ /* Multicasts U(k,:) along process columns. */ if ( usub ) { msgcnt[2] = usub[2]; /* metadata size */ msgcnt[3] = usub[1]; /* Uval[] size */ } else { msgcnt[2] = msgcnt[3] = 0; } if (ToSendD[lk] == YES) { for (pi = 0; pi < Pr; ++pi) { if (pi != myrow) { /* Matching recv was pre-posted before */ #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Send (usub, msgcnt[2], mpi_int_t, pi, SLU_MPI_TAG (2, k0), /* (4*k0+2)%tag_ub */ scp->comm); MPI_Send (uval, msgcnt[3], SuperLU_MPI_DOUBLE_COMPLEX, pi, SLU_MPI_TAG (3, k0), /* (4*k0+3)%tag_ub */ scp->comm); #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_DOWN] += t2; msg_cnt += 2; msg_vol += msgcnt[2] * iword + msgcnt[3] * dword; #endif #if ( DEBUGlevel>=2 ) printf ("[%d] Send U(%4d,:) down to Pr %2d\n", iam, k, pi); #endif } /* if pi ... */ } /* for pi ... */ } /* if ToSendD ... */ } else { /* Panel U(k,:) already factorized from previous look-ahead */ /* ================================================ * * Wait for downward sending of U(k,:) to complete * * for outer-product update. 
* * ================================================ */ if (ToSendD[lk] == YES) { #if ( PROFlevel>=1 ) TIC (t1); #endif for (pi = 0; pi < Pr; ++pi) { if (pi != myrow) { MPI_Wait (&send_reqs_u[look_id][pi], &status); MPI_Wait (&send_reqs_u[look_id][pi + Pr], &status); } } #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_DOWN] += t2; #endif } msgcnt[2] = msgcntsU[look_id][2]; msgcnt[3] = msgcntsU[look_id][3]; } /* stat->time2 += SuperLU_timer_()-tt1; */ } else { /* myrow != krow */ /* ========================================== * * Wait for U(k,:) for outer-product updates. * * ========================================== */ if (ToRecv[k] == 2) { /* Recv block row U(k,:). */ #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Wait (&recv_reqs_u[look_id][0], &status); MPI_Get_count (&status, mpi_int_t, &msgcnt[2]); MPI_Wait (&recv_reqs_u[look_id][1], &status); MPI_Get_count (&status, SuperLU_MPI_DOUBLE_COMPLEX, &msgcnt[3]); #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_DOWN] += t2; #endif usub = Usub_buf; uval = Uval_buf; #if ( DEBUGlevel>=2 ) printf ("[%d] Recv U(%4d,:) from Pr %2d\n", iam, k, krow); #endif #if ( PRNTlevel==3 ) ++total_msg; if (!msgcnt[2]) ++zero_msg; #endif } else { msgcnt[2] = 0; } /* stat->time6 += SuperLU_timer_()-tt1; */ } /* end if myrow == Pr(k) */ /* * Parallel rank-k update; pair up blocks L(i,k) and U(k,j). * for (j = k+1; k < N; ++k) { * for (i = k+1; i < N; ++i) * if ( myrow == PROW( i, grid ) && mycol == PCOL( j, grid ) * && L(i,k) != 0 && U(k,j) != 0 ) * A(i,j) = A(i,j) - L(i,k) * U(k,j); */ msg0 = msgcnt[0]; msg2 = msgcnt[2]; /* tt1 = SuperLU_timer_(); */ if (msg0 && msg2) { /* L(:,k) and U(k,:) are not empty. */ nsupr = lsub[1]; /* LDA of lusup. */ if (myrow == krow) { /* Skip diagonal block L(k,k). 
*/ lptr0 = BC_HEADER + LB_DESCRIPTOR + lsub[BC_HEADER + 1]; luptr0 = knsupc; nlb = lsub[0] - 1; } else { lptr0 = BC_HEADER; luptr0 = 0; nlb = lsub[0]; } iukp = BR_HEADER; /* Skip header; Pointer to index[] of U(k,:) */ rukp = 0; /* Pointer to nzval[] of U(k,:) */ nub = usub[0]; /* Number of blocks in the block row U(k,:) */ klst = FstBlockC (k + 1); /* ------------------------------------------------------------- Update the look-ahead block columns A(:,k+1:k+num_look_ahead) ------------------------------------------------------------- */ iukp0 = iukp; rukp0 = rukp; /* reorder the remaining columns in bottome-up */ /* TAU_STATIC_TIMER_START("LOOK_AHEAD_UPDATE"); */ for (jj = 0; jj < nub; jj++) { #ifdef ISORT iperm_u[jj] = iperm_c_supno[usub[iukp]]; /* Global block number of block U(k,j). */ perm_u[jj] = jj; #else perm_u[2 * jj] = iperm_c_supno[usub[iukp]]; /* Global block number of block U(k,j). */ perm_u[2 * jj + 1] = jj; #endif jb = usub[iukp]; /* Global block number of block U(k,j). */ nsupc = SuperSize (jb); iukp += UB_DESCRIPTOR; /* Start fstnz of block U(k,j). */ iukp += nsupc; } iukp = iukp0; #ifdef ISORT /* iperm_u is sorted based on elimination order; perm_u reorders the U blocks to match the elimination order. 
*/ isort (nub, iperm_u, perm_u); #else qsort (perm_u, (size_t) nub, 2 * sizeof (int_t), &superlu_sort_perm); #endif /************************************************************************/ double ttx =SuperLU_timer_(); //#include "zlook_ahead_update_v4.c" #include "zlook_ahead_update.c" lookaheadupdatetimer += SuperLU_timer_() - ttx; /************************************************************************/ /*ifdef OMP_LOOK_AHEAD */ /* TAU_STATIC_TIMER_STOP("LOOK_AHEAD_UPDATE"); */ } /* if L(:,k) and U(k,:) not empty */ /* stat->time3 += SuperLU_timer_()-tt1; */ /* ================== */ /* == post receive == */ /* ================== */ kk1 = SUPERLU_MIN (k0 + num_look_aheads, nsupers - 1); for (kk0 = k0 + 1; kk0 <= kk1; kk0++) { kk = perm_c_supno[kk0]; kcol = PCOL (kk, grid); if (look_ahead[kk] == k0) { if (mycol != kcol) { if (ToRecv[kk] >= 1) { scp = &grid->rscp; /* The scope of process row. */ look_id = kk0 % (1 + num_look_aheads); recv_req = recv_reqs[look_id]; #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Irecv (Lsub_buf_2[look_id], Llu->bufmax[0], mpi_int_t, kcol, SLU_MPI_TAG (0, kk0), /* (4*kk0)%tag_ub */ scp->comm, &recv_req[0]); MPI_Irecv (Lval_buf_2[look_id], Llu->bufmax[1], SuperLU_MPI_DOUBLE_COMPLEX, kcol, SLU_MPI_TAG (1, kk0), /* (4*kk0+1)%tag_ub */ scp->comm, &recv_req[1]); #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_RIGHT] += t2; #endif } } else { lk = LBj (kk, grid); /* Local block number. */ lsub1 = Lrowind_bc_ptr[lk]; lusup1 = Lnzval_bc_ptr[lk]; if (factored[kk] == -1) { /* Factor diagonal and subdiagonal blocks and test for exact singularity. */ factored[kk] = 0; /* flag column kk as factored */ double ttt1 = SuperLU_timer_(); PZGSTRF2 (options, kk0, kk, thresh, Glu_persist, grid, Llu, U_diag_blk_send_req, tag_ub, stat, info); pdgstrf2_timer += SuperLU_timer_() - ttt1; /* Process column *kcol+1* multicasts numeric values of L(:,k+1) to process rows. 
*/ look_id = kk0 % (1 + num_look_aheads); send_req = send_reqs[look_id]; msgcnt = msgcnts[look_id]; if (lsub1) { msgcnt[0] = lsub1[1] + BC_HEADER + lsub1[0] * LB_DESCRIPTOR; msgcnt[1] = lsub1[1] * SuperSize (kk); } else { msgcnt[0] = 0; msgcnt[1] = 0; } scp = &grid->rscp; /* The scope of process row. */ for (pj = 0; pj < Pc; ++pj) { if (ToSendR[lk][pj] != EMPTY) { #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Isend (lsub1, msgcnt[0], mpi_int_t, pj, SLU_MPI_TAG (0, kk0), /* (4*kk0)%tag_ub */ scp->comm, &send_req[pj]); MPI_Isend (lusup1, msgcnt[1], SuperLU_MPI_DOUBLE_COMPLEX, pj, SLU_MPI_TAG (1, kk0), /* (4*kk0+1)%tag_ub */ scp->comm, &send_req[pj + Pc]); #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_RIGHT] += t2; ++prof_sendR[lk]; #endif } } /* end for pj ... */ } /* if factored[kk] ... */ } } } double tsch = SuperLU_timer_(); /*******************************************************************/ #ifdef GPU_ACC #include "zSchCompUdt-cuda.c" #else /*#include "SchCompUdt--Phi-2Ddynamic-alt.c"*/ //#include "zSchCompUdt-2Ddynamic_v6.c" #include "zSchCompUdt-2Ddynamic.c" #endif /*uncomment following to compare against SuperLU 3.3 baseline*/ /* #include "SchCompUdt--baseline.c" */ /************************************************************************/ NetSchurUpTimer += SuperLU_timer_() - tsch; } /* MAIN LOOP for k0 = 0, ... */ /* ################################################################## ** END MAIN LOOP: for k0 = ... 
################################################################## */ pxgstrfTimer = SuperLU_timer_() - pxgstrfTimer; #if ( PRNTlevel>=2 ) /* Print detailed statistics */ /* Updating total flops */ double allflops; MPI_Reduce(&RemainGEMM_flops, &allflops, 1, MPI_DOUBLE, MPI_SUM, 0, grid->comm); if ( iam==0 ) { printf("\nInitialization time\t%8.2lf seconds\n" "\t Serial: compute static schedule, allocate storage\n", InitTimer); printf("\n==== Time breakdown in factorization (rank 0) ====\n"); printf("Panel factorization \t %8.2lf seconds\n", pdgstrf2_timer + pdgstrs2_timer); printf(".. L-panel pxgstrf2 \t %8.2lf seconds\n", pdgstrf2_timer); printf(".. U-panel pxgstrs2 \t %8.2lf seconds\n", pdgstrs2_timer); printf("Time in Look-ahead update \t %8.2lf seconds\n", lookaheadupdatetimer); printf("Time in Schur update \t\t %8.2lf seconds\n", NetSchurUpTimer); printf(".. Time to Gather L buffer\t %8.2lf (Separate L panel by Lookahead/Remain)\n", GatherLTimer); printf(".. Time to Gather U buffer\t %8.2lf \n", GatherUTimer); printf(".. Time in GEMM %8.2lf \n", LookAheadGEMMTimer + RemainGEMMTimer); printf("\t* Look-ahead\t %8.2lf \n", LookAheadGEMMTimer); printf("\t* Remain\t %8.2lf\tFlops %8.2le\tGflops %8.2lf\n", RemainGEMMTimer, allflops, allflops/RemainGEMMTimer*1e-9); printf(".. 
Time to Scatter %8.2lf \n", LookAheadScatterTimer + RemainScatterTimer); printf("\t* Look-ahead\t %8.2lf \n", LookAheadScatterTimer); printf("\t* Remain\t %8.2lf \n", RemainScatterTimer); printf("Total factorization time \t: %8.2lf seconds, \n", pxgstrfTimer); printf("--------\n"); printf("GEMM maximum block: %d-%d-%d\n", gemm_max_m, gemm_max_k, gemm_max_n); } #endif #if ( DEBUGlevel>=3 ) for (i = 0; i < Pr * Pc; ++i) { if (iam == i) { zPrintLblocks(iam, nsupers, grid, Glu_persist, Llu); zPrintUblocks(iam, nsupers, grid, Glu_persist, Llu); printf ("(%d)\n", iam); PrintInt10 ("Recv", nsupers, Llu->ToRecv); } MPI_Barrier (grid->comm); } #endif /******************************************************** * Free memory * ********************************************************/ if (Pr * Pc > 1) { SUPERLU_FREE (Lsub_buf_2[0]); /* also free Lsub_buf_2[1] */ SUPERLU_FREE (Lval_buf_2[0]); /* also free Lval_buf_2[1] */ if (Llu->bufmax[2] != 0) SUPERLU_FREE (Usub_buf_2[0]); if (Llu->bufmax[3] != 0) SUPERLU_FREE (Uval_buf_2[0]); if (U_diag_blk_send_req[myrow] != MPI_REQUEST_NULL) { /* wait for last Isend requests to complete, deallocate objects */ for (krow = 0; krow < Pr; ++krow) { if (krow != myrow) MPI_Wait (U_diag_blk_send_req + krow, &status); } } SUPERLU_FREE (U_diag_blk_send_req); } log_memory( -((Llu->bufmax[0] + Llu->bufmax[2]) * (num_look_aheads + 1) * iword + (Llu->bufmax[1] + Llu->bufmax[3]) * (num_look_aheads + 1) * dword), stat ); SUPERLU_FREE (Lsub_buf_2); SUPERLU_FREE (Lval_buf_2); SUPERLU_FREE (Usub_buf_2); SUPERLU_FREE (Uval_buf_2); SUPERLU_FREE (perm_c_supno); SUPERLU_FREE (perm_u); #ifdef ISORT SUPERLU_FREE (iperm_u); #endif SUPERLU_FREE (look_ahead); SUPERLU_FREE (factoredU); SUPERLU_FREE (factored); log_memory(-(6 * nsupers * iword), stat); for (i = 0; i <= num_look_aheads; i++) { SUPERLU_FREE (msgcnts[i]); SUPERLU_FREE (msgcntsU[i]); } SUPERLU_FREE (msgcnts); SUPERLU_FREE (msgcntsU); for (i = 0; i <= num_look_aheads; i++) { SUPERLU_FREE (send_reqs_u[i]); 
SUPERLU_FREE (recv_reqs_u[i]); SUPERLU_FREE (send_reqs[i]); SUPERLU_FREE (recv_reqs[i]); } SUPERLU_FREE (recv_reqs_u); SUPERLU_FREE (send_reqs_u); SUPERLU_FREE (recv_reqs); SUPERLU_FREE (send_reqs); #ifdef GPU_ACC checkCuda (cudaFreeHost (bigV)); checkCuda (cudaFreeHost (bigU)); cudaFree( (void*)dA ); /* Sherry added */ cudaFree( (void*)dB ); cudaFree( (void*)dC ); SUPERLU_FREE( handle ); SUPERLU_FREE( streams ); SUPERLU_FREE( stream_end_col ); #else // #ifdef __INTEL_COMPILER // _mm_free (bigU); // _mm_free (bigV); // #else SUPERLU_FREE (bigV); SUPERLU_FREE (bigU); // #endif /* Decrement freed memory from memory stat. */ log_memory(-(bigv_size + bigu_size) * dword, stat); #endif SUPERLU_FREE (Llu->ujrow); // SUPERLU_FREE (tempv2d);/* Sherry */ SUPERLU_FREE (indirect); SUPERLU_FREE (indirect2); /* Sherry added */ ldt = sp_ienv_dist(3); log_memory( -(3 * ldt *ldt * dword + 2 * ldt * num_threads * iword), stat ); /* Sherry added */ SUPERLU_FREE(omp_loop_time); SUPERLU_FREE(full_u_cols); SUPERLU_FREE(blk_ldu); #if ( PRNTlevel>=1 ) log_memory(-2 * ncb * dword, stat); #endif SUPERLU_FREE(lookAheadFullRow); SUPERLU_FREE(lookAheadStRow); SUPERLU_FREE(lookAhead_lptr); SUPERLU_FREE(lookAhead_ib); SUPERLU_FREE(RemainStRow); SUPERLU_FREE(Remain_lptr); SUPERLU_FREE(Remain_ib); SUPERLU_FREE(Remain_info); SUPERLU_FREE(lookAhead_L_buff); SUPERLU_FREE(Remain_L_buff); log_memory( -(3 * mrb * iword + mrb * sizeof(Remain_info_t) + ldt * ldt * (num_look_aheads + 1) * dword + Llu->bufmax[1] * dword), stat ); SUPERLU_FREE(Ublock_info); SUPERLU_FREE(Ublock_info_iukp); SUPERLU_FREE(Ublock_info_rukp); SUPERLU_FREE(Ublock_info_jb); #if ( PROFlevel>=1 ) TIC (t1); #endif /* Prepare error message - find the smallesr index i that U(i,i)==0 */ if ( *info == 0 ) *info = n + 1; MPI_Allreduce (info, &iinfo, 1, MPI_INT, MPI_MIN, grid->comm); if ( iinfo == n + 1 ) *info = 0; else *info = iinfo; #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; { float msg_vol_max, msg_vol_sum, msg_cnt_max, 
msg_cnt_sum; MPI_Reduce (&msg_cnt, &msg_cnt_sum, 1, MPI_FLOAT, MPI_SUM, 0, grid->comm); MPI_Reduce (&msg_cnt, &msg_cnt_max, 1, MPI_FLOAT, MPI_MAX, 0, grid->comm); MPI_Reduce (&msg_vol, &msg_vol_sum, 1, MPI_FLOAT, MPI_SUM, 0, grid->comm); MPI_Reduce (&msg_vol, &msg_vol_max, 1, MPI_FLOAT, MPI_MAX, 0, grid->comm); if ( iam==0 ) { printf ("\tPZGSTRF comm stat:" "\tAvg\tMax\t\tAvg\tMax\n" "\t\t\tCount:\t%.0f\t%.0f\tVol(MB)\t%.2f\t%.2f\n", msg_cnt_sum / Pr / Pc, msg_cnt_max, msg_vol_sum / Pr / Pc * 1e-6, msg_vol_max * 1e-6); printf("\t\tcomm time on task 0: %8.2lf\n" "\t\t\tcomm down DIAG block %8.2lf\n" "\t\t\tcomm right L panel %8.2lf\n" "\t\t\tcomm down U panel %8.2lf\n", stat->utime[COMM], stat->utime[COMM_DIAG], stat->utime[COMM_RIGHT], stat->utime[COMM_DOWN]); //#include <float.h> //int Digs = DECIMAL_DIG; printf("gemm_count %d\n", gemm_count); for (i = 0; i < gemm_count; ++i) fprintf(fgemm, "%8d%8d%8d\t %20.16e\t%8d\n", gemm_stats[i].m, gemm_stats[i].n, gemm_stats[i].k, gemm_stats[i].microseconds, prof_sendR[i]); fclose(fgemm); } SUPERLU_FREE(gemm_stats); SUPERLU_FREE(prof_sendR); } #endif #if ( PRNTlevel==3 ) MPI_Allreduce (&zero_msg, &iinfo, 1, MPI_INT, MPI_SUM, grid->comm); if (!iam) printf (".. # msg of zero size\t%d\n", iinfo); MPI_Allreduce (&total_msg, &iinfo, 1, MPI_INT, MPI_SUM, grid->comm); if (!iam) printf (".. # total msg\t%d\n", iinfo); #endif #if ( DEBUGlevel>=3 ) for (i = 0; i < Pr * Pc; ++i) { if (iam == i) { zPrintLblocks (iam, nsupers, grid, Glu_persist, Llu); zPrintUblocks (iam, nsupers, grid, Glu_persist, Llu); printf ("(%d)\n", iam); PrintInt10 ("Recv", nsupers, Llu->ToRecv); } MPI_Barrier (grid->comm); } #endif #if ( DEBUGlevel>=3 ) printf ("(%d) num_copy=%d, num_update=%d\n", iam, num_copy, num_update); #endif #if ( DEBUGlevel>=1 ) CHECK_MALLOC (iam, "Exit pzgstrf()"); #endif return 0; } /* PZGSTRF */
Layer_Conv2D.h
/* * Layers.h * rl * * Created by Guido Novati on 11.02.16. * Copyright 2016 ETH Zurich. All rights reserved. * */ #pragma once #include "Layers.h" template <int InX, int InY, int InC, // input image: x:width, y:height, c:color channels int KnX, int KnY, int KnC, // filter: x:width, y:height, c:color channels int OpX, int OpY // output img: x:width, y:height, same color channels as KnC > struct Conv2DLayer : public Layer { Params *allocate_params() const override { // number of kernel parameters: // 2d kernel size * number of inp channels * number of out channels const int nParams = KnY * KnX * InC * KnC; const int nBiases = KnC; return new Params(nParams, nBiases); } Conv2DLayer(const int _ID) : Layer(OpX * OpY * KnC, _ID) { static_assert(InX > 0 && InY > 0 && InC > 0, "Invalid input"); static_assert(KnX > 0 && KnY > 0 && KnC > 0, "Invalid kernel"); static_assert(OpX > 0 && OpY > 0, "Invalid outpus"); print(); } void print() { printf("(%d) Conv: In:[%d %d %d %d %d] F:[%d %d %d %d] Out:[%d %d %d]\n", ID, OpY, OpX, KnY, KnX, InC, KnY, KnX, InC, KnC, OpX, OpY, KnC); } void forward(const std::vector<Activation *> &act, const std::vector<Params *> &param) const override { assert(act[ID]->layersSize == OpY * OpX * KnC); assert(act[ID - 1]->layersSize == OpY * OpX * KnY * KnX * InC); assert(param[ID]->nWeights == KnY * KnX * InC * KnC); assert(param[ID]->nBiases == KnC); const int batchSize = act[ID]->batchSize; const Real *const INP = act[ID - 1]->output; Real *const OUT = act[ID]->output; // reset layers' output with the bias #pragma omp parallel for collapse(2) for (int i = 0; i < batchSize * OpY * OpX; ++i) { for (int j = 0; j < KnC; ++j) { OUT[i * KnC + j] = param[ID]->biases[j]; } } // perform the forward step with gemm gemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, batchSize * OpY * OpX, KnC, KnY * KnX * InC, 1., INP, KnY * KnX * InC, param[ID]->weights, KnC, 1., OUT, KnC); } void bckward(const std::vector<Activation *> &act, const std::vector<Params *> &param, 
const std::vector<Params *> &grad) const override { const int batchSize = act[ID]->batchSize; const Real *const dEdO = act[ID]->dError_dOutput; // BackProp to compute bias gradient: dError / dBias { Real *const grad_B = grad[ID]->biases; std::fill(grad_B, grad_B + KnC, 0.); #pragma omp parallel for collapse(2) for (int i = 0; i < batchSize * OpY * OpX; ++i) { for (int j = 0; j < KnC; ++j) { #pragma omp atomic grad_B[j] += dEdO[i * KnC + j]; } } } // BackProp to compute weight gradient: dError / dWeights { Real *const grad_W = grad[ID]->weights; std::fill(grad_W, grad_W + KnY * KnX * InC * KnC, 0); gemm(CblasRowMajor, CblasTrans, CblasNoTrans, KnY * KnX * InC, KnC, batchSize * OpY * OpX, 1., act[ID - 1]->output, KnY * KnX * InC, dEdO, KnC, 0., grad_W, KnC); } // BackProp to compute dEdO of prev layer { Real *const errinp = act[ID - 1]->dError_dOutput; std::fill(errinp, errinp + batchSize * OpY * OpX * KnY * KnX * InC, 0.); gemm(CblasRowMajor, CblasNoTrans, CblasTrans, batchSize * OpY * OpX, KnY * KnX * InC, KnC, 1., dEdO, KnC, param[ID]->weights, KnC, 0., errinp, KnY * KnX * InC); } } void init(std::mt19937 &gen, const std::vector<Params *> &param) const override { // get pointers to layer's weights and bias Real *const W = param[ID]->weights, *const B = param[ID]->biases; // initialize weights with Xavier initialization const int nAdded = KnX * KnY * InC, nW = param[ID]->nWeights; const Real scale = std::sqrt(6.0 / (nAdded + KnC)); std::uniform_real_distribution<Real> dis(-scale, scale); std::generate(W, W + nW, [&]() { return dis(gen); }); std::fill(B, B + KnC, 0); } };
dyn_cc.h
#ifndef DYN_CC_H_
#define DYN_CC_H_

#include <random>
#include "traversal.h"
#include "../common/timer.h"
#include "sliding_queue_dynamic.h"
#include "../common/pvector.h"

/* Algorithm: Incremental CC and CC starting from scratch */

#include <fstream>
extern std::ofstream algF; // per-round algorithm timings are appended here

// Component label of a vertex.
// NOTE(review): stored as float although labels are vertex IDs; IDs above
// 2^24 are not exactly representable in float -- confirm this is intended.
typedef float Component;

// One label-relaxation sweep over all vertices flagged "affected" by a batch
// of graph updates. Each affected vertex takes the minimum component label
// among its neighbors; any vertex whose label dropped (or that is its own
// component root) pushes its neighbors onto `queue` for further processing.
template<typename T>
void CCIter0(T* ds, SlidingQueue<NodeID>& queue){
    // visited[] guards against enqueueing the same vertex twice in one sweep
    pvector<bool> visited(ds->num_nodes, false);

    #pragma omp parallel
    {
        // thread-local queue buffer, flushed into the shared queue at the end
        QueueBuffer<NodeID> lqueue(queue);
        #pragma omp for schedule(dynamic, 64)
        for(NodeID n=0; n < ds->num_nodes; n++){
            if(ds->affected[n]){
                Component old_comp = ds->property[n];
                Component new_comp = old_comp;
                // calculate new component: min over in-neighbors, and also
                // out-neighbors when directed (weak connectivity ignores
                // edge direction)
                for(auto v: in_neigh(n, ds)){
                    if(ds->property[v] < new_comp) new_comp = ds->property[v];
                }
                if(ds->directed){
                    for(auto v: out_neigh(n, ds)){
                        if(ds->property[v] < new_comp) new_comp = ds->property[v];
                    }
                }
                assert(new_comp<= old_comp); // labels only ever decrease
                ds->property[n] = new_comp;

                // re-activate the neighborhood if the label dropped, or if n
                // was its own root (old_comp == n) and may need to spread it
                bool trigger = ((ds->property[n] < old_comp) || (old_comp == n));
                if(trigger){
                    //put the out-neighbors into active list
                    for(auto v: in_neigh(n, ds)){
                        bool curr_val = visited[v];
                        if(!curr_val){
                            // CAS ensures exactly one thread enqueues v
                            if(compare_and_swap(visited[v], curr_val, true))
                                lqueue.push_back(v);
                        }
                    }
                    if(ds->directed){
                        for(auto v: out_neigh(n, ds)){
                            bool curr_val = visited[v];
                            if(!curr_val){
                                if(compare_and_swap(visited[v], curr_val, true))
                                    lqueue.push_back(v);
                            }
                        }
                    }
                }
            }
        }
        lqueue.flush();
    }
}

// Incremental CC driver: label newly added vertices, run one sweep over the
// affected vertices (CCIter0), then iterate on the resulting worklist until
// no label changes. Clears ds->affected[] for the next update round and logs
// the elapsed time to algF.
template<typename T>
void dynCCAlg(T* ds){
    std::cout << "Running dynamic CC" << std::endl;

    Timer t;
    t.Start();

    SlidingQueue<NodeID> queue(ds->num_nodes);

    // Assign component of newly added vertices
    // (property == -1 marks a vertex that has never been labeled)
    #pragma omp parallel for schedule(dynamic, 64)
    for(NodeID n = 0; n < ds->num_nodes; n++){
        if(ds->property[n] == -1){
            ds->property[n] = n; // a fresh vertex is its own component
        }
    }

    CCIter0(ds, queue);
    queue.slide_window();

    // Iterate to a fixed point: each pass relaxes the vertices enqueued by
    // the previous pass.
    while(!queue.empty()){
        //std::cout << "Queue not empty, Queue size: " << queue.size() << std::endl;
        pvector<bool> visited(ds->num_nodes, false);

        #pragma omp parallel
        {
            QueueBuffer<NodeID> lqueue(queue);
            #pragma omp for schedule(dynamic, 64)
            for (auto q_iter = queue.begin(); q_iter < queue.end(); q_iter++){
                NodeID n = *q_iter;
                Component old_comp = ds->property[n];
                Component new_comp = old_comp;
                // calculate new component
                for(auto v: in_neigh(n, ds)){
                    if(ds->property[v] < new_comp) new_comp = ds->property[v];
                }
                if(ds->directed){
                    for(auto v: out_neigh(n, ds)){
                        if(ds->property[v] < new_comp) new_comp = ds->property[v];
                    }
                }
                assert(new_comp<= old_comp);
                ds->property[n] = new_comp;

                // unlike CCIter0, only a strict label decrease re-activates
                // the neighborhood here
                bool trigger = (ds->property[n] < old_comp);
                if(trigger){
                    for(auto v: in_neigh(n, ds)){
                        bool curr_val = visited[v];
                        if(!curr_val){
                            if(compare_and_swap(visited[v], curr_val, true))
                                lqueue.push_back(v);
                        }
                    }
                    if(ds->directed){
                        for(auto v: out_neigh(n, ds)){
                            bool curr_val = visited[v];
                            if(!curr_val){
                                if(compare_and_swap(visited[v], curr_val, true))
                                    lqueue.push_back(v);
                            }
                        }
                    }
                }
            }
            lqueue.flush();
        }
        queue.slide_window();
    }

    // clear affected array to get ready for the next update round
    #pragma omp parallel for schedule(dynamic, 64)
    for(NodeID i = 0; i < ds->num_nodes; i++){
        ds->affected[i] = false;
    }

    t.Stop();
    algF << t.Seconds() << std::endl;
}

// Static CC recomputation (Shiloach-Vishkin style hook-and-compress),
// used as the recompute-from-scratch baseline. Alternates a "hook" phase
// (attach the higher-labeled root under the lower label) with a "compress"
// phase (pointer-jump every vertex to its root) until no change occurs.
template<typename T>
void CCStartFromScratch(T* ds){
    //std::cout << "Number of nodes: "<< ds->num_nodes << std::endl;
    std::cout << "Running CC from scratch" << std::endl;

    Timer t;
    t.Start();

    // every vertex starts as its own component
    #pragma omp parallel for
    for (NodeID n=0; n < ds->num_nodes; n++) ds->property[n] = n;

    bool change = true;
    int num_iter = 0;

    if(ds->directed){
        while (change){
            change = false;
            num_iter++;
            // Hook phase: for each edge, the higher component root adopts
            // the lower component's label.
            #pragma omp parallel for
            for (NodeID u=0; u < ds->num_nodes; u++) {
                for (NodeID v : out_neigh(u, ds)){
                    NodeID comp_u = ds->property[u];
                    NodeID comp_v = ds->property[v];
                    if (comp_u == comp_v) continue;
                    // Hooking condition so lower component ID wins independent of direction
                    NodeID high_comp = comp_u > comp_v ? comp_u : comp_v;
                    NodeID low_comp = comp_u + (comp_v - high_comp); // == min(comp_u, comp_v)
                    // only hook roots (self-labeled vertices) to avoid cycles
                    if (high_comp == ds->property[high_comp]) {
                        change = true;
                        ds->property[high_comp] = low_comp;
                    }
                }
            }
            // Compress phase: pointer-jump each vertex to its root
            #pragma omp parallel for
            for (NodeID n=0; n < ds->num_nodes; n++){
                while (ds->property[n] != ds->property[ds->property[n]]){
                    ds->property[n] = ds->property[ds->property[n]];
                }
            }
        }
    }
    else{
        while (change) {
            change = false;
            num_iter++;
            #pragma omp parallel for
            for (NodeID u=0; u < ds->num_nodes; u++) {
                NodeID comp_u = ds->property[u];
                for (NodeID v : out_neigh(u, ds)) {
                    NodeID comp_v = ds->property[v];
                    // To prevent cycles, we only perform a hook in a consistent direction
                    // (comp_u < comp_v). Since the graph is undirected, the condition
                    // will be true from one side.
                    if ((comp_u < comp_v) && (comp_v == ds->property[comp_v])) {
                        change = true;
                        ds->property[comp_v] = comp_u;
                    }
                }
            }
            // Compress phase
            #pragma omp parallel for
            for (NodeID n=0; n < ds->num_nodes; n++) {
                while (ds->property[n] != ds->property[ds->property[n]]) {
                    ds->property[n] = ds->property[ds->property[n]];
                }
            }
        }
    }

    t.Stop();
    algF << t.Seconds() << std::endl;
    //std::cout << "Shiloach-Vishkin took " << num_iter << " iterations" << std::endl;
}

#endif  // DYN_CC_H_
opencl_7z_fmt_plug.c
/*
 * Modified by Dhiru Kholia <dhiru at openwall.com> for 7z format.
 *
 * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */
#ifdef HAVE_OPENCL

#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_sevenzip;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_sevenzip);
#else

#include <string.h>
#include <openssl/aes.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#include "arch.h"
#include "formats.h"
#include "common.h"
#include "misc.h"
#include "common-opencl.h"
#include "options.h"
#include "crc32.h"
#include "stdint.h"
#include "unicode.h"
#include "memdbg.h"

#define FORMAT_LABEL "7z-opencl"
#define FORMAT_NAME "7-Zip"
#define FORMAT_TAG "$7z$"
#define TAG_LENGTH 4
#define ALGORITHM_NAME "SHA256 OPENCL AES"
#define BENCHMARK_COMMENT " (512K iterations)"
#define BENCHMARK_LENGTH -1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Max password length in UTF-16 code units: (55 - 8) / 2 = 23 */
#define PLAINTEXT_LENGTH ((55-8)/2)
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4

/* Upper bound on the encrypted data blob carried in one hash (256 KiB) */
#define BIG_ENOUGH (8192 * 32)

/* Host-side mirrors of the structs used by the OpenCL kernel.
 * NOTE(review): layouts must stay in sync with $JOHN/kernels/7z_kernel.cl --
 * verify against the kernel source when changing any of these. */

/* One candidate password, as UTF-16 code units plus its length */
typedef struct {
	uint32_t length;
	uint16_t v[PLAINTEXT_LENGTH];
} sevenzip_password;

/* Per-candidate result: the derived 256-bit AES key */
typedef struct {
	uint8_t key[32];
} sevenzip_hash;

/* Salt data uploaded to the device */
typedef struct {
	uint32_t length;
	uint32_t iterations; /* NumCyclesPower: key derivation runs 2^iterations rounds */
	uint8_t salt[16];
} sevenzip_salt;

/* SHA-256 running state kept on the device between loop-kernel invocations */
typedef struct {
	cl_uint total[2];
	cl_uint state[8];
	cl_uchar buffer[64];
} SHA256_CTX;

/* Per-candidate key-derivation state carried across HASH_LOOPS-sized chunks */
typedef struct {
	cl_ulong t;
	SHA256_CTX ctx;
	cl_uint len;
	cl_ushort buffer[PLAINTEXT_LENGTH];
} sevenzip_state;

static int *cracked;     /* per-candidate crack flags for the current batch */
static int any_cracked;  /* nonzero iff any candidate in the batch cracked */

/* Parsed form of one "$7z$..." ciphertext */
static struct custom_salt {
	int NumCyclesPower;
	int SaltSize;
	int ivSize;
	int type;
	unsigned char data[BIG_ENOUGH]; /* encrypted payload */
	unsigned char iv[16];
	unsigned char salt[16];
	unsigned int crc;  /* expected CRC-32 of the decrypted payload */
	int length;	/* used in decryption */
	int unpacksize;	/* used in CRC calculation */
} *cur_salt;

/* Self-test vectors; format:
 * $7z$type$NumCyclesPower$SaltSize$salt$ivSize$iv$crc$length$unpacksize$data */
static struct fmt_tests sevenzip_tests[] = {
	/* CRC checks passes for these hashes */
	{"$7z$0$19$0$1122$8$d1f50227759415890000000000000000$1412385885$112$112$5e5b8b734adf52a64c541a5a5369023d7cccb78bd910c0092535dfb013a5df84ac692c5311d2e7bbdc580f5b867f7b5dd43830f7b4f37e41c7277e228fb92a6dd854a31646ad117654182253706dae0c069d3f4ce46121d52b6f20741a0bb39fc61113ce14d22f9184adafd6b5333fb1", "password"},
	{"$7z$0$19$0$1122$8$a264c94f2cd72bec0000000000000000$725883103$112$108$64749c0963e20c74602379ca740165b9511204619859d1914819bc427b7e5f0f8fc67f53a0b53c114f6fcf4542a28e4a9d3914b4bc76baaa616d6a7ec9efc3f051cb330b682691193e6fa48159208329460c3025fb273232b82450645f2c12a9ea38b53a2331a1d0858813c8bf25a831", "openwall"},
	/* padding check passes for these hashes */
	{"$7z$0$19$0$1122$8$732b59fd26896e410000000000000000$2955316379$192$183$7544a3a7ec3eb99a33d80e57907e28fb8d0e140ec85123cf90740900429136dcc8ba0692b7e356a4d4e30062da546a66b92ec04c64c0e85b22e3c9a823abef0b57e8d7b8564760611442ecceb2ca723033766d9f7c848e5d234ca6c7863a2683f38d4605322320765938049305655f7fb0ad44d8781fec1bf7a2cb3843f269c6aca757e509577b5592b60b8977577c20aef4f990d2cb665de948004f16da9bf5507bf27b60805f16a9fcc4983208297d3affc4455ca44f9947221216f58c337f", "password"},
	/* not supported hashes, will require validFolder check */
	// {"$7z$0$19$0$1122$8$5fdbec1569ff58060000000000000000$2465353234$112$112$58ba7606aafc7918e3db7f6e0920f410f61f01e9c1533c40850992fee4c5e5215bc6b4ea145313d0ac065b8ec5b47d9fb895bb7f97609be46107d71e219544cfd24b52c2ecd65477f72c466915dcd71b80782b1ac46678ab7f437fd9f7b8e9d9fad54281d252de2a7ae386a65fc69eda", "password"},
	{NULL}
};

/* Host buffers and device memory objects shared by all kernel launches */
static sevenzip_password *inbuffer;
static sevenzip_hash *outbuffer;
static sevenzip_salt currentsalt;
static cl_mem mem_in, mem_out, mem_state, mem_salt;
static cl_kernel sevenzip_init;

/* Buffer sizes depend on the auto-tuned global_work_size */
#define insize (sizeof(sevenzip_password) * global_work_size)
#define outsize (sizeof(sevenzip_hash) * global_work_size)
#define statesize (sizeof(sevenzip_state) * global_work_size)
#define saltsize (sizeof(sevenzip_salt))
#define cracked_size (sizeof(*cracked) * global_work_size)

#define MIN(a, b) (((a) > (b)) ? (b) : (a))

#define OCL_CONFIG "7z"
/* Key derivation iterations executed per loop-kernel launch */
#define HASH_LOOPS 4096
/* Number of loop-kernel launches: ceil(2^NumCyclesPower / HASH_LOOPS).
 * NOTE(review): expansion is unparenthesized; safe in the `i < LOOP_COUNT`
 * use below, but parenthesize the whole macro if it is ever reused. */
#define LOOP_COUNT ((1 << currentsalt.iterations) + HASH_LOOPS - 1) / HASH_LOOPS

#define STEP 0
#define SEED 16

/* Event index 2 (the loop kernel) is the split point for auto-tuning */
static int split_events[] = { 2, -1, -1 };

static const char *warn[] = {
	"xfer: " , ", init: ", ", crypt: ", ", xfer: "
};

// This file contains auto-tuning routine(s). It has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"

/* ------- Helper functions ------- */
/* Largest work-group size supported by both kernels */
static size_t get_task_max_work_group_size()
{
	size_t s;

	s = autotune_get_task_max_work_group_size(FALSE, 0, sevenzip_init);
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel));
	return s;
}

/* No fixed upper bound on the global work size */
static size_t get_task_max_size()
{
	return 0;
}

/* Default local work size: small for CPU devices, 64 for GPUs */
static size_t get_default_workgroup()
{
	if (cpu(device_info[gpu_id]))
		return get_platform_vendor_id(platform_id) == DEV_INTEL ?
			8 : 1;
	else
		return 64;
}

/* Allocate host buffers and device memory, and bind kernel arguments.
 * Called by the auto-tuner for each candidate global_work_size. */
static void create_clobj(size_t global_work_size, struct fmt_main *self)
{
	cl_int cl_error;

	inbuffer = (sevenzip_password*) mem_calloc(insize);
	outbuffer = (sevenzip_hash*) mem_alloc(outsize);
	cracked = mem_calloc(cracked_size);

	// Allocate memory
	mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize,
	                        NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");

	mem_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, saltsize,
	                          NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem salt");

	mem_state = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, statesize,
	                           NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem state");

	mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize,
	                         NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	HANDLE_CLERROR(clSetKernelArg(sevenzip_init, 0, sizeof(mem_in),
	                              &mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(sevenzip_init, 1, sizeof(mem_salt),
	                              &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(sevenzip_init, 2, sizeof(mem_state),
	                              &mem_state), "Error while setting mem_state kernel argument");

	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_state),
	                              &mem_state), "Error while setting mem_state kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_salt),
	                              &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_out),
	                              &mem_out), "Error while setting mem_out kernel argument");
}

/* Release everything create_clobj() allocated */
static void release_clobj(void)
{
	HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
	HANDLE_CLERROR(clReleaseMemObject(mem_salt), "Release mem salt");
	HANDLE_CLERROR(clReleaseMemObject(mem_state), "Release mem state");
	HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

	MEM_FREE(inbuffer);
	MEM_FREE(outbuffer);
	MEM_FREE(cracked);
}

/* Format teardown: free buffers, kernels and the program */
static void done(void)
{
	release_clobj();

	HANDLE_CLERROR(clReleaseKernel(sevenzip_init), "Release kernel");
	HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
	HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
}

static int crypt_all(int *pcount, struct db_salt *salt);
static int crypt_all_benchmark(int *pcount, struct db_salt *salt);

/* Format setup: build the OpenCL program, create kernels, auto-tune */
static void init(struct fmt_main *self)
{
	CRC32_t crc;
	char build_opts[64];
	cl_int cl_error;

	CRC32_Init(&crc);
	snprintf(build_opts, sizeof(build_opts),
	         "-DPLAINTEXT_LENGTH=%d -DHASH_LOOPS=%d",
	         PLAINTEXT_LENGTH, HASH_LOOPS);
	opencl_init("$JOHN/kernels/7z_kernel.cl",
	                gpu_id, build_opts);

	sevenzip_init = clCreateKernel(program[gpu_id], "sevenzip_init",
	                               &cl_error);
	HANDLE_CLERROR(cl_error, "Error creating kernel");

	crypt_kernel = clCreateKernel(program[gpu_id], "sevenzip_crypt",
	                              &cl_error);
	HANDLE_CLERROR(cl_error, "Error creating kernel");

	// Initialize openCL tuning (library) for this format.
	opencl_init_auto_setup(SEED, HASH_LOOPS, split_events,
	                       warn, 2, self, create_clobj,
	                       release_clobj, sizeof(sevenzip_salt), 0);

	// Auto tune execution from shared/included code.
	/* Benchmark with the lighter crypt_all_benchmark, then restore */
	self->methods.crypt_all = crypt_all_benchmark;
	autotune_run(self, 1 << 19, 0, 15000000000ULL);
	self->methods.crypt_all = crypt_all;

	/* UTF-8 input may need up to 3 bytes per UTF-16 code unit */
	if (pers_opts.target_enc == UTF_8)
		self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH);
}

/* True iff q consists solely of hex digits */
static int ishex(char *q)
{
	while (atoi16[ARCH_INDEX(*q)] != 0x7F)
		q++;
	return !*q;
}

/* True iff q is a canonical unsigned decimal (round-trips through atou) */
static int isdecu(char *q)
{
	char buf[24];
	unsigned int x = atou(q);
	sprintf(buf, "%u", x);
	return !strcmp(q,buf);
}

/* True iff q is a canonical signed decimal (round-trips through atoi) */
static int isdec(char *q)
{
	char buf[24];
	int x = atoi(q);
	sprintf(buf, "%d", x);
	return !strcmp(q,buf);
}

/* Syntactic validation of one "$7z$..." ciphertext; field order is
 * type $ NumCyclesPower $ saltlen $ salt $ ivlen $ iv $ crc $ datalen
 * $ unpacksize $ data */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	int len, type, NumCyclesPower;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
		return 0;

	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += TAG_LENGTH;
	if ((p = strtok(ctcopy, "$")) == NULL)
		goto err;
	if (strlen(p) > 1)
		goto err;
	type = atoi(p);
	if (type != 0) /* only AES-only (type 0) hashes are supported */
		goto err;
	if ((p = strtok(NULL, "$")) == NULL) /* NumCyclesPower */
		goto err;
	if (strlen(p) > 2)
		goto err;
	NumCyclesPower = atoi(p);
	if (NumCyclesPower > 24 || NumCyclesPower < 1)
		goto err;
	if ((p = strtok(NULL, "$")) == NULL) /* salt length */
		goto err;
	len = atoi(p);
	if(len > 16 || len < 0) /* salt length */
		goto err;
	if ((p = strtok(NULL, "$")) == NULL) /* salt */
		goto err;
	if ((p = strtok(NULL, "$")) == NULL) /* iv length */
		goto err;
	if (strlen(p) > 2)
		goto err;
	len = atoi(p);
	if(len < 0 || len > 16) /* iv length */
		goto err;
	if ((p = strtok(NULL, "$")) == NULL) /* iv */
		goto err;
	if (!ishex(p))
		goto err;
	/* iv field is zero-padded to 16 bytes; bytes past ivlen must be "0" */
	if (strcmp(p+len*2, "0000000000000000"))
		goto err;
	if ((p = strtok(NULL, "$")) == NULL) /* crc */
		goto err;
	if (!isdecu(p))
		goto err;
	if ((p = strtok(NULL, "$")) == NULL) /* data length */
		goto err;
	len = atoi(p);
	if ((p = strtok(NULL, "$")) == NULL) /* unpacksize */
		goto err;
	if (!isdec(p))	/* no way to validate, other than atoi() works for it */
		goto err;
	if ((p = strtok(NULL, "$")) == NULL) /* data */
		goto err;
	if (strlen(p) != len * 2)	/* validates data_len atoi() */
		goto err;
	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

/* Parse a validated ciphertext into a struct custom_salt.
 * NOTE(review): the salt hex field is skipped without being decoded, so
 * cs->salt keeps whatever the static union held; set_salt() copies those
 * bytes anyway -- confirm supported hashes always carry an empty salt. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static union {
		struct custom_salt _cs;
		ARCH_WORD_32 dummy;
	} un;
	struct custom_salt *cs = &(un._cs);

	ctcopy += 4;
	p = strtok(ctcopy, "$");
	cs->type = atoi(p);
	p = strtok(NULL, "$");
	cs->NumCyclesPower = atoi(p);
	p = strtok(NULL, "$");
	cs->SaltSize = atoi(p);
	p = strtok(NULL, "$"); /* salt */
	p = strtok(NULL, "$");
	cs->ivSize = atoi(p);
	p = strtok(NULL, "$"); /* iv */
	for (i = 0; i < cs->ivSize; i++)
		cs->iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtok(NULL, "$"); /* crc */
	cs->crc = atou(p);
	p = strtok(NULL, "$");
	cs->length = atoi(p);
	p = strtok(NULL, "$");
	cs->unpacksize = atoi(p);
	p = strtok(NULL, "$"); /* crc */
	for (i = 0; i < cs->length; i++)
		cs->data[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)cs;
}

/* Make `salt` current and upload it to the device (non-blocking write) */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->SaltSize);
	currentsalt.length = cur_salt->SaltSize;
	currentsalt.iterations = cur_salt->NumCyclesPower;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt,
		CL_FALSE, 0, saltsize, &currentsalt, 0, NULL, NULL),
	    "Transfer salt to gpu");
}

/* Zero all queued candidate passwords */
static void clear_keys(void)
{
	memset(inbuffer, 0, insize);
}

/* Store one candidate password, converted to UTF-16LE */
static void sevenzip_set_key(char *key, int index)
{
	UTF16 c_key[PLAINTEXT_LENGTH + 1];
	int length = strlen(key);

	/* Convert password to utf-16-le format (--encoding aware) */
	length = enc_to_utf16(c_key, PLAINTEXT_LENGTH, (UTF8*)key, length);
	if (length <= 0) /* negative means truncation; use what fits */
		length = strlen16(c_key);
	inbuffer[index].length = length;
	memcpy(inbuffer[index].v, c_key, 2 * length);
}

/* Return candidate `index` converted back to the input encoding */
static char *get_key(int index)
{
	UTF16 c_key[PLAINTEXT_LENGTH + 1];
	int length = inbuffer[index].length;

	memcpy(c_key, inbuffer[index].v, 2 * length);
	c_key[length] = 0;

	return (char*)utf16_to_enc(c_key);
}

// XXX port Python code to C *OR* use code from LZMA SDK
/* Placeholder for a structural check of the decrypted folder; always 0 */
static int validFolder(unsigned char *data)
{
	// int numcoders = self._read64Bit(file)
	return 0;
}

/* Decrypt cur_salt->data with the derived key and decide whether the
 * candidate is correct: first by checking the zero padding beyond
 * unpacksize, then by comparing the CRC-32 of the plaintext.
 * Returns 0 on a match, -1 otherwise. */
static int sevenzip_decrypt(unsigned char *derived_key, unsigned char *data)
{
	/* NOTE(review): VLA of cur_salt->length (up to BIG_ENOUGH = 256 KiB)
	 * on the stack; confirm thread stack sizes can accommodate this
	 * under the OpenMP loop in crypt_all(). */
	unsigned char out[cur_salt->length];
	AES_KEY akey;
	unsigned char iv[16];
	union {
		unsigned char crcc[4];
		unsigned int crci;
	} _crc_out;
	unsigned char *crc_out = _crc_out.crcc;
	unsigned int ccrc;
	CRC32_t crc;
	int i;
	int nbytes, margin;

	memcpy(iv, cur_salt->iv, 16);

	if(AES_set_decrypt_key(derived_key, 256, &akey) < 0) {
		fprintf(stderr, "AES_set_decrypt_key failed in crypt!\n");
	}
	AES_cbc_encrypt(cur_salt->data, out, cur_salt->length, &akey, iv, AES_DECRYPT);

	/* various verifications tests */

	// test 0, padding check, bad hack :-(
	/* all bytes past unpacksize must decrypt to zero */
	margin = nbytes = cur_salt->length - cur_salt->unpacksize;
	i = cur_salt->length - 1;
	while (nbytes > 0) {
		if (out[i] != 0)
			return -1;
		nbytes--;
		i--;
	}
	/* a margin > 7 zero bytes is considered proof enough by itself */
	if (margin > 7) {
		// printf("valid padding test ;-)\n");
		// print_hex(out, cur_salt->length);
		return 0;
	}

	// test 1, CRC test
	CRC32_Init(&crc);
	CRC32_Update(&crc, out, cur_salt->unpacksize);
	CRC32_Final(crc_out, crc);
	ccrc =  _crc_out.crci; // computed CRC
	if (ccrc == cur_salt->crc)
		return 0;  // XXX don't be too eager!

	// XXX test 2, "well-formed folder" test
	if (validFolder(out)) {
		printf("validFolder check ;-)\n");
		return 0;
	}

	return -1;
}

/* Main cracking loop: upload candidates, run init kernel once and the loop
 * kernel LOOP_COUNT times, read back the derived keys, then verify each key
 * on the CPU (in parallel with OpenMP when available). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int i, index;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	/* round the global size up to a multiple of the local size */
	global_work_size = local_work_size ?
		(count + local_work_size - 1) / local_work_size * local_work_size : count;

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	// Copy data to gpu
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in,
		CL_FALSE, 0, insize, inbuffer, 0, NULL, NULL),
	        "Copy data to gpu");

	// Run 1st kernel
	HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], sevenzip_init,
		1, NULL, &global_work_size, lws, 0, NULL, NULL),
	        "Run init kernel");

	// Run loop kernel
	for (i = 0; i < LOOP_COUNT; i++) {
		HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id],
			crypt_kernel, 1, NULL, &global_work_size, lws, 0,
			NULL, NULL),
		        "Run loop kernel");
		HANDLE_CLERROR(clFinish(queue[gpu_id]),
		               "Error running loop kernel");
		opencl_process_event(); /* keep the UI responsive between chunks */
	}

	// Read the result back
	HANDLE_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out,
		CL_TRUE, 0, outsize, outbuffer, 0, NULL, NULL),
	        "Copy result back");

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		/* decrypt and check */
		if(sevenzip_decrypt(outbuffer[index].key, cur_salt->data) == 0) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	}
	return count;
}

/* Lightweight crypt_all used only during auto-tuning: runs the loop kernel
 * just twice (warm-up + timed) and skips the CPU-side verification. */
static int crypt_all_benchmark(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = local_work_size ?
		(count + local_work_size - 1) / local_work_size * local_work_size : count;

	// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in,
		CL_FALSE, 0, insize, inbuffer, 0, NULL,
		multi_profilingEvent[0]),
	        "Copy data to gpu");

	// Run 1st kernels
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], sevenzip_init,
		1, NULL, &global_work_size, lws, 0, NULL,
		multi_profilingEvent[1]),
	        "Run init kernel");

	// Warm-up run
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id],
		crypt_kernel, 1, NULL, &global_work_size, lws, 0,
		NULL, NULL),
	        "Run loop kernel");
	// Loop kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id],
		crypt_kernel, 1, NULL, &global_work_size, lws, 0,
		NULL, multi_profilingEvent[2]),
	        "Run loop kernel");

	// Read the result back
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out,
		CL_TRUE, 0, outsize, outbuffer, 0, NULL,
		multi_profilingEvent[3]),
	        "Copy result back");

	BENCH_CLERROR(clFinish(queue[gpu_id]),
	              "Error running loop kernel");

	return count;
}

/* Batch-level hit test: nonzero iff anything in the batch cracked */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

/* Per-candidate hit test */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

/* Decryption check already proved the match; nothing further to verify */
static int cmp_exact(char *source, int index)
{
	return 1;
}

#if FMT_MAIN_VERSION > 11
/* Tunable-cost report: number of key-derivation iterations for this salt */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = salt;
	return (unsigned int)(1 << my_salt->NumCyclesPower);
}
#endif

/* Format descriptor registered with the JtR core */
struct fmt_main fmt_opencl_sevenzip = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT | FMT_UNICODE | FMT_UTF8,
#if FMT_MAIN_VERSION > 11
		{
			"iteration count",
		},
#endif
		sevenzip_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{
			iteration_count,
		},
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		set_salt,
		sevenzip_set_key,
		get_key,
		clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */

#endif /* HAVE_OPENCL */
LAGraph_bfs_pushpull.c
//------------------------------------------------------------------------------ // LAGraph_bfs_pushpull: push-pull breadth-first search //------------------------------------------------------------------------------ /* LAGraph: graph algorithms based on GraphBLAS Copyright 2020 LAGraph Contributors. (see Contributors.txt for a full list of Contributors; see ContributionInstructions.txt for information on how you can Contribute to this project). All Rights Reserved. NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. Released under a BSD license, please see the LICENSE file distributed with this Software or contact permission@sei.cmu.edu for full terms. Created, in part, with funding and support from the United States Government. (see Acknowledgments.txt file). This program includes and/or can make use of certain third party source code, object code, documentation and other files ("Third Party Software"). See LICENSE file for more details. */ #include "LAGraph_bfs_pushpull.h" #include "../config.h" //------------------------------------------------------------------------------ // LAGraph_bfs_pushpull: direction-optimized push/pull breadth first search, // contributed by Tim Davis, Texas A&M. // LAGraph_bfs_pushpull computes the BFS of a graph from a single given // source node. The result is a vector v where v(i)=k if node i was placed // at level k in the BFS. // Usage: // info = LAGraph_bfs_pushpull (&v, &pi, A, AT, source, max_level, vsparse) ; // GrB_Vector *v: a vector containing the result, created on output. 
// v(i) = k is the BFS level of node i in the graph, where a source // node has v(source)=1. v(i) is implicitly zero if it is unreachable // from the source node. That is, GrB_Vector_nvals (&nreach,v) is the // size of the reachable set of the source node, for a single-source // BFS. v may be returned as sparse, or full. If full, v(i)=0 // indicates that node i was not reached. If sparse, the pattern of v // indicates the set of nodes reached. // GrB_Vector *pi: a vector containing the BFS tree, in 1-based indexing. // pi(source) = source+1 for source node. pi(i) = p+1 if p is the // parent of i. If pi is sparse, and pi(i) is not present, then node // i has not been reached. Otherwise, if pi is full, then pi(i)=0 // indicates that node i was not reached. // GrB_Matrix A: a square matrix of any type. The values of A are not // accessed. The presence of the entry A(i,j) indicates the edge // (i,j). That is, an explicit entry A(i,j)=0 is treated as an edge. // GrB_Matrix AT: an optional matrix of any type. If NULL, the algorithm // is a conventional push-only BFS. If not NULL, AT must be the // transpose of A, and a push-pull algorithm is used (NOTE: this // assumes GraphBLAS stores its matrix in CSR form; see discussion // below). Results are undefined if AT is not NULL but not identical // to the transpose of A. // int64_t source: the source node for the BFS. // int64_t max_level: An optional limit on the levels searched for the // single-source BFS. If zero, then no limit is enforced. If > 0, // then only nodes with v(i) <= max_level will be visited. That is: // 1: just the source node, 2: the source and its neighbors, 3: the // source node, its neighbors, and their neighbors, etc. // bool vsparse: if the result v may remain very sparse, then set this // parameter to true. If v might have many entries, set it false. If // you are unsure, then set it to true. This parameter speeds up // the handling of v. If you guess wrong, there is a slight // performance penalty. 
The results are not affected by this // parameter, just the performance. This parameter is used only for // the single-source BFS. // single-source BFS: // Given a graph A, a source node, find all nodes reachable from the // source node. v(source)=1, v(i)=2 if edge (source,i) appears in the // graph, and so on. If node i is not reachable from source, then // implicitly v(i)=0. v is returned as a sparse vector, and v(i) is not // an entry in this vector. // This algorithm can use the push-pull strategy, which requires both A and // AT=A' to be passed in. If the graph is known to be symmetric, then the same // matrix A can be passed in for both arguments. Results are undefined if AT // is not the transpose of A. // If only A or AT is passed in, then only single strategy will be used: push // or pull, but not both. In general, push-only performs well. A pull-only // strategy is possible but it is exceedingly slow. Assuming A and AT are both // in CSR format, then (let s = source node): // LAGraph_bfs_pushpull (..., A, AT, s, ...) ; // push-pull (fastest) // LAGraph_bfs_pushpull (..., A, NULL, s, ...) ; // push-only (good) // LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // pull-only (slow!) // If A and AT are both in CSC format, then: // LAGraph_bfs_pushpull (..., A, AT, s, ...) ; // push-pull (fastest) // LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // push-only (good) // LAGraph_bfs_pushpull (..., A, NULL, s, ...) ; // pull-only (slow!) // Since the pull-only method is exceedingly slow, SuiteSparse:GraphBLAS // detects this case and refuses to do it. // The basic step of this algorithm computes A'*q where q is the 'queue' of // nodes in the current level. This can be done with GrB_vxm(q,A) = (q'*A)' = // A'*q, or by GrB_mxv(AT,q) = AT*q = A'*q. Both steps compute the same thing, // just in a different way. In GraphBLAS, unlike MATLAB, a GrB_Vector is // simultaneously a row and column vector, so q and q' are interchangeable. 
// To implement an efficient BFS using GraphBLAS, an assumption must be made in // LAGraph about how the matrix is stored, whether by row or by column (or // perhaps some other opaque data structure). The storage format has a huge // impact on the relative performance of vxm(q,A) and mxv(AT,q). // Storing A by row, if A(i,j) is the edge (i,j), means that A(i,:) is easily // accessible. In terms of the graph A, this means that the out-adjacency // list of node i can be traversed in time O(out-degree of node i). // If AT is stored by row, then AT(i,:) is the in-adjacency list of node i, // and traversing row i of AT can be done in O(in-degree of node i) time. // The CSR (Compressed Sparse Row) format is the default for // SuiteSparse:GraphBLAS, but no assumption can be made about any particular // GraphBLAS library implementation. // If A and AT are both stored by column instead, then A(i,:) is not easy to // access. Instead, A(:,i) is the easily-accessible in-adjacency of node i, // and AT(:,i) is the out-adjancency. // A push step requires the out-adjacencies of each node, where as // a pull step requires the in-adjacencies of each node. // vxm(q,A) = A'*q, with A stored by row: a push step // mxv(AT,q) = A'*q, with AT stored by row: a pull step // vxm(q,A) = A'*q, with A stored by col: a pull step // mxv(AT,q) = A'*q, with AT stored by col: a push step // The GraphBLAS data structure is opaque. An implementation may decide to // store the matrix A in both formats, internally, so that it easily traverse // both in- and out-adjacencies of each node (equivalently, A(i,:) and A(:,i) // can both be easily traversed). This would make a push-pull BFS easy to // implement using just the opaque GrB_Matrix A, but it doubles the storage. // Deciding which format to use automatically is not a simple task, // particularly since the decision must work well throughout GraphBLAS, not // just for the BFS. 
// MATLAB stores its sparse matrices in CSC format (Compressed Sparse Column). // As a result, the MATLAB expression x=AT*q is a push step, computed using a // saxpy-based algorithm internally, and x=A'*q is a pull step, computed using // a dot product. // SuiteSparse:GraphBLAS can store a matrix in either format, but this requires // an extension to the GraphBLAS C API (GxB_set (A, GxB_FORMAT, f)). where // f = GxB_BY_ROW (that is, CSR) or GxB_BY_COL (that is, CSC). The library // could be augmented in the future with f = Gxb_BY_BOTH. It currently does // not select the format automatically. As a result, if GxB_set is not used, // all its GrB_Matrix objects are stored by row (CSR). // SuiteSparse:GraphBLAS allows the user to query (via GxB_get) an set (via // GxB_set) the format, whether by row or by column. The hypersparsity of // A is selected automatically, with optional hints from the user application, // but a selection between hypersparsity vs standard CSR and CSC has no effect // on the push vs pull decision made here. // The push/pull and saxpy/dot connection can be described as follows. // Assume for these first two examples that MATLAB stores its matrices in CSR // format, where accessing A(i,:) is fast. // If A is stored by row, then x = vxm(q,A) = q'*A can be written in MATLAB // notation as: /* function x = vxm (q,A) % a push step: compute x = q'*A where q is a column vector x = sparse (1,n) for i = 1:n % a saxpy operation, using the ith row of A and the scalar q(i) x = x + q (i) * A (i,:) end */ // If AT is stored by row, then x = mvx(AT,q) = AT*q = A'*q becomes // a dot product: /* function x = mxv (AT,q) % a pull step: compute x = AT*q where q is a column vector for i = 1:n % a dot-product of the ith row of AT and the column vector q x (i) = AT (i,:) * q end */ // The above snippets describe how SuiteSparse:GraphBLAS computes vxm(q,A) and // mxv(AT,q) by default, where A and AT are stored by row by default. 
However, // they would be very slow in MATLAB, since it stores its sparse matrices in // CSC format. In that case, if A is stored by column and thus accessing // A(:,j) is efficient, then x = vxm(q,A) = q'*A becomes the dot product // instead. These two snippets assume the matrices are both in CSR for, and // thus make more efficient use of MATLAB: /* function x = vxm (q,A) % a pull step: compute x = q'*A where q is a column vector for j = 1:n % a dot product of the row vector q' and the jth column of A x (j) = q' * A (:,j) end */ // If AT is stored by column, then x = mvx(AT,q) is /* function x = mxv (AT,q) % a push step: compute x = AT*q where q is a column vector for j = 1:n % a saxpy operation, using the jth column of AT and the scalar q(i) x = x + AT (:,j) * q end */ // In MATLAB, if q is a sparse column vector and A is a sparse matrix, then // x=A*q does in fact use a saxpy-based method, internally, and x=A'*q uses a // dot product. You can view the code used internally in MATLAB for its sparse // matrix multiplication in the SuiteSparse/MATLAB_Tools/SSMULT and SFMULT // packages, at http://suitesparse.com. // This raises an interesting puzzle for LAGraph, which is intended on being a // graph library that can be run on any implementation of GraphBLAS. There are // no mechanisms in the GraphBLAS C API for LAGraph (or other external packages // or user applications) to provide hints to GraphBLAS. Likely, there are no // query mechanisms where LAGraph can ask GraphBLAS how its matrices might be // stored (LAGraphs asks, "Is A(i,:) fast? Or A(:,j)? Or both?"; the answer // from GraphBLAS is silence). The GraphBLAS data structure is opaque, and it // does not answer this query. // There are two solutions to this puzzle. The most elegant one is for // GraphBLAS to handle all this internally, and change formats as needed. 
It
// could choose to store A in both CSR and CSC format, or use an entirely
// different data structure, and it would make the decision between the push or
// pull, at each step of the BFS.  This is not a simple task since the API is
// complex.  Furthermore, the selection of the data structure for A has
// implications on all other GraphBLAS operations (submatrix assignment and
// extraction, for example).

// However, if A were to be stored in both CSR and CSC format, inside the
// opaque GraphBLAS GrB_Matrix data structure, then LAGraph_bfs_simple would
// become a push-pull BFS.

// The second solution is to allow the user application or library such as
// LAGraph to provide hints and allow it to query the GraphBLAS library.
// There are no such features in the GraphBLAS C API.

// SuiteSparse:GraphBLAS takes the second approach: It adds two functions that
// are extensions to the API: GxB_set changes the format (CSR or CSC), and
// GxB_get can query the format.  Even with this simplification,
// SuiteSparse:GraphBLAS uses 24 different algorithmic variants inside GrB_mxm
// (per semiring), and selects between them automatically.  By default, all of
// its matrices are stored in CSR format (either sparse or hypersparse,
// selected automatically).  So if no GxB_* extensions are used, all matrices
// are in CSR format.

// If a GraphBLAS library other than SuiteSparse:GraphBLAS is in use, this
// particular function assumes that its input matrices are in CSR format, or at
// least A(i,:) and AT(i,:) can be easily accessed.  With this assumption, it
// is the responsibility of this function to select between using a push or a
// pull, for each step in the BFS.

// The following analysis assumes CSR format, and it assumes that dot-product
// (a pull step) can terminate early via a short-circuit rule with the OR
// monoid, as soon as it encounters a TRUE value.  This cuts the time for the
// dot-product.
Not all GraphBLAS libraries may use this, but SuiteSparse: // GraphBLAS does (in version 2.3.0 and later). Early termination cannot be // done for the saxpy (push step) method. // The work done by the push method (saxpy) is very predictable. BFS uses a // complemented mask. There is no simple way to exploit a complemented mask, // and saxpy has no early termination rule. If the set of nodes in the current // level is q, the work is nnz(A(q,:)). If d = nnz(A)/n is the average degree, // this becomes d*nq where nq = length (q): // pushwork = d*nq // The work done by the pull (dot product) method is less predictable. It can // exploit the complemented mask, and so it only computes (n-nvisited) dot // products, if nvisited is the # of nodes visited so far (in all levels). // With no early-termination, the dot product will take d * log2 (nq) time, // assuming that q is large and a binary search is used internally. That is, // the dot product will scan through the d entries in A(i,:), and do a binary // search for each entry in q. To account for the higher constant of a binary // search, log2(nq) is replaced with (3*(1+log2(nq))). With early termination, // d is too high. If the nodes are randomly marked, the probability of each // node being marked is nvisited/n. The expected number of trials until // success, for a sequence of events with probabilty p, is 1/p. Thus, the // expected number of iterations in a dot product before an early termination // is 1/p = (n/nvisited+1), where +1 is added to avoid a divide by zero. // However, it cannot exceed d. Thus, the total work for the dot product // (pull) method can be estimated as: // per_dot = min (d, n / (nvisited+1)) // pullwork = (n-nvisited) * per_dot * (3 * (1 + log2 ((double) nq))) // The above expressions are valid for SuiteSparse:GraphBLAS v2.3.0 and later, // and may be reasonable for other GraphBLAS implementations. Push or pull // is selected as the one with the least work. 
// TODO: change the formula for v3.2.0 // The push/pull decision requires that both A and AT be passed in, but this // function can use just one or the other. If only A is passed in and AT is // NULL, then only vxm(q,A) will be used (a push step if A is CSR, or a pull // step if A is CSC). If only AT is passed in and A is NULL, then only // mxv(AT,q) will be used (a pull step if AT is CSR, or a push step if AT is // CSC). // In general, while a push-pull strategy is the fastest, a push-only BFS will // give good peformance. In particular, the time to compute AT=A' plus the // time for the push-pull BFS is typically higher than just a push-only BFS. // This why this function does not compute AT=A'. To take advantage of the // push-pull method, both A and AT must already be available, with the cost to // construct them amortized across other computations such as this one. // A pull-only strategy will be *exceeding* slow. // The input matrix A must be square. It can be non-binary, but best // performance will be obtained if it is GrB_BOOL. It can have explicit // entries equal to zero. These are safely ignored, and are treated as // non-edges. // SuiteSparse:GraphBLAS can detect the CSR vs CSC format of its inputs. // In this case, if both matrices are provided, they must be in the same // format (both GxB_BY_ROW or both GxB_BY_COL). If the matrices are in CSC // format, vxm(q,A) is the pull step and mxv(AT,q) is the push step. // If only A or AT are provided, and the result is a pull-only algorithm, // an error is returned. // References: // Carl Yang, Aydin Buluc, and John D. Owens. 2018. Implementing Push-Pull // Efficiently in GraphBLAS. In Proceedings of the 47th International // Conference on Parallel Processing (ICPP 2018). ACM, New York, NY, USA, // Article 89, 11 pages. DOI: https://doi.org/10.1145/3225058.3225122 // Scott Beamer, Krste Asanovic and David A. Patterson, // The GAP Benchmark Suite, http://arxiv.org/abs/1508.03619, 2015. 
// http://gap.cs.berkeley.edu/ #define LAGRAPH_FREE_ALL \ { \ GrB_free (&v) ; \ GrB_free (&t) ; \ GrB_free (&q) ; \ GrB_free (&pi) ; \ } #define LAGRAPH_ERROR(message,info) \ { \ fprintf (stderr, "LAGraph error: %s\n[%d]\nFile: %s Line: %d\n", \ message, info, __FILE__, __LINE__) ; \ LAGRAPH_FREE_ALL ; \ return (info) ; \ } #define LAGRAPH_MAX(x,y) (((x) > (y)) ? (x) : (y)) #define LAGRAPH_MIN(x,y) (((x) < (y)) ? (x) : (y)) GrB_Info LAGraph_bfs_pushpull // push-pull BFS, or push-only if AT = NULL ( GrB_Vector *v_output, // v(i) is the BFS level of node i in the graph GrB_Vector *pi_output, // pi(i) = p+1 if p is the parent of node i. // if NULL, the parent is not computed. GrB_Matrix A, // input graph, treated as if boolean in semiring GrB_Matrix AT, // transpose of A (optional; push-only if NULL) int64_t source, // starting node of the BFS int64_t max_level, // optional limit of # levels to search bool vsparse // if true, v is expected to be very sparse ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GrB_Info info ; GrB_Vector q = NULL ; // nodes visited at each level GrB_Vector v = NULL ; // result vector GrB_Vector t = NULL ; // temporary vector GrB_Vector pi = NULL ; // parent vector if(v_output == NULL || (A == NULL && AT == NULL)) { // required output argument is missing LAGRAPH_ERROR("required arguments are NULL", GrB_NULL_POINTER) ; } (*v_output) = NULL ; bool compute_tree = (pi_output != NULL) ; GrB_Descriptor desc_s = GrB_DESC_S ; GrB_Descriptor desc_sc = GrB_DESC_SC ; GrB_Descriptor desc_rc = GrB_DESC_RC ; GrB_Descriptor desc_r = GrB_DESC_R ; GrB_Index nrows, ncols, nvalA, ignore, nvals ; // A is provided. 
AT may or may not be provided GrB_Matrix_nrows(&nrows, A) ; GrB_Matrix_ncols(&ncols, A) ; GrB_Matrix_nvals(&nvalA, A) ; bool use_vxm_with_A = true ; // push/pull requires both A and AT bool push_pull = (A != NULL && AT != NULL) ; if(nrows != ncols) { // A must be square LAGRAPH_ERROR("A must be square", GrB_NULL_POINTER) ; } //-------------------------------------------------------------------------- // initializations //-------------------------------------------------------------------------- GrB_Index n = nrows ; int nthreads = Config_GetOMPThreadCount(); nthreads = LAGRAPH_MIN(n / 4096, nthreads) ; nthreads = LAGRAPH_MAX(nthreads, 1) ; // just traverse from the source node max_level = (max_level <= 0) ? n : LAGRAPH_MIN(n, max_level) ; // create an empty vector v GrB_Type int_type = (n > INT32_MAX) ? GrB_INT64 : GrB_INT32 ; GrB_Vector_new(&v, int_type, n) ; // make v dense if requested int64_t vlimit = LAGRAPH_MAX(256, sqrt((double) n)) ; if(!vsparse) { // v is expected to have many entries, so convert v to dense. // If the guess is wrong, v can be made dense later on. 
GrB_assign(v, NULL, NULL, 0, GrB_ALL, n, NULL) ; } GrB_Semiring first_semiring, second_semiring ; if(compute_tree) { // create an integer vector q, and set q(source) to source+1 GrB_Vector_new(&q, int_type, n) ; GrB_Vector_setElement(q, source + 1, source) ; if(n > INT32_MAX) { // terminates as soon as it finds any parent; nondeterministic first_semiring = GxB_ANY_FIRST_INT64 ; second_semiring = GxB_ANY_SECOND_INT64 ; } else { // terminates as soon as it finds any parent; nondeterministic first_semiring = GxB_ANY_FIRST_INT32 ; second_semiring = GxB_ANY_SECOND_INT32 ; } // create the empty parent vector GrB_Vector_new(&pi, int_type, n) ; if(!vsparse) { // make pi a dense vector of all zeros GrB_assign(pi, NULL, NULL, 0, GrB_ALL, n, NULL) ; } // pi (source) = source+1 denotes a root of the BFS tree GrB_Vector_setElement(pi, source + 1, source) ; } else { // create a boolean vector q, and set q(source) to true GrB_Vector_new(&q, GrB_BOOL, n) ; GrB_Vector_setElement(q, true, source) ; // terminates as soon as it finds any pair first_semiring = GxB_ANY_PAIR_BOOL ; second_semiring = GxB_ANY_PAIR_BOOL ; } // average node degree double d = (n == 0) ? 
0 : (((double) nvalA) / (double) n) ; int64_t nvisited = 0 ; // # nodes visited so far GrB_Index nq = 1 ; // number of nodes in the current level //-------------------------------------------------------------------------- // BFS traversal and label the nodes //-------------------------------------------------------------------------- for(int64_t level = 1 ; ; level++) { //---------------------------------------------------------------------- // set v to the current level, for all nodes in q //---------------------------------------------------------------------- // v<q> = level: set v(i) = level for all nodes i in q GrB_assign(v, q, NULL, level, GrB_ALL, n, desc_s) ; //---------------------------------------------------------------------- // check if done //---------------------------------------------------------------------- nvisited += nq ; if(nq == 0 || nvisited == n || level >= max_level) break ; //---------------------------------------------------------------------- // check if v should be converted to dense //---------------------------------------------------------------------- if(vsparse && nvisited > vlimit) { // Convert v from sparse to dense to speed up the rest of the work. // If this case is triggered, it would have been faster to pass in // vsparse = false on input. // v <!v> = 0 GrB_assign(v, v, NULL, 0, GrB_ALL, n, desc_sc) ; GrB_Vector_nvals(&ignore, v) ; if(compute_tree) { // Convert pi from sparse to dense, to speed up the work. 
// pi<!pi> = 0 GrB_assign(pi, pi, NULL, 0, GrB_ALL, n, desc_sc) ; GrB_Vector_nvals(&ignore, pi) ; } vsparse = false ; } //---------------------------------------------------------------------- // select push vs pull //---------------------------------------------------------------------- if(push_pull) { double pushwork = d * nq ; double expected = (double) n / (double)(nvisited + 1) ; double per_dot = LAGRAPH_MIN(d, expected) ; double binarysearch = (3 * (1 + log2((double) nq))) ; double pullwork = (n - nvisited) * per_dot * binarysearch ; use_vxm_with_A = (pushwork < pullwork) ; } //---------------------------------------------------------------------- // q = next level of the BFS //---------------------------------------------------------------------- if(use_vxm_with_A) { // q'<!v> = q'*A // this is a push step if A is in CSR format; pull if CSC GrB_vxm(q, v, NULL, first_semiring, q, A, desc_rc) ; } else { // q<!v> = AT*q // this is a pull step if AT is in CSR format; push if CSC GrB_mxv(q, v, NULL, second_semiring, AT, q, desc_rc) ; } //---------------------------------------------------------------------- // move to next level //---------------------------------------------------------------------- if(compute_tree) { //------------------------------------------------------------------ // assign parents //------------------------------------------------------------------ // q(i) currently contains the parent of node i in tree (off by one // so it won't have any zero values, for valued mask). // pi<q> = q GrB_assign(pi, q, NULL, q, GrB_ALL, n, desc_s) ; //------------------------------------------------------------------ // replace q with current node numbers //------------------------------------------------------------------ // TODO this could be a unaryop // q(i) = i+1 for all entries in q. 
GrB_Index *qi ; if(n > INT32_MAX) { int64_t *qx ; GxB_Vector_export(&q, &int_type, &n, &nq, &qi, (void **)(&qx), NULL) ; int nth = LAGRAPH_MIN(nq / (64 * 1024), nthreads) ; nth = LAGRAPH_MAX(nth, 1) ; #pragma omp parallel for num_threads(nth) schedule(static) for(int64_t k = 0 ; k < nq ; k++) { qx [k] = qi [k] + 1 ; } GxB_Vector_import(&q, int_type, n, nq, &qi, (void **)(&qx), NULL) ; } else { int32_t *qx ; GxB_Vector_export(&q, &int_type, &n, &nq, &qi, (void **)(&qx), NULL) ; int nth = LAGRAPH_MIN(nq / (64 * 1024), nthreads) ; nth = LAGRAPH_MAX(nth, 1) ; #pragma omp parallel for num_threads(nth) schedule(static) for(int32_t k = 0 ; k < nq ; k++) { qx [k] = qi [k] + 1 ; } GxB_Vector_import(&q, int_type, n, nq, &qi, (void **)(&qx), NULL) ; } } else { //------------------------------------------------------------------ // count the nodes in the current level //------------------------------------------------------------------ GrB_Vector_nvals(&nq, q) ; } } //-------------------------------------------------------------------------- // return the parent vector, if computed //-------------------------------------------------------------------------- if(compute_tree) { (*pi_output) = pi ; pi = NULL ; } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- (*v_output) = v ; // return result v = NULL ; // set to NULL so LAGRAPH_FREE_ALL doesn't free it LAGRAPH_FREE_ALL ; // free all workspace (except for result v) return (GrB_SUCCESS) ; }
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 24; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=Nt-1;t1++) { lbp=ceild(t1+1,2); ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1-4,6),ceild(8*t2-Nz-11,24));t3<=min(floord(4*Nt+Ny-9,24),floord(4*t1+Ny-1,24));t3++) { for (t4=max(max(ceild(t1-254,256),ceild(8*t2-Nz-1011,1024)),ceild(24*t3-Ny-1011,1024));t4<=min(min(floord(4*Nt+Nx-9,1024),floord(4*t1+Nx-1,1024)),floord(24*t3+Nx+11,1024));t4++) { for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),6*t3+4),256*t4+254);t5++) { for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) { lbv=max(1024*t4,4*t5+4); ubv=min(1024*t4+1023,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ 
(-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
deconvolution_pack16to8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Deconvolution (transposed convolution) from 16-channel-packed input to
// 8-channel-packed output, implemented with 256-bit FMA intrinsics.
//
// Works "gather style": for every output pixel it walks the kernel taps and
// accumulates contributions from the input pixels that map onto it, instead
// of scattering each input pixel into the output.
//
// Layout assumptions visible from the code:
//   - bottom_blob rows hold 16 floats per spatial position (sx * 16 stride);
//   - weight_data_packed.channel(p) holds, per kernel tap, a 16x8 sub-block
//     (16 input lanes x 8 output lanes = 128 floats, hence the "* 128");
//   - top_blob rows hold 8 floats per spatial position (outptr += 8).
// top_blob is assumed to be pre-allocated by the caller (w/h/c are read from it).
static void deconvolution_pack16to8_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packed, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // Effective kernel footprint after dilation.
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Accumulator for the 8 packed output channels of this pixel,
                // seeded from the bias when one is provided.
                __m256 _sum = _mm256_setzero_ps();

                if (bias_data_ptr)
                {
                    _sum = _mm256_loadu_ps(bias_data_ptr + p * 8);
                }

                const float* kptr = weight_data_packed.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        // Candidate input row (un-divided): output position (i)
                        // shifted by this tap's dilated offset. Only positions
                        // that land exactly on a stride multiple contribute.
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        for (int x = 0; x < kernel_w; x++)
                        {
                            // Same mapping for the column coordinate.
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            // 16 packed input channels at this input pixel.
                            const float* sptr = m.row(sy) + sx * 16;

                            // Offset of this tap's 16x8 weight sub-block.
                            int k = (y * kernel_w + x) * 128;

                            // Broadcast each of the 16 input lanes...
                            __m256 _val0 = _mm256_broadcast_ss(sptr);
                            __m256 _val1 = _mm256_broadcast_ss(sptr + 1);
                            __m256 _val2 = _mm256_broadcast_ss(sptr + 2);
                            __m256 _val3 = _mm256_broadcast_ss(sptr + 3);
                            __m256 _val4 = _mm256_broadcast_ss(sptr + 4);
                            __m256 _val5 = _mm256_broadcast_ss(sptr + 5);
                            __m256 _val6 = _mm256_broadcast_ss(sptr + 6);
                            __m256 _val7 = _mm256_broadcast_ss(sptr + 7);
                            __m256 _val8 = _mm256_broadcast_ss(sptr + 8);
                            __m256 _val9 = _mm256_broadcast_ss(sptr + 9);
                            __m256 _vala = _mm256_broadcast_ss(sptr + 10);
                            __m256 _valb = _mm256_broadcast_ss(sptr + 11);
                            __m256 _valc = _mm256_broadcast_ss(sptr + 12);
                            __m256 _vald = _mm256_broadcast_ss(sptr + 13);
                            __m256 _vale = _mm256_broadcast_ss(sptr + 14);
                            __m256 _valf = _mm256_broadcast_ss(sptr + 15);

                            // ...and load the matching 16 rows of 8 weights
                            // (aligned loads: the packed weight blob is assumed
                            // 32-byte aligned — _mm256_load_ps, not loadu).
                            __m256 _w0 = _mm256_load_ps(kptr + k);
                            __m256 _w1 = _mm256_load_ps(kptr + k + 8);
                            __m256 _w2 = _mm256_load_ps(kptr + k + 8 * 2);
                            __m256 _w3 = _mm256_load_ps(kptr + k + 8 * 3);
                            __m256 _w4 = _mm256_load_ps(kptr + k + 8 * 4);
                            __m256 _w5 = _mm256_load_ps(kptr + k + 8 * 5);
                            __m256 _w6 = _mm256_load_ps(kptr + k + 8 * 6);
                            __m256 _w7 = _mm256_load_ps(kptr + k + 8 * 7);
                            __m256 _w8 = _mm256_load_ps(kptr + k + 8 * 8);
                            __m256 _w9 = _mm256_load_ps(kptr + k + 8 * 9);
                            __m256 _wa = _mm256_load_ps(kptr + k + 8 * 10);
                            __m256 _wb = _mm256_load_ps(kptr + k + 8 * 11);
                            __m256 _wc = _mm256_load_ps(kptr + k + 8 * 12);
                            __m256 _wd = _mm256_load_ps(kptr + k + 8 * 13);
                            __m256 _we = _mm256_load_ps(kptr + k + 8 * 14);
                            __m256 _wf = _mm256_load_ps(kptr + k + 8 * 15);

                            // Rank-1 update: sum += val[c] * w[c][:] over the
                            // 16 input lanes, fused multiply-add per lane.
                            _sum = _mm256_fmadd_ps(_val0, _w0, _sum);
                            _sum = _mm256_fmadd_ps(_val1, _w1, _sum);
                            _sum = _mm256_fmadd_ps(_val2, _w2, _sum);
                            _sum = _mm256_fmadd_ps(_val3, _w3, _sum);
                            _sum = _mm256_fmadd_ps(_val4, _w4, _sum);
                            _sum = _mm256_fmadd_ps(_val5, _w5, _sum);
                            _sum = _mm256_fmadd_ps(_val6, _w6, _sum);
                            _sum = _mm256_fmadd_ps(_val7, _w7, _sum);
                            _sum = _mm256_fmadd_ps(_val8, _w8, _sum);
                            _sum = _mm256_fmadd_ps(_val9, _w9, _sum);
                            _sum = _mm256_fmadd_ps(_vala, _wa, _sum);
                            _sum = _mm256_fmadd_ps(_valb, _wb, _sum);
                            _sum = _mm256_fmadd_ps(_valc, _wc, _sum);
                            _sum = _mm256_fmadd_ps(_vald, _wd, _sum);
                            _sum = _mm256_fmadd_ps(_vale, _we, _sum);
                            _sum = _mm256_fmadd_ps(_valf, _wf, _sum);
                        }
                    }

                    // Advance to the next input channel's weight plane
                    // (maxk taps x 128 floats each).
                    kptr += maxk * 128;
                }

                // Apply the configured activation, then emit the 8 outputs.
                _sum = activation_avx(_sum, activation_type, activation_params);

                _mm256_storeu_ps(outptr, _sum);
                outptr += 8;
            }
        }
    }
}
mipmap_core.c
#include <math.h> #include <stdlib.h> #include <memory.h> #include <stdio.h> #include "omp.h" /* C-OMP implementation of FGP-TV [1] denoising/regularization model (2D/3D case) * * Input Parameters: * 1. Noisy image/volume [REQUIRED] * 2. lambda - regularization parameter [REQUIRED] * 3. Number of iterations [OPTIONAL parameter] * 4. eplsilon: tolerance constant [OPTIONAL parameter] * 5. TV-type: 'iso' or 'l1' [OPTIONAL parameter] * 6. nonneg: 'nonnegativity (0 is OFF by default) [OPTIONAL parameter] * 7. print information: 0 (off) or 1 (on) [OPTIONAL parameter] * 8. P1 (dual variable from the previous outer iteration) [OPTIONAL parameter] * 9. P2 (dual variable from the previous outer iteration) [OPTIONAL parameter] * * Output: * [1] Filtered/regularized image * [2] last function value * [3] P1 (dual variable from the previous outer iteration) [if 8 is provided] * [4] P2 (dual variable from the previous outer iteration) [if 9 is provided] * * Example of image denoising: * figure; * Im = double(imread('lena_gray_256.tif'))/255; % loading image * u0 = Im + .05*randn(size(Im)); % adding noise * u = FGP_TV(single(u0), 0.05, 100, 1e-04); * * to compile with OMP support: gcc -shared -Wall -std=c99 -Wl,-soname,FGP_TV -fopenmp -o FGP_TV.so -fPIC FGP_TV.c * This function is based on the Matlab's code and paper by * [1] Amir Beck and Marc Teboulle, "Fast Gradient-Based Algorithms for Constrained Total Variation Image Denoising and Deblurring Problems" * * D. 
Kazantsev, 2016-17
 */

/* Forward declarations for the per-step kernels used by FGP_TV below. */
float copyIm(float *A, float *B, int dimX, int dimY, int dimZ);
float Obj_func2D(float *A, float *D, float *R1, float *R2, float lambda, int dimX, int dimY);
float Grad_func2D(float *P1, float *P2, float *D, float *R1, float *R2, float lambda, int dimX, int dimY);
float Proj_func2D(float *P1, float *P2, int methTV, int dimX, int dimY);
float Rupd_func2D(float *P1, float *P1_old, float *P2, float *P2_old, float *R1, float *R2, float tkp1, float tk, int dimX, int dimY);
float Obj_func_CALC2D(float *A, float *D, float *funcvalA, float lambda, int dimX, int dimY);

/* FGP-TV (Beck & Teboulle fast gradient projection) denoiser, 2D case.
 *
 * A       noisy input image (dimX*dimY, row-major, index i*dimY + j)
 * lambda  TV regularization weight
 * iter    maximum number of iterations
 * epsil   relative-change tolerance used for early stopping
 * methTV  0 = isotropic TV, otherwise anisotropic TV (see Proj_func2D)
 * nonneg  1 = clamp negative values of the solution to zero
 * printM  print flag (currently unused; the printf below is commented out)
 * dimX,dimY,dimZ  image dimensions (2D path; dimZ only scales the flat loops)
 * D       output: denoised image (also used as the running iterate)
 *
 * Per iteration: D = A - lambda*div(R)  (Obj_func2D), optional nonnegativity,
 * dual gradient step (Grad_func2D), projection of the dual field onto the
 * unit ball (Proj_func2D), FISTA momentum update of t and R (Rupd_func2D).
 */
void FGP_TV(float *A, float lambda, int iter, float epsil, int methTV, int nonneg, int printM, int dimX, int dimY, int dimZ, float *D)
{
    int ll, j, count;
    float *D_old=NULL, *P1=NULL, *P2=NULL, *P1_old=NULL, *P2_old=NULL, *R1=NULL, *R2=NULL, tk, tkp1, re, re1;

    /* Remnants of the original Matlab MEX interface, kept for reference: */
    //A = (float *) mxGetData(prhs[0]); /*noisy image (2D/3D) */
    //lambda = (float) mxGetScalar(prhs[1]); /* regularization parameters */
    //iter = 100; /* default iterations number */
    //epsil = 0.0001; /* default tolerance constant */
    //methTV = 0; /* default isotropic TV penalty */
    //nonneg = 0; /* nonnegativity (0 is OFF by default) */
    //printM = 0; /* print information (0 is 0FF by default) */
    /*output function value (last iteration) */
    // plhs[1] = mxCreateNumericMatrix(1, 1, mxSINGLE_CLASS, mxREAL);
    //float *funcvalA = (float *) mxGetData(plhs[1]);
    // if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) {mexErrMsgTxt("The input image must be in a single precision"); }
    /* Handling Matlab output data*/
    // dimX = dim_array[0]; dimY = dim_array[1]; dimZ = dim_array[2];

    /* FISTA momentum coefficients. */
    tk = 1.0f; tkp1=1.0f; count = 0;
    // re_old = 0.0f;

    /* Work buffers: previous iterate, dual field P, its previous value,
     * and the extrapolated dual field R.
     * NOTE(review): calloc results are not checked before use. */
    D_old = (float*) calloc (dimY*dimX,sizeof(float));
    P1 = (float*) calloc (dimY*dimX,sizeof(float));
    P2 = (float*) calloc (dimY*dimX,sizeof(float));
    P1_old = (float*) calloc (dimY*dimX,sizeof(float));
    P2_old = (float*) calloc (dimY*dimX,sizeof(float));
    R1 = (float*) calloc (dimY*dimX,sizeof(float));
    R2 = (float*) calloc (dimY*dimX,sizeof(float));

    /* begin iterations */
    for(ll=0; ll<iter; ll++) {

        /* computing the gradient of the objective function */
        Obj_func2D(A, D, R1, R2, lambda, dimX, dimY);

        if (nonneg == 1) {
        /* apply nonnegativity */
        for(j=0; j<dimX*dimY*dimZ; j++) {if (D[j] < 0.0f) D[j] = 0.0f;} }

        /*Taking a step towards minus of the gradient*/
        Grad_func2D(P1, P2, D, R1, R2, lambda, dimX, dimY);

        /* projection step */
        Proj_func2D(P1, P2, methTV, dimX, dimY);

        /*updating R and t*/
        tkp1 = (1.0f + sqrt(1.0f + 4.0f*tk*tk))*0.5f;
        Rupd_func2D(P1, P1_old, P2, P2_old, R1, R2, tkp1, tk, dimX, dimY);

        /* calculate norm: relative change ||D - D_old|| / ||D||.
         * NOTE(review): division by sqrt(re1) is unguarded; re1 == 0 (all-zero
         * D) would produce a NaN/inf here — confirm inputs exclude that case. */
        re = 0.0f; re1 = 0.0f;
        for(j=0; j<dimX*dimY*dimZ; j++)
        {
            re += pow(D[j] - D_old[j],2);
            re1 += pow(D[j],2);
        }
        re = sqrt(re)/sqrt(re1);
        /* NOTE(review): count is cumulative, never reset — the loop stops after
         * 5 total (not necessarily consecutive) sub-tolerance iterations. */
        if (re < epsil)  count++;
        if (count > 4) {
            // Obj_func_CALC2D(A, D, funcvalA, lambda, dimX, dimY);
            break; }

        /* check that the residual norm is decreasing */
        // if (ll > 2) {
        // if (re > re_old) {
        //  Obj_func_CALC2D(A, D, funcvalA, lambda, dimX, dimY);
        // break; }}
        //re_old = re;
        /*printf("%f %i %i \n", re, ll, count); */

        /*storing old values*/
        copyIm(D, D_old, dimX, dimY, dimZ);
        copyIm(P1, P1_old, dimX, dimY, dimZ);
        copyIm(P2, P2_old, dimX, dimY, dimZ);
        tk = tkp1;

        /* calculating the objective function value */
        //if (ll == (iter-1)) Obj_func_CALC2D(A, D, funcvalA, lambda, dimX, dimY);
    }
    if (nonneg == 1) {
    /* apply nonnegativity */
    for(j=0; j<dimX*dimY*dimZ; j++) {if (D[j] < 0.0f) D[j] = 0.0f;} }
    // if (printM == 1) printf("FGP-TV iterations stopped at iteration %i with the function value %f \n", ll, funcvalA[0]);
    free(D_old);free(P1);free(P2);free(R1);free(R2);free(P1_old);free(P2_old);
}

/* Objective value F = 0.5*||D - A||^2 + lambda*TV, written to funcvalA[0].
 * NOTE(review): the TV term below is computed from forward differences of A
 * (the noisy input), not of D (the solution) — verify this is intended. */
float Obj_func_CALC2D(float *A, float *D, float *funcvalA, float lambda, int dimX, int dimY)
{
    int i,j;
    float f1, f2, val1, val2;
    /*data-related term */
    f1 = 0.0f;
    for(i=0; i<dimX*dimY; i++) f1 += pow(D[i] - A[i],2);
    /*TV-related term */
    f2 = 0.0f;
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            /* boundary conditions: zero gradient on the last row/column */
            if (i == dimX-1) {val1 = 0.0f;} else {val1 = A[(i+1)*dimY + (j)] - A[(i)*dimY + (j)];}
            if (j == dimY-1) {val2 = 0.0f;} else {val2 = A[(i)*dimY + (j+1)] - A[(i)*dimY + (j)];}
            f2 += sqrt(pow(val1,2) + pow(val2,2));
        }}
    /* sum of two terms */
    funcvalA[0] = 0.5f*f1 + lambda*f2;
    return *funcvalA;
}

/* Primal update: D = A - lambda * div(R), where the (negative) divergence of
 * the dual field (R1,R2) is taken with backward differences and zero boundary.
 * Returns D[0] (callers ignore the return value). */
float Obj_func2D(float *A, float *D, float *R1, float *R2, float lambda, int dimX, int dimY)
{
    float val1, val2;
    int i, j;
    #pragma omp parallel for shared(A,D,R1,R2) private(i,j,val1,val2)
    for (i = 0; i<dimX; i++) {
        for (j = 0; j<dimY; j++) {
            /* boundary conditions: no contribution from outside the image */
            if (i == 0) { val1 = 0.0f; }
            else { val1 = R1[(i - 1)*dimY + (j)]; }
            if (j == 0) { val2 = 0.0f; }
            else { val2 = R2[(i)*dimY + (j - 1)]; }
            D[(i)*dimY + (j)] = A[(i)*dimY + (j)] - lambda*(R1[(i)*dimY + (j)] + R2[(i)*dimY + (j)] - val1 - val2);
        }
    }
    return *D;
}

/* Dual gradient step: P = R + (1/(8*lambda)) * grad(D), with forward
 * differences and zero gradient at the right/bottom boundary.
 * 1/8 is the standard FGP step size (Lipschitz bound of the 2D operator). */
float Grad_func2D(float *P1, float *P2, float *D, float *R1, float *R2, float lambda, int dimX, int dimY)
{
    float val1, val2, multip;
    int i, j;
    multip = (1.0f / (8.0f*lambda));
    #pragma omp parallel for shared(P1,P2,D,R1,R2,multip) private(i,j,val1,val2)
    for (i = 0; i<dimX; i++) {
        for (j = 0; j<dimY; j++) {
            /* boundary conditions */
            if (i == dimX - 1) val1 = 0.0f;
            else val1 = D[(i)*dimY + (j)] - D[(i + 1)*dimY + (j)];
            if (j == dimY - 1) val2 = 0.0f;
            else val2 = D[(i)*dimY + (j)] - D[(i)*dimY + (j + 1)];
            P1[(i)*dimY + (j)] = R1[(i)*dimY + (j)] + multip*val1;
            P2[(i)*dimY + (j)] = R2[(i)*dimY + (j)] + multip*val2;
        }
    }
    return 1;
}

/* Projection of the dual field onto the feasible set.
 * methTV == 0 (isotropic): per-pixel projection of (P1,P2) onto the unit disc
 * (divide by the vector norm when it exceeds 1).
 * otherwise (anisotropic): clamp each component independently to [-1, 1]. */
float Proj_func2D(float *P1, float *P2, int methTV, int dimX, int dimY)
{
    float val1, val2, denom;
    int i, j;
    if (methTV == 0) {
        /* isotropic TV*/
        #pragma omp parallel for shared(P1,P2) private(i,j,denom)
        for (i = 0; i<dimX; i++) {
            for (j = 0; j<dimY; j++) {
                denom = pow(P1[(i)*dimY + (j)], 2) + pow(P2[(i)*dimY + (j)], 2);
                if (denom > 1) {
                    P1[(i)*dimY + (j)] = P1[(i)*dimY + (j)] / sqrt(denom);
                    P2[(i)*dimY + (j)] = P2[(i)*dimY + (j)] / sqrt(denom);
                }
            }
        }
    }
    else {
        /* anisotropic TV*/
        #pragma omp parallel for shared(P1,P2) private(i,j,val1,val2)
        for (i = 0; i<dimX; i++) {
            for (j = 0; j<dimY; j++) {
                val1 = fabs(P1[(i)*dimY + (j)]);
                val2 = fabs(P2[(i)*dimY + (j)]);
                if (val1 < 1.0f) { val1 = 1.0f; }
                if (val2 < 1.0f) { val2 = 1.0f; }
                P1[(i)*dimY + (j)] = P1[(i)*dimY + (j)] / val1;
                P2[(i)*dimY + (j)] = P2[(i)*dimY + (j)] / val2;
            }
        }
    }
    return 1;
}

/* FISTA momentum update of the extrapolated dual field:
 * R = P + ((tk - 1)/tkp1) * (P - P_old). */
float Rupd_func2D(float *P1, float *P1_old, float *P2, float *P2_old, float *R1, float *R2, float tkp1, float tk, int dimX, int dimY)
{
    int i, j;
    float multip;
    multip = ((tk - 1.0f) / tkp1);
    #pragma omp parallel for shared(P1,P2,P1_old,P2_old,R1,R2,multip) private(i,j)
    for (i = 0; i<dimX; i++) {
        for (j = 0; j<dimY; j++) {
            R1[(i)*dimY + (j)] = P1[(i)*dimY + (j)] + multip*(P1[(i)*dimY + (j)] - P1_old[(i)*dimY + (j)]);
            R2[(i)*dimY + (j)] = P2[(i)*dimY + (j)] + multip*(P2[(i)*dimY + (j)] - P2_old[(i)*dimY + (j)]);
        }
    }
    return 1;
}

/* General Functions */
/*****************************************************************/
/* Copy Image: parallel element-wise copy of A into B (flat 3D buffer).
 * Returns B[0] (callers ignore the return value). */
float copyIm(float *A, float *B, int dimX, int dimY, int dimZ)
{
    int j;
    #pragma omp parallel for shared(A, B) private(j)
    for(j=0; j<dimX*dimY*dimZ; j++)  B[j] = A[j];
    return *B;
}
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Frontend/OpenMP/OMPContext.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; class ObjCTypeParameter; struct OMPTraitProperty; struct OMPTraitSelector; struct OMPTraitSet; class OMPTraitInfo; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. 
/// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class ParsingOpenMPDirectiveRAII; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. 
IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. Only present if AltiVec or ZVector are enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. 
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> FloatControlHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFenvAccessHandler; std::unique_ptr<PragmaHandler> STDCFenvRoundHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; 
std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// Parsing OpenMP directive mode. bool OpenMPDirectiveParsing = false; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp, it is for a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// Current kind of OpenMP clause OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown; /// RAII class that manages the template parameter depth. 
class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } void setAddedDepth(unsigned D) { Depth = Depth - AddedLevels + D; AddedLevels = D; } unsigned getDepth() const { return Depth; } unsigned getOriginalDepth() const { return Depth - AddedLevels; } }; /// Factory object for creating ParsedAttr objects. AttributeFactory AttrFactory; /// Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; void MaybeDestroyTemplateIds() { if (!TemplateIds.empty() && (Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens())) DestroyTemplateIds(); } void DestroyTemplateIds(); /// RAII object to destroy TemplateIdAnnotations where possible, from a /// likely-good position during parsing. struct DestroyTemplateIdAnnotationsRAIIObj { Parser &Self; DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {} ~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); } }; /// Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; /// Tracker for '<' tokens that might have been intended to be treated as an /// angle bracket instead of a less-than comparison. /// /// This happens when the user intends to form a template-id, but typoes the /// template-name or forgets a 'template' keyword for a dependent template /// name. /// /// We track these locations from the point where we see a '<' with a /// name-like expression on its left until we see a '>' or '>>' that might /// match it. struct AngleBracketTracker { /// Flags used to rank candidate template names when there is more than one /// '<' in a scope. 
enum Priority : unsigned short { /// A non-dependent name that is a potential typo for a template name. PotentialTypo = 0x0, /// A dependent name that might instantiate to a template-name. DependentName = 0x2, /// A space appears before the '<' token. SpaceBeforeLess = 0x0, /// No space before the '<' token NoSpaceBeforeLess = 0x1, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName) }; struct Loc { Expr *TemplateName; SourceLocation LessLoc; AngleBracketTracker::Priority Priority; unsigned short ParenCount, BracketCount, BraceCount; bool isActive(Parser &P) const { return P.ParenCount == ParenCount && P.BracketCount == BracketCount && P.BraceCount == BraceCount; } bool isActiveOrNested(Parser &P) const { return isActive(P) || P.ParenCount > ParenCount || P.BracketCount > BracketCount || P.BraceCount > BraceCount; } }; SmallVector<Loc, 8> Locs; /// Add an expression that might have been intended to be a template name. /// In the case of ambiguity, we arbitrarily select the innermost such /// expression, for example in 'foo < bar < baz', 'bar' is the current /// candidate. No attempt is made to track that 'foo' is also a candidate /// for the case where we see a second suspicious '>' token. void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc, Priority Prio) { if (!Locs.empty() && Locs.back().isActive(P)) { if (Locs.back().Priority <= Prio) { Locs.back().TemplateName = TemplateName; Locs.back().LessLoc = LessLoc; Locs.back().Priority = Prio; } } else { Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount, P.BracketCount, P.BraceCount}); } } /// Mark the current potential missing template location as having been /// handled (this happens if we pass a "corresponding" '>' or '>>' token /// or leave a bracket scope). void clear(Parser &P) { while (!Locs.empty() && Locs.back().isActiveOrNested(P)) Locs.pop_back(); } /// Get the current enclosing expression that might hve been intended to be /// a template name. 
Loc *getCurrent(Parser &P) { if (!Locs.empty() && Locs.back().isActive(P)) return &Locs.back(); return nullptr; } }; AngleBracketTracker AngleBrackets; IdentifierInfo *getSEHExceptKeyword(); /// True if we are within an Objective-C container while parsing C-like decls. /// /// This is necessary because Sema thinks we have left the container /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will /// be NULL. bool ParsingInObjCContainer; /// Whether to skip parsing of function bodies. /// /// This option can be used, for example, to speed up searches for /// declarations/definitions when indexing. bool SkipFunctionBodies; /// The location of the expression statement that is being parsed right now. /// Used to determine if an expression that is being parsed is a statement or /// just a regular sub-expression. SourceLocation ExprStatementTokLoc; /// Flags describing a context in which we're parsing a statement. enum class ParsedStmtContext { /// This context permits declarations in language modes where declarations /// are not statements. AllowDeclarationsInC = 0x1, /// This context permits standalone OpenMP directives. AllowStandaloneOpenMPDirectives = 0x2, /// This context is at the top level of a GNU statement expression. InStmtExpr = 0x4, /// The context of a regular substatement. SubStmt = 0, /// The context of a compound-statement. Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives, LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr) }; /// Act on an expression statement that might be the last statement in a /// GNU statement expression. Checks whether we are actually at the end of /// a statement expression and builds a suitable expression statement. 
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx); public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding. All of these are statically 'void*', but they may all be // different actual classes based on the actions in place. typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods. /// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. 
SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); if (Tok.isAnnotation()) return ConsumeAnnotationToken(); return ConsumeToken(); } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); } /// isTokenBracket - Return true if the cur token is '[' or ']'. 
bool isTokenBracket() const {
  return Tok.isOneOf(tok::l_square, tok::r_square);
}

/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); }

/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
  return tok::isStringLiteral(Tok.getKind());
}

/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
  return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
         isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}

/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();

/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
  Token Next = Tok;
  PP.EnterToken(Consumed, /*IsReinject*/true);
  PP.Lex(Tok);
  PP.EnterToken(Next, /*IsReinject*/true);
}

SourceLocation ConsumeAnnotationToken() {
  assert(Tok.isAnnotation() && "wrong consume method");
  SourceLocation Loc = Tok.getLocation();
  PrevTokLocation = Tok.getAnnotationEndLoc();
  PP.Lex(Tok);
  return Loc;
}

/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
  assert(isTokenParen() && "wrong consume method");
  if (Tok.getKind() == tok::l_paren)
    ++ParenCount;
  else if (ParenCount) {
    AngleBrackets.clear(*this);
    --ParenCount;  // Don't let unbalanced )'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
  assert(isTokenBracket() && "wrong consume method");
  if (Tok.getKind() == tok::l_square)
    ++BracketCount;
  else if (BracketCount) {
    AngleBrackets.clear(*this);
    --BracketCount;  // Don't let unbalanced ]'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
  assert(isTokenBrace() && "wrong consume method");
  if (Tok.getKind() == tok::l_brace)
    ++BraceCount;
  else if (BraceCount) {
    AngleBrackets.clear(*this);
    --BraceCount;  // Don't let unbalanced }'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind.  This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
  assert(isTokenStringLiteral() &&
         "Should only consume string literals with this method");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
  assert(Tok.is(tok::code_completion));
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();

/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
  if (PP.isCodeCompletionEnabled())
    PP.setCodeCompletionReached();
  // Cut off parsing by acting as if we reached the end-of-file.
  Tok.setKind(tok::eof);
}

/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
  tok::TokenKind Kind = Tok.getKind();
  return Kind == tok::eof || Kind == tok::annot_module_begin ||
         Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}

/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;

/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;

/// Initialize all pragma handlers.
void initializePragmaHandlers();

/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();

/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();

/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();

/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();

/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();

/// Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();

void HandlePragmaMSPointersToMembers();

void HandlePragmaMSVtorDisp();

void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
                           SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
                           SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
                           SourceLocation PragmaLocation);

/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();

/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();

/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();

/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();

/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();

/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();

/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();

/// Handle the annotation token produced for
/// #pragma STDC FENV_ROUND...
void HandlePragmaFEnvRound();

/// Handle the annotation token produced for
/// #pragma float_control
void HandlePragmaFloatControl();

/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();

/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();

/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();

/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);

bool ParsePragmaAttributeSubjectMatchRuleSet(
    attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc,
    SourceLocation &LastMatchRuleEndLoc);

void HandlePragmaAttribute();

/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens.  LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
  if (N == 0 || Tok.is(tok::eof)) return Tok;
  return PP.LookAhead(N-1);
}

public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
  return PP.LookAhead(0);
}

/// getTypeAnnotation - Read a parsed type out of an annotation token.
static TypeResult getTypeAnnotation(const Token &Tok) {
  if (!Tok.getAnnotationValue())
    return TypeError();
  return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}

private:
static void setTypeAnnotation(Token &Tok, TypeResult T) {
  assert((T.isInvalid() || T.get()) &&
         "produced a valid-but-null type annotation?");
  Tok.setAnnotationValue(T.isInvalid() ? nullptr
                                       : T.get().getAsOpaquePtr());
}

/// Read a named declaration out of an annotation token's opaque value slot.
static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
  return static_cast<NamedDecl*>(Tok.getAnnotationValue());
}

static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
  Tok.setAnnotationValue(ND);
}

/// Read an identifier out of an annotation token's opaque value slot.
static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
  return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
}

static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
  Tok.setAnnotationValue(ND);
}

/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
  return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}

/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
  Tok.setAnnotationValue(ER.getAsOpaquePointer());
}

public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
                                               bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);

/// Quick syntactic check for tokens that could begin a C++ scope specifier.
bool MightBeCXXScopeToken() {
  return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
         (Tok.is(tok::annot_template_id) &&
          NextToken().is(tok::coloncolon)) ||
         Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super);
}
bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
  return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext);
}

private:
enum AnnotatedNameKind {
  /// Annotation has failed and emitted an error.
  ANK_Error,
  /// The identifier is a tentatively-declared name.
  ANK_TentativeDecl,
  /// The identifier is a template name. FIXME: Add an annotation for that.
  ANK_TemplateName,
  /// The identifier can't be resolved.
  ANK_Unresolved,
  /// Annotation was successful.
  ANK_Success
};

AnnotatedNameKind
TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);

/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);

/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords.  This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
                     const char *&PrevSpec, unsigned &DiagID,
                     bool &isInvalid) {
  // Fast path: bail out unless an AltiVec/ZVector language mode is active.
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;

  if (Tok.getIdentifierInfo() != Ident_vector &&
      Tok.getIdentifierInfo() != Ident_bool &&
      (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
    return false;

  return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}

/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
  if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
      Tok.getIdentifierInfo() != Ident_vector)
    return false;
  return TryAltiVecVectorTokenOutOfLine();
}

bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
                              const char *&PrevSpec, unsigned &DiagID,
                              bool &isInvalid);

/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
  assert(getLangOpts().ObjC);
  if (Tok.isAnnotation())
    return false;
  // Lazily cache the identifier the first time it is queried.
  if (!Ident_instancetype)
    Ident_instancetype = PP.getIdentifierInfo("instancetype");
  return Tok.getIdentifierInfo() == Ident_instancetype;
}

/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit.  This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);

/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);

/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
///   TentativeParsingAction TPA(*this);
///   ConsumeToken();
///   ....
///   TPA.Revert();
///
class TentativeParsingAction {
  Parser &P;
  PreferredTypeBuilder PrevPreferredType;
  Token PrevTok;
  size_t PrevTentativelyDeclaredIdentifierCount;
  unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
  bool isActive;

public:
  explicit TentativeParsingAction(Parser& p) : P(p) {
    // Snapshot all parser state that Revert() must be able to restore.
    PrevPreferredType = P.PreferredType;
    PrevTok = P.Tok;
    PrevTentativelyDeclaredIdentifierCount =
        P.TentativelyDeclaredIdentifiers.size();
    PrevParenCount = P.ParenCount;
    PrevBracketCount = P.BracketCount;
    PrevBraceCount = P.BraceCount;
    P.PP.EnableBacktrackAtThisPos();
    isActive = true;
  }
  void Commit() {
    assert(isActive && "Parsing action was finished!");
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.PP.CommitBacktrackedTokens();
    isActive = false;
  }
  void Revert() {
    assert(isActive && "Parsing action was finished!");
    P.PP.Backtrack();
    P.PreferredType = PrevPreferredType;
    P.Tok = PrevTok;
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.ParenCount = PrevParenCount;
    P.BracketCount = PrevBracketCount;
    P.BraceCount = PrevBraceCount;
    isActive = false;
  }
  ~TentativeParsingAction() {
    assert(!isActive && "Forgot to call Commit or Revert!");
  }
};

/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
    : private Parser::TentativeParsingAction {
public:
  RevertingTentativeParsingAction(Parser &P)
      : Parser::TentativeParsingAction(P) {}
  ~RevertingTentativeParsingAction() { Revert(); }
};

class UnannotatedTentativeParsingAction;

/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
  Parser &P;
  Decl *DC;
  SaveAndRestore<bool> WithinObjCContainer;

public:
  explicit ObjCDeclContextSwitch(Parser &p)
      : P(p), DC(p.getObjCDeclContext()),
        WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
    if (DC)
      P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
  }
  ~ObjCDeclContextSwitch() {
    if (DC)
      P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
  }
};

/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input.  If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
                      unsigned Diag = diag::err_expected,
                      StringRef DiagMsg = "");

/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);

/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
  OutsideFunction = 0,
  InsideStruct = 1,
  InstanceVariableList = 2,
  AfterMemberFunctionDefinition = 3
};

/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);

/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();

/// Kinds of compound pseudo-tokens formed by a sequence of two real tokens.
enum class CompoundToken {
  /// A '(' '{' beginning a statement-expression.
  StmtExprBegin,
  /// A '}' ')' ending a statement-expression.
  StmtExprEnd,
  /// A '[' '[' beginning a C++11 or C2x attribute.
  AttrBegin,
  /// A ']' ']' ending a C++11 or C2x attribute.
  AttrEnd,
  /// A '::' '*' forming a C++ pointer-to-member declaration.
  MemberPtr,
};

/// Check that a compound operator was written in a "sensible" way, and warn
/// if not.
void checkCompoundToken(SourceLocation FirstTokLoc,
                        tok::TokenKind FirstTokKind, CompoundToken Op);

public:
//===--------------------------------------------------------------------===//
// Scope manipulation

/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
  Parser *Self;
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;

public:
  // ParseScope - Construct a new object to manage a scope in the
  // parser Self where the new Scope is created with the flags
  // ScopeFlags, but only when we aren't about to enter a compound statement.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    if (EnteredScope && !BeforeCompoundStmt)
      Self->EnterScope(ScopeFlags);
    else {
      if (BeforeCompoundStmt)
        Self->incrementMSManglingNumber();
      // Null Self records that no scope was entered, so Exit() is a no-op.
      this->Self = nullptr;
    }
  }

  // Exit - Exit the scope associated with this object now, rather
  // than waiting until the object is destroyed.
  void Exit() {
    if (Self) {
      Self->ExitScope();
      Self = nullptr;
    }
  }

  ~ParseScope() {
    Exit();
  }
};

/// Introduces zero or more scopes for parsing. The scopes will all be exited
/// when the object is destroyed.
class MultiParseScope {
  Parser &Self;
  unsigned NumScopes = 0;

  MultiParseScope(const MultiParseScope&) = delete;

public:
  MultiParseScope(Parser &Self) : Self(Self) {}
  void Enter(unsigned ScopeFlags) {
    Self.EnterScope(ScopeFlags);
    ++NumScopes;
  }
  void Exit() {
    // Pop every scope this object entered, in LIFO order.
    while (NumScopes) {
      Self.ExitScope();
      --NumScopes;
    }
  }
  ~MultiParseScope() {
    Exit();
  }
};

/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);

/// ExitScope - Pop a scope off the scope stack.
void ExitScope();

/// Re-enter the template scopes for a declaration that might be a template.
unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D);

private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
  Scope *CurScope;
  unsigned OldFlags;
  ParseScopeFlags(const ParseScopeFlags &) = delete;
  void operator=(const ParseScopeFlags &) = delete;

public:
  ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
  ~ParseScopeFlags();
};

//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.

public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
DiagnosticBuilder Diag(unsigned DiagID) {
  // Convenience overload: diagnose at the current token.
  return Diag(Tok, DiagID);
}

private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
                        SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);

public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
  StopAtSemi = 1 << 0,  ///< Stop skipping at semicolon
  /// Stop skipping at specified token, but don't skip the token itself
  StopBeforeMatch = 1 << 1,
  StopAtCodeCompletion = 1 << 2  ///< Stop at code completion
};

friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                          SkipUntilFlags R) {
  return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                     static_cast<unsigned>(R));
}

/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified).  Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point.  If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character. Balances (), [], and {} delimiter tokens while
/// skipping.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2, T3};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();

/// The location of the first statement inside an else that might
/// have a misleading indentation. If there is no
/// MisleadingIndentationChecker on an else active, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;

private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.

struct ParsingClass;

/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
  virtual ~LateParsedDeclaration();

  // Each hook is a no-op in this base; subclasses override the ones that
  // apply to the kind of late-parsed entity they represent.
  virtual void ParseLexedMethodDeclarations();
  virtual void ParseLexedMemberInitializers();
  virtual void ParseLexedMethodDefs();
  virtual void ParseLexedAttributes();
  virtual void ParseLexedPragmas();
};

/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
  LateParsedClass(Parser *P, ParsingClass *C);
  ~LateParsedClass() override;

  void ParseLexedMethodDeclarations() override;
  void ParseLexedMemberInitializers() override;
  void ParseLexedMethodDefs() override;
  void ParseLexedAttributes() override;
  void ParseLexedPragmas() override;

private:
  Parser *Self;
  ParsingClass *Class;
};

/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
  Parser *Self;
  CachedTokens Toks;
  IdentifierInfo &AttrName;
  IdentifierInfo *MacroII = nullptr;
  SourceLocation AttrNameLoc;
  SmallVector<Decl*, 2> Decls;

  explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                               SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

  void ParseLexedAttributes() override;

  void addDecl(Decl *D) { Decls.push_back(D); }
};

/// Contains the lexed tokens of a pragma with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
class LateParsedPragma : public LateParsedDeclaration {
  Parser *Self = nullptr;
  AccessSpecifier AS = AS_none;
  CachedTokens Toks;

public:
  explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
      : Self(P), AS(AS) {}

  // Takes ownership of the cached token stream by swapping it in.
  void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
  const CachedTokens &toks() const { return Toks; }
  AccessSpecifier getAccessSpecifier() const { return AS; }

  void ParseLexedPragmas() override;
};

// A list of late-parsed attributes.  Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
  LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

  bool parseSoon() { return ParseSoon; }

private:
  bool ParseSoon;  // Are we planning to parse these shortly after creation?
};

/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
  Parser *Self;
  Decl *D;
  CachedTokens Toks;

  explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}

  void ParseLexedMethodDefs() override;
};

/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
  explicit LateParsedDefaultArgument(
      Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) { }

  /// Param - The parameter declaration for this parameter.
  Decl *Param;

  /// Toks - The sequence of tokens that comprises the default
  /// argument expression, not including the '=' or the terminating
  /// ')' or ','.  This will be NULL for parameters that have no
  /// default argument.
  std::unique_ptr<CachedTokens> Toks;
};

/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
  explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), ExceptionSpecTokens(nullptr) {}

  void ParseLexedMethodDeclarations() override;

  Parser *Self;

  /// Method - The method declaration.
  Decl *Method;

  /// DefaultArgs - Contains the parameters of the function and
  /// their default arguments. At least one of the parameters will
  /// have a default argument, but all of the parameters of the
  /// method will be stored so that they can be reintroduced into
  /// scope at the appropriate times.
  SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

  /// The set of tokens that make up an exception-specification that
  /// has not yet been parsed.
  CachedTokens *ExceptionSpecTokens;
};

/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
  LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

  void ParseLexedMemberInitializers() override;

  Parser *Self;

  /// Field - The field declaration.
  Decl *Field;

  /// CachedTokens - The sequence of tokens that comprises the initializer,
  /// including any leading '='.
  CachedTokens Toks;
};

/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;

/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
  ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), IsInterface(IsInterface),
        TagOrTemplate(TagOrTemplate) {}

  /// Whether this is a "top-level" class, meaning that it is
  /// not nested within another class.
  bool TopLevelClass : 1;

  /// Whether this class is an __interface.
  bool IsInterface : 1;

  /// The class or class template whose definition we are parsing.
  Decl *TagOrTemplate;

  /// LateParsedDeclarations - Method declarations, inline definitions and
  /// nested classes that contain pieces whose parsing will be delayed until
  /// the top-level class is fully defined.
  LateParsedDeclarationsContainer LateParsedDeclarations;
};

/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;

ParsingClass &getCurrentClass() {
  assert(!ClassStack.empty() && "No lexed method stacks!");
  return *ClassStack.top();
}

/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
  Parser &P;
  bool Popped;
  Sema::ParsingClassState State;

public:
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
    : P(P), Popped(false),
      State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
  }

  /// Pop this class off the stack.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }

  // Pops the class on destruction unless Pop() was called explicitly first.
  ~ParsingClassDefinition() {
    if (!Popped)
      P.PopParsingClass(State);
  }
};

/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  ParsedTemplateInfo()
    : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }

  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
    : Kind(isSpecialization? ExplicitSpecialization : Template),
      TemplateParams(TemplateParams),
      LastParameterListWasEmpty(lastParameterListWasEmpty) { }

  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
    : Kind(ExplicitInstantiation), TemplateParams(nullptr),
      ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
      LastParameterListWasEmpty(false){ }

  /// The kind of template we are parsing.
  enum {
    /// We are not parsing a template at all.
    NonTemplate = 0,
    /// We are parsing a template declaration.
    Template,
    /// We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;

  /// The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;

  /// The location of the 'extern' keyword, if any, for an explicit
  /// instantiation
  SourceLocation ExternLoc;

  /// The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;

  /// Whether the last template parameter list was empty.
  bool LastParameterListWasEmpty;

  SourceRange getSourceRange() const LLVM_READONLY;
};

// In ParseCXXInlineMethods.cpp.
struct ReenterTemplateScopeRAII;
struct ReenterClassScopeRAII;

void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);

static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);

Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);

enum CachedInitKind {
  CIK_DefaultArgument,
  CIK_DefaultInitializer
};

NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
                                   ParsedAttributes &AccessAttrs,
                                   ParsingDeclarator &D,
                                   const ParsedTemplateInfo &TemplateInfo,
                                   const VirtSpecifiers &VS,
                                   SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
                             bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
                         bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
void ParseLexedPragmas(ParsingClass &Class);
void ParseLexedPragma(LateParsedPragma &LP);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);

/// Consume tokens up to (and, if ConsumeFinalToken, including) T1,
/// appending them to Toks. Convenience overload that searches for a
/// single terminator kind.
bool ConsumeAndStoreUntil(tok::TokenKind T1,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true) {
  return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true);

//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
struct ParsedAttributesWithRange : ParsedAttributes {
  ParsedAttributesWithRange(AttributeFactory &factory)
    : ParsedAttributes(factory) {}

  void clear() {
    ParsedAttributes::clear();
    Range = SourceRange();
  }

  SourceRange Range;
};
struct ParsedAttributesViewWithRange : ParsedAttributesView {
  ParsedAttributesViewWithRange() : ParsedAttributesView() {}
  void clearListOnly() {
    ParsedAttributesView::clearListOnly();
    Range = SourceRange();
  }

  SourceRange Range;
};

DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
                                        ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
    ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr,
    AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
                                              ParsingDeclSpec &DS,
                                              AccessSpecifier AS);

void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
              const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
              LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc is filled with the location of the last token of the simple-asm.
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc);
ExprResult ParseAsmStringLiteral(bool ForAsmLabel);

// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
                                      ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
    ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
    SmallVectorImpl<IdentifierLocPair> &protocolIdents,
    SourceLocation &rAngleLoc, bool mayBeProtocolList = true);

void HelperActionsForIvarDeclarations(Decl *interfaceDecl,
                                      SourceLocation atLoc,
                                      BalancedDelimiterTracker &T,
                                      SmallVectorImpl<Decl *> &AllIvarDecls,
                                      bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
                                     tok::ObjCKeywordKind visibility,
                                     SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
                                 SmallVectorImpl<SourceLocation> &PLocs,
                                 bool WarnOnDeclarations,
                                 bool ForObjCContainer,
                                 SourceLocation &LAngleLoc,
                                 SourceLocation &EndProtoLoc,
                                 bool consumeLastToken);

/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
    ParsedType baseType,
    SourceLocation &typeArgsLAngleLoc,
    SmallVectorImpl<ParsedType> &typeArgs,
    SourceLocation &typeArgsRAngleLoc,
    SourceLocation &protocolLAngleLoc,
    SmallVectorImpl<Decl *> &protocols,
    SmallVectorImpl<SourceLocation> &protocolLocs,
    SourceLocation &protocolRAngleLoc,
    bool consumeLastToken,
    bool warnOnIncompleteProtocols);

/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
    ParsedType baseType,
    SourceLocation &typeArgsLAngleLoc,
    SmallVectorImpl<ParsedType> &typeArgs,
    SourceLocation &typeArgsRAngleLoc,
    SourceLocation &protocolLAngleLoc,
    SmallVectorImpl<Decl *> &protocols,
    SmallVectorImpl<SourceLocation> &protocolLocs,
    SourceLocation &protocolRAngleLoc,
    bool consumeLastToken);

/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);

/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
                                                  ParsedType type,
                                                  bool consumeLastToken,
                                                  SourceLocation &endLoc);

void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
                                Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
                                              ParsedAttributes &prefixAttrs);

/// RAII state used while parsing an @implementation; registers itself as
/// the parser's current implementation context and collects method bodies
/// whose parsing is deferred until the @end is reached.
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl;
  bool HasCFunction;
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods;

  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
    : P(parser), Dcl(D), HasCFunction(false) {
    P.CurParsedObjCImpl = this;
    Finished = false;
  }
  ~ObjCImplParsingDataRAII();

  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }

private:
  bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);

DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
                                                    ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);

IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);

// Definitions for Objective-C context-sensitive keywords recognition.
enum ObjCTypeQual {
  objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
  objc_nonnull, objc_nullable, objc_null_unspecified,
  objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];

bool isTokIdentifier_in() const;

ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
                             ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
    tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
    bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
    tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
    bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);

Decl *ParseObjCMethodDefinition();

public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.

/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
  NotTypeCast = 0,
  MaybeTypeCast,
  IsTypeCast
};

ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
    TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
ExprResult ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);

ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
                                unsigned &NumLineToksConsumed,
                                bool IsUnevaluated);

ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);

private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);

ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);

ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec);

/// Control what ParseCastExpression will parse.
enum CastParseKind {
  AnyCastExpr = 0,
  UnaryExprOnly,
  PrimaryExprOnly
};
ExprResult ParseCastExpression(CastParseKind ParseKind,
                               bool isAddressOfOperand,
                               bool &NotCastExpr,
                               TypeCastState isTypeCast,
                               bool isVectorLiteral = false,
                               bool *NotPrimaryExpression = nullptr);
ExprResult ParseCastExpression(CastParseKind ParseKind,
                               bool isAddressOfOperand = false,
                               TypeCastState isTypeCast = NotTypeCast,
                               bool isVectorLiteral = false,
                               bool *NotPrimaryExpression = nullptr);

/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();

/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
  tok::TokenKind K = Tok.getKind();
  return (K == tok::l_square || K == tok::l_paren ||
          K == tok::period || K == tok::arrow ||
          K == tok::plusplus || K == tok::minusminus);
}

bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
                                         const Token &OpToken);
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
  if (auto *Info = AngleBrackets.getCurrent(*this))
    return checkPotentialAngleBracketDelimiter(*Info, OpToken);
  return false;
}

ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();

ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
                                              bool &isCastExpr,
                                              ParsedType &CastTy,
                                              SourceRange &CastRange);

typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;

/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
                         SmallVectorImpl<SourceLocation> &CommaLocs,
                         llvm::function_ref<void()> ExpressionStarts =
                             llvm::function_ref<void()>());

/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
                               SmallVectorImpl<SourceLocation> &CommaLocs);

/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
  SimpleExpr,      // Only parse '(' expression ')'
  FoldExpr,        // Also allow fold-expression <anything>
  CompoundStmt,    // Also allow '(' compound-statement ')'
  CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
  CastExpr         // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
                                bool stopIfCastExpr,
                                bool isTypeCast,
                                ParsedType &CastTy,
                                SourceLocation &RParenLoc);

ExprResult ParseCXXAmbiguousParenExpression(
    ParenParseOption &ExprType, ParsedType &CastTy,
    BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
                                          SourceLocation LParenLoc,
                                          SourceLocation RParenLoc);

ExprResult ParseGenericSelectionExpression();

ExprResult ParseObjCBoolLiteral();

ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);

//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
                                   Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);

bool areTokensAdjacent(const Token &A, const Token &B);

void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
                                bool EnteringContext, IdentifierInfo &II,
                                CXXScopeSpec &SS);

bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType,
                                    bool ObjectHasErrors, bool EnteringContext,
                                    bool *MayBePseudoDestructor = nullptr,
                                    bool IsTypename = false,
                                    IdentifierInfo **LastII = nullptr,
                                    bool OnlyNamespace = false,
                                    bool InUsingDeclaration = false);

//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions

/// Result of tentatively parsing a lambda-introducer.
enum class LambdaIntroducerTentativeParse {
  /// This appears to be a lambda-introducer, which has been fully parsed.
  Success,
  /// This is a lambda-introducer, but has not been fully parsed, and this
  /// function needs to be called again to parse it.
  Incomplete,
  /// This is definitely an Objective-C message send expression, rather than
  /// a lambda-introducer, attribute-specifier, or array designator.
  MessageSend,
  /// This is not a lambda-introducer.
  Invalid,
};

// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool ParseLambdaIntroducer(
    LambdaIntroducer &Intro,
    LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);

//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts

ExprResult ParseCXXCasts();

/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();

//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification

ExprResult ParseCXXTypeid();

//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression

ExprResult ParseCXXUuidof();

//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions

ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
                                    tok::TokenKind OpKind,
                                    CXXScopeSpec &SS,
                                    ParsedType ObjectType);

//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer

ExprResult ParseCXXThis();

//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression

ExprResult ParseThrowExpression();

ExceptionSpecificationType tryParseExceptionSpecification(
    bool Delayed,
    SourceRange &SpecificationRange,
    SmallVectorImpl<ParsedType> &DynamicExceptions,
    SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
    ExprResult &NoexceptExpr,
    CachedTokens *&ExceptionSpecTokens);

// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
    SourceRange &SpecificationRange,
    SmallVectorImpl<ParsedType> &Exceptions,
    SmallVectorImpl<SourceRange> &Ranges);

//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type

TypeResult ParseTrailingReturnType(SourceRange &Range,
                                   bool MayBeFollowedByDirectInit);

//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals

ExprResult ParseCXXBoolLiteral();

//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)

ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);

/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);

bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);

//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete

bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
                                 Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
                                    SourceLocation Start);

//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
                                        SourceLocation Loc,
                                        Sema::ConditionKind CK,
                                        ForRangeInfo *FRI = nullptr);

//===--------------------------------------------------------------------===//
// C++ Coroutines

ExprResult ParseCoyieldExpression();

//===--------------------------------------------------------------------===//
// C++ Concepts

ExprResult ParseRequiresExpression();
void ParseTrailingRequiresClause(Declarator &D);

//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.

/// ParseInitializer
///       initializer: [C99 6.7.8]
///         assignment-expression
///         '{' ...
ExprResult ParseInitializer() {
  if (Tok.isNot(tok::l_brace))
    return ParseAssignmentExpression();
  return ParseBraceInitializer();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator(
    llvm::function_ref<void(const Designation &)> CodeCompleteCB);

//===--------------------------------------------------------------------===//
// clang Expressions

ExprResult ParseBlockLiteralExpression();  // ^{...}

//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
                                          SourceLocation SuperLoc,
                                          ParsedType ReceiverType,
                                          Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
    SourceLocation LBracloc, SourceLocation SuperLoc,
    ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);

//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.

/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;

StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
               ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
    StmtVector &Stmts, ParsedStmtContext StmtCtx,
    SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
    StmtVector &Stmts,
    ParsedStmtContext StmtCtx,
    SourceLocation *TrailingElseLoc,
    ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
                                 ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
                              bool MissingCase = false,
                              ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
                                  unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
                               Sema::ConditionResult &CondResult,
                               SourceLocation Loc, Sema::ConditionKind CK,
                               SourceLocation *LParenLoc = nullptr,
                               SourceLocation *RParenLoc = nullptr);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts, ParsedStmtContext StmtCtx,
                               SourceLocation *TrailingElseLoc,
                               ParsedAttributesWithRange &Attrs);

/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
  /// Parse the block; this code is always used.
  IEB_Parse,
  /// Skip the block entirely; this code is never used.
  IEB_Skip,
  /// Parse the block as a dependent block, which may be used in
  /// some template instantiations but not others.
  IEB_Dependent
};

/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
  /// The location of the initial keyword.
  SourceLocation KeywordLoc;
  /// Whether this is an __if_exists block (rather than an
  /// __if_not_exists block).
  bool IsIfExists;

  /// Nested-name-specifier preceding the name.
  CXXScopeSpec SS;

  /// The name we're looking for.
  UnqualifiedId Name;

  /// The behavior of this __if_exists or __if_not_exists block
  /// should.
  IfExistsBehavior Behavior;
};

bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
                                            ParsedAttributes &AccessAttrs,
                                            AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
                                            bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
                         SmallVectorImpl<Expr *> &Constraints,
                         SmallVectorImpl<Expr *> &Exprs);

//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks

StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);

//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks

StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();

//===--------------------------------------------------------------------===//
// Objective-C Statements

StmtResult ParseObjCAtStatement(SourceLocation atLoc,
                                ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);

//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.

/// A context for parsing declaration specifiers.  TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext { DSC_normal, // normal context DSC_class, // class context, enables 'friend' DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list DSC_trailing, // C++11 trailing-type-specifier in a trailing return type DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration DSC_top_level, // top-level/namespace declaration context DSC_template_param, // template parameter context DSC_template_type_arg, // template type argument context DSC_objc_method_result, // ObjC method result context, enables 'instancetype' DSC_condition // condition declaration context }; /// Is this a context in which we are parsing just a type-specifier (or /// trailing-type-specifier)? static bool isTypeSpecifier(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: return false; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return true; } llvm_unreachable("Missing DeclSpecContext case"); } /// Whether a defining-type-specifier is permitted in a given context. enum class AllowDefiningTypeSpec { /// The grammar doesn't allow a defining-type-specifier here, and we must /// not parse one (eg, because a '{' could mean something else). No, /// The grammar doesn't allow a defining-type-specifier here, but we permit /// one for error recovery purposes. Sema will reject. NoButErrorRecovery, /// The grammar allows a defining-type-specifier here, even though it's /// always invalid. Sema will reject. YesButInvalid, /// The grammar allows a defining-type-specifier here, and one can be valid. 
Yes }; /// Is this a context in which we are parsing defining-type-specifiers (and /// so permit class and enum definitions in addition to non-defining class and /// enum elaborated-type-specifiers)? static AllowDefiningTypeSpec isDefiningTypeSpecifierContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_alias_declaration: case DeclSpecContext::DSC_objc_method_result: return AllowDefiningTypeSpec::Yes; case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_template_param: return AllowDefiningTypeSpec::YesButInvalid; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: return AllowDefiningTypeSpec::NoButErrorRecovery; case DeclSpecContext::DSC_trailing: return AllowDefiningTypeSpec::No; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which an opaque-enum-declaration can appear? static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: return true; case DeclSpecContext::DSC_alias_declaration: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: return false; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which we can perform class template argument /// deduction? 
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  switch (DSC) {
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;

  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}

/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  SourceLocation ColonLoc;
  ExprResult RangeExpr;

  bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
struct ForRangeInfo : ForRangeInit {
  StmtResult LoopVar;
};

DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
                                SourceLocation &DeclEnd,
                                ParsedAttributesWithRange &attrs,
                                SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
                       ParsedAttributesWithRange &attrs, bool RequireSemi,
                       ForRangeInit *FRI = nullptr,
                       SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
                              SourceLocation *DeclEnd = nullptr,
                              ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
              const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
    Declarator &D,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();

bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
                      const ParsedTemplateInfo &TemplateInfo,
                      AccessSpecifier AS, DeclSpecContext DSC,
                      ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
    DeclSpec &DS,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    AccessSpecifier AS = AS_none,
    DeclSpecContext DSC = DeclSpecContext::DSC_normal,
    LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
    DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
    LateParsedAttrList *LateAttrs = nullptr);

void ParseSpecifierQualifierList(
    DeclSpec &DS, AccessSpecifier AS = AS_none,
    DeclSpecContext DSC = DeclSpecContext::DSC_normal);

void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
                                DeclaratorContext Context);

void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
                        const ParsedTemplateInfo &TemplateInfo,
                        AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
                          RecordDecl *TagDecl);
void ParseStructDeclaration(
    ParsingDeclSpec &DS,
    llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);

bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();

/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier.  Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;

/// Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
  // In C++, a full tentative-parse disambiguation is required; in C, a
  // decl-specifier at the start is sufficient evidence.
  if (getLangOpts().CPlusPlus)
    return isCXXDeclarationSpecifier() == TPResult::True;
  return isDeclarationSpecifier(true);
}

/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  if (getLangOpts().CPlusPlus)
    return isCXXDeclarationStatement();
  return isDeclarationSpecifier(true);
}

/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  // Let Sema know a loop is starting before disambiguation begins;
  // OpenMP loop handling depends on this.
  if (getLangOpts().OpenMP)
    Actions.startOpenMPLoop();
  if (getLangOpts().CPlusPlus)
    return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
  return isDeclarationSpecifier(true);
}

/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();

/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();

/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);

/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
  TypeIdInParens,
  TypeIdUnambiguous,
  TypeIdAsTemplateArgument
};

/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdInParens, isAmbiguous);
  // In C there is no ambiguity: a type-id is exactly a specifier-qualifier
  // sequence.
  isAmbiguous = false;
  return isTypeSpecifierQualifier();
}
// Convenience overload for callers that do not care about ambiguity.
bool isTypeIdInParens() {
  bool isAmbiguous;
  return isTypeIdInParens(isAmbiguous);
}

/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not suppose that type-id
/// is in parenthesis.
bool isTypeIdUnambiguously() {
  bool IsAmbiguous;
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
  return isTypeSpecifierQualifier();
}

/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();

/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);

struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
  Expression,    ///< Disambiguated as an expression (either kind).
  ConditionDecl, ///< Disambiguated as the declaration form of condition.
  InitStmtDecl,  ///< Disambiguated as a simple-declaration init-statement.
  ForRangeDecl,  ///< Disambiguated as a for-range declaration.
  Error          ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
                                         bool CanBeForRangeDecl);

bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
// Convenience overload for callers that do not care about ambiguity.
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool isAmbiguous;
  return isCXXTypeId(Context, isAmbiguous);
}

/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult { True, False, Ambiguous, Error };

/// Determine whether we could have an enum-base.
///
/// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise
/// only consider this to be an enum-base if the next token is a '{'.
///
/// \return \c false if this cannot possibly be an enum base; \c true
/// otherwise.
bool isEnumBase(bool AllowSemi);

/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
                          bool *InvalidAsDeclSpec = nullptr);

/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType(); /// Determine whether the current token sequence might be /// '<' template-argument-list '>' /// rather than a less-than expression. TPResult isTemplateArgumentList(unsigned TokensToSkip); /// Determine whether an '(' after an 'explicit' keyword is part of a C++20 /// 'explicit(bool)' declaration, in earlier language modes where that is an /// extension. TPResult isExplicitBool(); /// Determine whether an identifier has been tentatively declared as a /// non-type. Such tentative declarations should not be found to name a type /// during a tentative parse, but also should not be annotated as a non-type. bool isTentativelyDeclared(IdentifierInfo *II); // "Tentative parsing" functions, used for disambiguation. If a parsing error // is encountered they will return TPResult::Error. // Returning TPResult::True/False indicates that the ambiguity was // resolved and tentative parsing may stop. TPResult::Ambiguous indicates // that more tentative parsing is necessary for disambiguation. // They all consume tokens, so backtracking should be used after calling them. TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl); TPResult TryParseTypeofSpecifier(); TPResult TryParseProtocolQualifiers(); TPResult TryParsePtrOperatorSeq(); TPResult TryParseOperatorId(); TPResult TryParseInitDeclaratorList(); TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true, bool mayHaveDirectInit = false); TPResult TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false); TPResult TryParseFunctionDeclarator(); TPResult TryParseBracketDeclarator(); TPResult TryConsumeDeclarationSpecifier(); /// Try to skip a possibly empty sequence of 'attribute-specifier's without /// full validation of the syntactic structure of attributes. 
bool TrySkipAttributes();

public:
TypeResult
ParseTypeName(SourceRange *Range = nullptr,
              DeclaratorContext Context = DeclaratorContext::TypeName,
              AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr,
              ParsedAttributes *Attrs = nullptr);

private:
void ParseBlockId(SourceLocation CaretLoc);

/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
  const LangOptions &LO = getLangOpts();
  return LO.DoubleSquareBracketAttributes;
}

// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // Only a '[[' sequence can begin a standard attribute-specifier.
  if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
    return false;
  return DiagnoseProhibitedCXX11Attribute();
}
bool DiagnoseProhibitedCXX11Attribute();

// If the current token begins a C++11 attribute ('[[' or 'alignas'),
// diagnose it as misplaced and suggest moving it to CorrectLocation.
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
      Tok.isNot(tok::kw_alignas))
    return;
  DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                     SourceLocation CorrectLocation);

void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
                                    DeclSpec &DS, Sema::TagUseKind TUK);

// FixItLoc = possible correct location for the attributes
// Diagnose (if any were parsed) and discard attributes that are not
// permitted here; the no-op early return keeps the common case cheap.
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isInvalid())
    return;
  DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
  Attrs.clear();
}

// Overload for attribute views: clears only the list, not the pool.
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isInvalid())
    return;
  DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
  Attrs.clearListOnly();
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
                                  SourceLocation FixItLoc);

// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which standard permits but
we don't supported yet, for example, attributes // appertain to decl specifiers. void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs, unsigned DiagID); /// Skip C++11 and C2x attributes and return the end location of the /// last one. /// \returns SourceLocation() if there are no attributes. SourceLocation SkipCXX11Attributes(); /// Diagnose and skip C++11 and C2x attributes that appear in syntactic /// locations where attributes are not allowed. void DiagnoseAndSkipCXX11Attributes(); /// Parses syntax-generic attribute arguments for attributes which are /// known to the implementation, and adds them to the given ParsedAttributes /// list with the given attribute syntax. Returns the number of arguments /// parsed for the attribute. unsigned ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseGNUAttributes(Declarator &D, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParsedAttributes attrs(AttrFactory); SourceLocation endLoc; ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D); D.takeAttributes(attrs, endLoc); } } void MaybeParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) ParseGNUAttributes(attrs, endLoc, LateAttrs); } void ParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr); void ParseGNUAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax, Declarator *D); IdentifierLoc *ParseIdentifierLoc(); unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, 
IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseCXX11Attributes(Declarator &D) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrs(AttrFactory); SourceLocation endLoc; ParseCXX11Attributes(attrs, &endLoc); D.takeAttributes(attrs, endLoc); } } bool MaybeParseCXX11Attributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrsWithRange(AttrFactory); ParseCXX11Attributes(attrsWithRange, endLoc); attrs.takeAllFrom(attrsWithRange); return true; } return false; } void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *endLoc = nullptr, bool OuterMightBeMessageSend = false) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) ParseCXX11Attributes(attrs, endLoc); } void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs, SourceLocation *EndLoc = nullptr); void ParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *EndLoc = nullptr); /// Parses a C++11 (or C2x)-style attribute argument list. Returns true /// if this results in adding an attribute to the ParsedAttributes list. 
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc); IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc); void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) ParseMicrosoftAttributes(attrs, endLoc); } void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs); void ParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr); void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr) { const auto &LO = getLangOpts(); if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) ParseMicrosoftDeclSpecs(Attrs, End); } void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr); bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs); void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs); void DiagnoseAndSkipExtendedMicrosoftTypeAttributes(); SourceLocation SkipExtendedMicrosoftTypeAttributes(); void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs); void ParseBorlandTypeAttributes(ParsedAttributes &attrs); void ParseOpenCLKernelAttributes(ParsedAttributes &attrs); void ParseOpenCLQualifiers(ParsedAttributes &Attrs); /// Parses opencl_unroll_hint attribute if language is OpenCL v2.0 /// or higher. /// \return false if error happens. bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) { if (getLangOpts().OpenCL) return ParseOpenCLUnrollHintAttribute(Attrs); return true; } /// Parses opencl_unroll_hint attribute. /// \return false if error happens. 
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs); void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs); VersionTuple ParseVersionTuple(SourceRange &Range); void ParseAvailabilityAttribute(IdentifierInfo &Availability, SourceLocation AvailabilityLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); Optional<AvailabilitySpec> ParseAvailabilitySpec(); ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc); void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseAttributeWithTypeArg(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeofSpecifier(DeclSpec &DS); SourceLocation ParseDecltypeSpecifier(DeclSpec &DS); void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS, SourceLocation StartLoc, SourceLocation EndLoc); void ParseUnderlyingTypeSpecifier(DeclSpec &DS); void ParseAtomicSpecifier(DeclSpec &DS); ExprResult ParseAlignArgument(SourceLocation Start, SourceLocation 
&EllipsisLoc); void ParseAlignmentSpecifier(ParsedAttributes &Attrs, SourceLocation *endLoc = nullptr); ExprResult ParseExtIntegerArgument(); VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const; VirtSpecifiers::Specifier isCXX11VirtSpecifier() const { return isCXX11VirtSpecifier(Tok); } void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface, SourceLocation FriendLoc); bool isCXX11FinalKeyword() const; /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to /// enter a new C++ declarator scope and exit it when the function is /// finished. class DeclaratorScopeObj { Parser &P; CXXScopeSpec &SS; bool EnteredScope; bool CreatedScope; public: DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss) : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {} void EnterDeclaratorScope() { assert(!EnteredScope && "Already entered the scope!"); assert(SS.isSet() && "C++ scope was not set!"); CreatedScope = true; P.EnterScope(0); // Not a decl scope. if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS)) EnteredScope = true; } ~DeclaratorScopeObj() { if (EnteredScope) { assert(SS.isSet() && "C++ scope was cleared ?"); P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS); } if (CreatedScope) P.ExitScope(); } }; /// ParseDeclarator - Parse and verify a newly-initialized declarator. void ParseDeclarator(Declarator &D); /// A function that parses a variant of direct-declarator. typedef void (Parser::*DirectDeclParseFunction)(Declarator&); void ParseDeclaratorInternal(Declarator &D, DirectDeclParseFunction DirectDeclParser); enum AttrRequirements { AR_NoAttributesParsed = 0, ///< No attributes are diagnosed. AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes. 
AR_GNUAttributesParsed = 1 << 1, AR_CXX11AttributesParsed = 1 << 2, AR_DeclspecAttributesParsed = 1 << 3, AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed, AR_VendorAttributesParsed = AR_GNUAttributesParsed | AR_DeclspecAttributesParsed }; void ParseTypeQualifierListOpt( DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed, bool AtomicAllowed = true, bool IdentifierRequired = false, Optional<llvm::function_ref<void()>> CodeCompletionHandler = None); void ParseDirectDeclarator(Declarator &D); void ParseDecompositionDeclarator(Declarator &D); void ParseParenDeclarator(Declarator &D); void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker, bool IsAmbiguous, bool RequiresArg = false); void InitCXXThisScopeForDeclaratorIfRelevant( const Declarator &D, const DeclSpec &DS, llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope); bool ParseRefQualifier(bool &RefQualifierIsLValueRef, SourceLocation &RefQualifierLoc); bool isFunctionDeclaratorIdentifierList(); void ParseFunctionDeclaratorIdentifierList( Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo); void ParseParameterDeclarationClause( DeclaratorContext DeclaratorContext, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. enum CXX11AttributeKind { /// This is not an attribute specifier. CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. This /// is ill-formed by C++11 [dcl.attr.grammar]p6. 
CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(DeclaratorContext Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); struct InnerNamespaceInfo { SourceLocation NamespaceLoc; SourceLocation InlineLoc; SourceLocation IdentLoc; IdentifierInfo *Ident; }; using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>; void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs, unsigned int index, SourceLocation &InlineLoc, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = EllipsisLoc = SourceLocation(); SS.clear(); Name.clear(); } }; bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl **OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, SourceLocation &DeclEnd); 
//===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, ParsedAttributes &Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, ParsedType 
ObjectType, bool ObjectHadErrors, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse a property kind into \p TIProperty for the selector set \p Set and /// selector \p Selector. void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty, llvm::omp::TraitSet Set, llvm::omp::TraitSelector Selector, llvm::StringMap<SourceLocation> &Seen); /// Parse a selector kind into \p TISelector for the selector set \p Set. void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &Seen); /// Parse a selector set kind into \p TISet. void parseOMPTraitSetKind(OMPTraitSet &TISet, llvm::StringMap<SourceLocation> &Seen); /// Parses an OpenMP context property. void parseOMPContextProperty(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &Seen); /// Parses an OpenMP context selector. void parseOMPContextSelector(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &SeenSelectors); /// Parses an OpenMP context selector set. void parseOMPContextSelectorSet(OMPTraitSet &TISet, llvm::StringMap<SourceLocation> &SeenSets); /// Parses OpenMP context selectors. bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI); /// Parse a `match` clause for an '#pragma omp declare variant'. Return true /// if there was an error. bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI, OMPTraitInfo *ParentTI); /// Parse clauses for '#pragma omp declare variant'. 
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse clauses for '#pragma omp declare target'. DeclGroupPtrTy ParseOMPDeclareTargetClauses(); /// Parse '#pragma omp end declare target'. void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind, SourceLocation Loc); /// Skip tokens until a `annot_pragma_openmp_end` was found. Emit a warning if /// it is not the current token. void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind); /// Check the \p FoundKind against the \p ExpectedKind, if not issue an error /// that the "end" matching the "begin" directive of kind \p BeginKind was not /// found. Finally, if the expected kind was found or if \p SkipUntilOpenMPEnd /// is set, skip ahead using the helper `skipUntilPragmaOpenMPEnd`. void parseOMPEndDirective(OpenMPDirectiveKind BeginKind, OpenMPDirectiveKind ExpectedKind, OpenMPDirectiveKind FoundKind, SourceLocation MatchingLoc, SourceLocation FoundLoc, bool SkipUntilOpenMPEnd); /// Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// Parse 'omp declare reduction' construct. DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// Parses initializer for provided omp_priv declaration inside the reduction /// initializer. void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm); /// Parses 'omp declare mapper' directive. DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS); /// Parses variable declaration in 'omp declare mapper' directive. TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range, DeclarationName &Name, AccessSpecifier AS = AS_none); /// Tries to parse cast part of OpenMP array shaping operation: /// '[' expression ']' { '[' expression ']' } ')'. 
bool tryParseOpenMPArrayShapingCastPart(); /// Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements. /// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// Parses declarative or executable directive. /// /// \param StmtCtx The context in which we're parsing the directive. StmtResult ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx); /// Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param DKind Directive kind. /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause without any additional arguments. 
/// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false); /// Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); /// Parses and creates OpenMP 5.0 iterators expression: /// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier = /// <range-specification> }+ ')' ExprResult ParseOpenMPIteratorsExpr(); /// Parses allocators and traits in the context of the uses_allocator clause. /// Expected format: /// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')' OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc, bool IsAddressOfOperand = false); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *DepModOrTailExpr = nullptr; SourceLocation ColonLoc; SourceLocation RLoc; CXXScopeSpec ReductionOrMapperIdScopeSpec; DeclarationNameInfo ReductionOrMapperId; int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or ///< lastprivate clause. SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers> MapTypeModifiers; SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers> MapTypeModifiersLoc; SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers> MotionModifiers; SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc; bool IsMapTypeImplicit = false; SourceLocation ExtraModifierLoc; }; /// Parses clauses with list. 
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. bool parseMapperModifier(OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) bool parseMapTypeModifiers(OpenMPVarListDataTy &Data); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); TPResult isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl 
*ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); bool isTypeConstraintAnnotation(); bool TryAnnotateTypeConstraint(); NamedDecl * ParseConstrainedTemplateTypeParameter(unsigned Depth, unsigned Position); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc, SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true, bool TypeConstraint = false); void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS, bool IsClassName = false); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); // C++2a: Template, concept definition [temp] Decl * ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl); Decl *ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == 
tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult ParseTypeTrait(); //===--------------------------------------------------------------------===// // Embarcadero: Arary and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); //===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; void CodeCompleteInConditionalExclusion() override; void CodeCompleteMacroName(bool IsDefinition) override; void CodeCompletePreprocessorExpression() override; void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned ArgumentIndex) override; void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override; void CodeCompleteNaturalLanguage() override; class GNUAsmQualifiers { unsigned Qualifiers = AQ_unspecified; public: enum AQ { AQ_unspecified = 0, AQ_volatile = 1, AQ_inline = 2, AQ_goto = 4, }; static const char *getQualifierName(AQ Qualifier); bool setAsmQualifier(AQ Qualifier); inline bool isVolatile() const { return Qualifiers & AQ_volatile; }; inline bool isInline() const { return Qualifiers & AQ_inline; }; inline bool isGoto() const { return Qualifiers & AQ_goto; } }; bool isGCCAsmStatement(const Token &TokAfterAsm) const; bool isGNUAsmQualifier(const Token &TokAfterAsm) const; GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const; bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ); }; } // end namespace clang #endif
GB_unaryop__minv_uint16_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__minv_uint16_uint32
// op(A') function: GB_tran__minv_uint16_uint32

// C type: uint16_t
// A type: uint32_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 16)

// type of the entries of the input matrix A
#define GB_ATYPE \
    uint32_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// access the entry of C at position p
#define GB_CX(p) Cx [p]

// unary operator: z = minv(x) on a 16-bit unsigned integer
// (integer "multiplicative inverse" semantics are defined by
// GB_IMINV_UNSIGNED in GB.h -- see that macro for the x == 0 case)
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 16) ;

// casting: narrow the 32-bit input down to the 16-bit output type
#define GB_CASTING(z, aij) \
    uint16_t z = (uint16_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT16 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator entrywise to the dense value array Ax, writing the
// result into Cx.  Returns GrB_NO_VALUE when this kernel is disabled so the
// caller falls back to the generic (non-hard-coded) implementation.
GrB_Info GB_unop__minv_uint16_uint32
(
    uint16_t *Cx,       // Cx and Ax may be aliased
    uint32_t *Ax,
    int64_t anz,        // number of entries in Ax
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // each iteration is independent, so a static schedule balances the work
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in GB_unaryop_transpose.c, which is
// textually included below and parameterized by the macros defined above.
GrB_Info GB_tran__minv_uint16_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
LookupTable.h
#ifndef _LOOKUPTABLE_H_
#define _LOOKUPTABLE_H_

/*
 * LookupTable.h:
 * Lookup operation, for embeddings
 *
 *  Created on: Apr 22, 2017
 *      Author: mszhang
 */

#include "SparseParam.h"
#include "MyLib.h"
#include "Alphabet.h"
#include "Node.h"
#include "Graph.h"
#include "ModelUpdate.h"
#include "profiler.h"

// Embedding table: maps alphabet entries (strings) to dense vectors stored
// in the sparse parameter E.  Supports random initialization or loading from
// a pre-trained embedding file.
class LookupTable {
  public:
    PAlphabet elems;     // vocabulary; maps strings to integer ids
    SparseParam E;       // embedding parameter, initialized as (nDim, nVSize)
    bool bFineTune;      // if true, E is registered for gradient updates
    int nDim;            // embedding dimension
    int nVSize;          // vocabulary size
    int nUNKId;          // id of the unknown-word entry, or -1 if absent

    LookupTable() {
        nVSize = 0;
        nDim = 0;
        elems = NULL;
        nUNKId = -1;
        bFineTune = false;
    }

    //random initialization
    inline void initial(PAlphabet alpha, int dim, bool fineTune = true) {
        elems = alpha;
        nVSize = elems->size();
        nUNKId = elems->from_string(unknownkey);
        initialWeights(dim, fineTune);
    }

    //initialization by pre-trained embeddings
    // Returns false if the alphabet or the embedding file is unusable.
    inline bool initial(PAlphabet alpha, const string& inFile, bool fineTune = true, dtype norm = -1) {
        elems = alpha;
        nVSize = elems->size();
        nUNKId = elems->from_string(unknownkey);
        return initialWeights(inFile, fineTune, norm);
    }

    // Random initialization of E with scale sqrt(1/dim).
    inline void initialWeights(int dim, bool tune) {
        // reject an empty vocabulary, or one that contains only the UNK entry
        if (nVSize == 0 || (nVSize == 1 && nUNKId >= 0)) {
            std::cout << "please check the alphabet" << std::endl;
            return;
        }
        nDim = dim;
        E.initial(nDim, nVSize);
        E.val.random(sqrt(1.0 / nDim));
        //E.val.norm2one();
        bFineTune = tune;
#if USE_GPU
        E.val.copyFromHostToDevice();
#endif
    }

    // default should be fineTune, just for initialization
    // Loads pre-trained embeddings from inFile (one entry per line:
    // "word v1 v2 ... vDim").  Words missing from the file are initialized
    // from the UNK vector (if any) or from the running average.
    inline bool initialWeights(const string& inFile, bool tune, dtype norm = -1) {
        if (nVSize == 0 || !elems->is_fixed() || (nVSize == 1 && nUNKId >= 0)) {
            std::cout << "please check the alphabet" << std::endl;
            return false;
        }
        ifstream inf;
        // NOTE(review): a freshly constructed ifstream is never open, so this
        // close/clear branch is dead code -- harmless, kept as-is.
        if (inf.is_open()) {
            inf.close();
            inf.clear();
        }
        inf.open(inFile.c_str());

        if (!inf.is_open()) {
            std::cout << "please check the input file" << std::endl;
            return false;
        }

        string strLine, curWord;
        int wordId;

        // read all non-empty lines into memory first
        vector<string> sLines;
        sLines.clear();
        while (1) {
            if (!my_getline(inf, strLine)) {
                break;
            }
            if (!strLine.empty()) {
                sLines.push_back(strLine);
            }
        }
        inf.close();
        if (sLines.size() == 0) {
            return false;
        }

        //find the first line, decide the wordDim;
        vector<string> vecInfo;
        split_bychar(sLines[0], vecInfo, ' ');
        nDim = vecInfo.size() - 1;

        E.initial(nDim, nVSize);

        std::cout << "word embedding dim is " << nDim << std::endl;

        bool bHasUnknown = false;
        unordered_set<int> indexers;   // ids actually covered by the file
        NRVec<dtype> sum(nDim);        // running sum for the average vector
        sum = 0.0;
        int count = 0;                 // number of in-vocabulary lines read
        for (int idx = 0; idx < sLines.size(); idx++) {
            split_bychar(sLines[idx], vecInfo, ' ');
            // malformed lines are reported but still processed below
            if (vecInfo.size() != nDim + 1) {
                std::cout << "error embedding file" << std::endl;
            }
            curWord = vecInfo[0];
            //we assume the keys are normalized
            wordId = elems->from_string(curWord);
            if (wordId >= 0) {
                count++;
                if (nUNKId == wordId) {
                    bHasUnknown = true;
                }
                indexers.insert(wordId);

                for (int idy = 0; idy < nDim; idy++) {
                    dtype curValue = atof(vecInfo[idy + 1].c_str());
                    sum[idy] += curValue;
                    // += accumulates if the same word appears more than once
                    E.val[wordId][idy] += curValue;
                }
            }
        }

        if (count == 0) {
            // no overlap with the vocabulary: fall back to random init
            E.val.random(sqrt(3.0 / nDim));
#if USE_GPU
            E.val.copyFromHostToDevice();
#endif
            std::cout << "find no overlapped lexicons in the embedding file" << std::endl;
            return false;
        }

        if (nUNKId >= 0 && !bHasUnknown) {
            // synthesize the UNK vector as the average of the seen vectors
            for (int idx = 0; idx < nDim; idx++) {
                E.val[nUNKId][idx] = sum[idx] / (count + 1);
            }
            indexers.insert(nUNKId);
            count++;
            std::cout << unknownkey << " not found, using averaged value to initialize." << std::endl;
        }

        // every id not covered by the file gets the UNK vector (or the average)
        int oovWords = 0;
        for (int id = 0; id < nVSize; id++) {
            if (indexers.find(id) == indexers.end()) {
                oovWords++;
                for (int idy = 0; idy < nDim; idy++) {
                    E.val[id][idy] = nUNKId >= 0 ? E.val[nUNKId][idy] : sum[idy] / (count + 1);
                }
            }
        }

        std::cout << "OOV num is " << oovWords << ", total num is " << nVSize << ", embedding oov ratio is " << oovWords * 1.0 / nVSize << std::endl;
        std::cout << "unknown id" << nUNKId << std::endl;
        bFineTune = tune;
        if (norm > 0) {
            E.val.norm2one(norm);
        }
#if USE_GPU
        E.val.copyFromHostToDevice();
#endif
        return true;
    }

    // Register E with the optimizer only when fine-tuning is enabled.
    inline void exportAdaParams(ModelUpdate& ada) {
        if (bFineTune) {
            ada.addParam(&E);
        }
    }

    // String -> id lookup; returns a negative value for unknown strings.
    inline int getElemId(const string& strFeat) {
        return elems->from_string(strFeat);
    }

    inline void save(std::ofstream &os) const {
        E.save(os);
        os << bFineTune << std::endl;
        os << nDim << std::endl;
        os << nVSize << std::endl;
        os << nUNKId << std::endl;
    }

    //set alpha directly
    inline void load(std::ifstream &is, PAlphabet alpha) {
        E.load(is);
        is >> bFineTune;
        is >> nDim;
        is >> nVSize;
        is >> nUNKId;
        elems = alpha;
    }
};

// Graph node performing one embedding lookup (a leaf node of the graph).
class LookupNode : public Node {
  public:
    LookupTable* param;   // backing table (not owned)
    int xid;              // resolved row id in the table, or -1 if unknown

    LookupNode() {
        xid = -1;
        param = NULL;
        node_type = "lookup";
    }

    inline void setParam(LookupTable* paramInit) {
        param = paramInit;
    }

    inline void clearValue() {
        Node::clearValue();
        xid = -1;
    }

    //notice the output
    //this should be leaf nodes
    // Resolves strNorm to an id (falling back to UNK when available) and
    // registers this node with the graph.
    void forward(Graph *cg, const string& strNorm) {
        assert(param != NULL);
        xid = param->getElemId(strNorm);
        if (xid < 0 && param->nUNKId >= 0) {
            xid = param->nUNKId;
        }
        if (param->bFineTune && xid < 0) {
            std::cout << "Caution: unknown words are not modeled !" << std::endl;
        }
        degree = 0;
        cg->addNode(this);
    }

    inline PExecute generate(bool bTrain, dtype cur_drop_factor);

    // better to rewrite for deep understanding
    bool typeEqual(PNode other) override {
        bool result = Node::typeEqual(other);
        if (!result) return false;

        LookupNode* conv_other = (LookupNode*)other;
        if (param != conv_other->param) {
            return false;
        }

        return true;
    }

    size_t typeHashCode() const override {
        return Node::typeHashCode() ^ ::typeHashCode(param);
    }

    // for which do no require merge
    // Copies the embedding row into val, or zeros val for unresolved ids.
    void compute() {
        if (xid >= 0) {
            param->E.value(xid, val);
        } else {
            val.zero();
        }
    }

    // NOTE(review): when the table has no UNK entry (nUNKId == -1) and the
    // word was unknown (xid == -1), the first condition below is true and
    // E.loss(-1, ...) is called -- confirm SparseParam::loss tolerates a
    // negative id.
    void backward() {
        assert(param != NULL);
        if (xid == param->nUNKId || (xid >= 0 && param->bFineTune)) {
            param->E.loss(xid, loss);
        }
    }
};

#if USE_GPU
// GPU batch executor: gathers xids/val pointers and runs the CUDA kernels.
class LookupExecute :public Execute {
  public:
    int dim;
    Tensor2D drop_mask;
    LookupTable *table;
    std::vector<int> xids;

    inline void forward() {
        int count = batch.size();
        drop_mask.init(dim, count);
        CalculateDropMask(count, dim, drop_mask);
        xids.reserve(count);
        std::vector<dtype*> vals;
        vals.reserve(count);
        for (int idx = 0; idx < count; idx++) {
            LookupNode *n = static_cast<LookupNode*>(batch[idx]);
            xids.push_back(n->xid);
            vals.push_back(n->val.value);
        }
        n3ldg_cuda::LookupForward(xids, table->E.val.value, bTrain, drop_mask.value, dynamicDropValue(), count, dim, vals);
#if TEST_CUDA
        drop_mask.copyFromDeviceToHost();
        for (int idx = 0; idx < count; idx++) {
            batch[idx]->compute();
            // NOTE(review): batch.at(0) is a Node pointer, so "batch.at(0) > 0"
            // compares a pointer against 0 and is almost always true; this
            // looks like it was meant to test something else (e.g. the drop
            // value) -- confirm against the CPU path.
            if (batch.at(0) > 0) {
                for (int i = 0; i < count; ++i) {
                    for (int j = 0; j < dim; ++j) {
                        dtype v = drop_mask[j][i];
                        batch[i]->drop_mask[j] = v <= dynamicDropValue() ? 0 : 1;
                    }
                }
            }
            batch[idx]->forward_drop(bTrain, drop_factor);
            int xid = static_cast<LookupNode*>(batch[idx])->xid;
            n3ldg_cuda::Assert(batch[idx]->val.verify("lookup forward"));
        }
#endif
    }

    inline void backward() {
        int count = batch.size();
        std::vector<dtype*> losses;
        losses.reserve(count);
        for (Node *n : batch) {
            losses.push_back(n->loss.value);
        }
        n3ldg_cuda::LookupBackward(xids, table->nUNKId, table->bFineTune, losses, drop_mask.value, dynamicDropValue(), count, dim, table->E.grad.value, table->E.dIndexers.value);
#if TEST_CUDA
        for (int idx = 0; idx < count; idx++) {
            batch[idx]->backward_drop();
            batch[idx]->backward();
        }
        n3ldg_cuda::Assert(table->E.grad.verify("lookup backward grad"));
        n3ldg_cuda::Assert(n3ldg_cuda::Verify(table->E.indexers.c_buf(), table->E.dIndexers.value, table->E.dIndexers.len, "lookup backward index"));
#endif
    }
};
#else
// CPU batch executor: simply runs each node's compute/backward in turn.
class LookupExecute :public Execute {
  public:
    inline void forward() {
        int count = batch.size();
        //#pragma omp parallel for
        for (int idx = 0; idx < count; idx++) {
            batch[idx]->compute();
            batch[idx]->forward_drop(bTrain, drop_factor);
        }
    }

    inline void backward() {
        int count = batch.size();
        //#pragma omp parallel for
        for (int idx = 0; idx < count; idx++) {
            batch[idx]->backward_drop();
            batch[idx]->backward();
        }
    }
};
#endif

// Creates a single-node executor for this lookup.
PExecute LookupNode::generate(bool bTrain, dtype cur_drop_factor) {
    LookupExecute* exec = new LookupExecute();
    exec->batch.push_back(this);
    exec->bTrain = bTrain;
    exec->drop_factor = cur_drop_factor;
#if USE_GPU
    exec->table = param;
    // presumably `dim` here is the Node's dimension member -- TODO confirm
    exec->dim = dim;
#endif
    return exec;
}

#endif /*_LOOKUPTABLE_H*/
GB_unaryop__minv_int8_int32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__minv_int8_int32
// op(A') function: GB_tran__minv_int8_int32

// C type: int8_t
// A type: int32_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 8)

// type of the entries of the input matrix A
#define GB_ATYPE \
    int32_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

// access the entry of C at position p
#define GB_CX(p) Cx [p]

// unary operator: z = minv(x) on an 8-bit signed integer
// (integer "multiplicative inverse" semantics are defined by
// GB_IMINV_SIGNED in GB.h -- see that macro for the x == 0 case)
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 8) ;

// casting: narrow the 32-bit input down to the 8-bit output type
#define GB_CASTING(z, x) \
    int8_t z = (int8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT8 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator entrywise to the dense value array Ax, writing the
// result into Cx.  Returns GrB_NO_VALUE when this kernel is disabled so the
// caller falls back to the generic (non-hard-coded) implementation.
GrB_Info GB_unop__minv_int8_int32
(
    int8_t *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,        // number of entries in Ax
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // each iteration is independent, so a static schedule balances the work
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in GB_unaryop_transpose.c, which is
// textually included below and parameterized by the macros defined above.
GrB_Info GB_tran__minv_int8_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
6_readers_writers.c
/*
Program : 6
Author : Anish
Topic : Write a C program using OpenMP features to implement one reader and one
writer threads. The reader thread should display the value of a global variable,
whereas the writer thread should increment the value of the global variable.
Both the threads should run infinitely.
*/

#include<stdio.h>
#include<omp.h>

int main()
{
    int a = 10;     /* shared counter: read by the reader, incremented by the writer */

    /* disable dynamic thread adjustment so exactly 2 threads are spawned */
    omp_set_dynamic(0);

    #pragma omp parallel num_threads(2)
    {
        /* BUG FIX: 'id' was previously declared before the parallel region,
           making it shared.  Both threads then raced on the assignment
           id = omp_get_thread_num(), so a thread could observe the other
           thread's id and both could act as reader (or writer).  Declaring
           'id' inside the region makes it private to each thread. */
        int id = omp_get_thread_num();

        if (id == 0)    /* reader thread */
        {
            while (1)
            {
                /* the critical section makes reads of 'a' mutually
                   exclusive with the writer's increments */
                #pragma omp critical
                {
                    printf("\n READER THREAD %d", a);
                }
            }
        }
        else            /* writer thread */
        {
            while (1)
            {
                #pragma omp critical
                {
                    ++a;
                    printf("\n WRITER THREAD");
                }
            }
        }
    }
    return 0;
}
integrators.h
#pragma once #include "geometries.h" namespace integrators{ struct VelocityVerlet{ //template <void (*boundary_func)(Atom &a)> static inline void first_step(Particles &particles, Geometry *geometry) { /*! * Velocity Verlet integrator * First half step */ #pragma omp parallel for if(particles.atoms.numOfAtoms > 6000) for (int i = 0; i < particles.atoms.numOfAtoms; i++) { geometry->boundary(particles.atoms[i]); particles.atoms[i]->vel += 0.5 * Base::tStep * particles.atoms[i]->oldForce / particles.atoms[i]->mass; //[nm/ps] particles.atoms[i]->pos += Base::tStep * particles.atoms[i]->vel; particles.atoms[i]->pos = particles.atoms[i]->pos.cwiseProduct( Base::dimensionality); //Multiply with dimensionality if (particles.atoms[i]->pos.norm() > sqrt(3) * Base::boxDim + 1) { printf("\nAtom outside box\n"); std::cout << particles.atoms[i]->pos << std::endl; exit(1); } } } static inline void second_step(Particles &particles) { /*! * Velocity Verlet integrator * Second half step */ #pragma omp parallel for if(particles.atoms.numOfAtoms > 6000) for (int i = 0; i < particles.atoms.numOfAtoms; i++) { particles.atoms[i]->vel += 0.5 * Base::tStep * particles.atoms[i]->force / particles.atoms[i]->mass; particles.atoms[i]->oldForce = particles.atoms[i]->force; } } }; }
GB_unop__signum_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__signum_fp64_fp64)
// op(A') function: GB (_unop_tran__signum_fp64_fp64)

// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = GB_signum (aij)

// type of the entries of the input matrix A
#define GB_ATYPE \
    double

// type of the entries of the output matrix C
#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

// access the entry of C at position p
#define GB_CX(p) Cx [p]

// unary operator: z = signum(x) (GB_signum is defined in GB.h)
#define GB_OP(z, x) \
    z = GB_signum (x) ;

// casting: identity cast, since A and C are both double
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    double aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ;                \
    Cx [pC] = GB_signum (z) ;       \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SIGNUM || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator entrywise.  When Ab is NULL the input is full/sparse
// and every position holds a value; otherwise Ab is the bitmap marking which
// positions are present.  Returns GrB_NO_VALUE when this kernel is disabled
// so the caller falls back to the generic implementation.
GrB_Info GB (_unop_apply__signum_fp64_fp64)
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: operate on every position
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = GB_signum (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions with no entry
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = GB_signum (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in GB_unop_transpose.c, which is textually
// included below and parameterized by the macros defined above.
GrB_Info GB (_unop_tran__signum_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__ne_fc32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ne_fc32) // A.*B function (eWiseMult): GB (_AemultB_08__ne_fc32) // A.*B function (eWiseMult): GB (_AemultB_02__ne_fc32) // A.*B function (eWiseMult): GB (_AemultB_04__ne_fc32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_fc32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__ne_fc32) // C+=b function (dense accum): GB (_Cdense_accumb__ne_fc32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_fc32) // C=scalar+B GB (_bind1st__ne_fc32) // C=scalar+B' GB (_bind1st_tran__ne_fc32) // C=A+scalar GB (_bind2nd__ne_fc32) // C=A'+scalar GB (_bind2nd_tran__ne_fc32) // C type: bool // A type: GxB_FC32_t // B,b type: GxB_FC32_t // BinaryOp: cij = GB_FC32_ne (aij, bij) #define GB_ATYPE \ GxB_FC32_t #define GB_BTYPE \ GxB_FC32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij 
= Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = (crealf (GBX (Ax, pA, A_iso)) != 0) || (cimagf (GBX (Ax, pA, A_iso)) != 0) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = (crealf (GBX (Bx, pB, B_iso)) != 0) || (cimagf (GBX (Bx, pB, B_iso)) != 0) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_FC32_ne (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_NE || GxB_NO_FC32 || GxB_NO_NE_FC32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__ne_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ne_fc32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ne_fc32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type GxB_FC32_t GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool 
    D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // NOTE: auto-generated code (from Generator/*); do not edit by hand.
    // GB_DISABLE is set when this operator/type is disabled in GB_control.h.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C is bool because NE is a comparison: cij = (aij != bij)
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// disabled: rowscale is not generated for this operator (see "(none)")
#if 0
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// eWiseAdd applies the operator to the union of the patterns of A and B.
GrB_Info GB (_AaddB__ne_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B by entry; freed by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

// eWiseMult applies the operator to the intersection of the patterns.
GrB_Info GB (_AemultB_08__ne_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__ne_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__ne_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__ne_fc32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__ne_fc32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (NULL if B is not bitmap)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    GxB_FC32_t   x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        GxB_FC32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_FC32_ne (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__ne_fc32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A (NULL if A is not bitmap)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t   y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        GxB_FC32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_FC32_ne (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_FC32_ne (x, aij) ;                 \
}

GrB_Info GB (_bind1st_tran__ne_fc32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE        \
        GxB_FC32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (same type here, since A and B share the FC32 type)
    #undef  GB_ATYPE
    #define GB_ATYPE        \
        GxB_FC32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_FC32_ne (aij, y) ;                 \
}

GrB_Info GB (_bind2nd_tran__ne_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
taskdep_if0.c
// RUN: %libomp-compile-and-run // REQUIRES: !abt #include <stdio.h> #include <stdlib.h> #include <omp.h> #include "omp_my_sleep.h" int a = 0; void task1() { my_sleep(0.5); a = 10; } void task2() { a++; } int main(int argc, char** argv) { #pragma omp parallel shared(argc) num_threads(2) { #pragma omp single { #pragma omp task depend(out: a) task1(); #pragma omp task if(0) depend(inout: a) task2(); } } if (a != 11) { fprintf(stderr, "fail: expected 11, but a is %d\n", a); exit(1); } else { printf("pass\n"); } return 0; }
residualbased_elimination_builder_and_solver_with_constraints.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Vicente Mataix Ferrandiz // // #if !defined(KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_WITH_CONSTRAINTS) #define KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_WITH_CONSTRAINTS /* System includes */ #include <unordered_set> #include <unordered_map> /* External includes */ /* Project includes */ #include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h" #include "utilities/sparse_matrix_multiplication_utility.h" #include "utilities/constraint_utilities.h" #include "input_output/logger.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedEliminationBuilderAndSolverWithConstraints * @ingroup KratosCore * @brief Current class provides an implementation for standard builder and solving operations. * @details The RHS is constituted by the unbalanced loads (residual) * Degrees of freedom are reordered putting the restrained degrees of freedom at * the end of the system ordered in reverse order with respect to the DofSet. * Imposition of the dirichlet conditions is naturally dealt with as the residual already contains * this information. * Calculation of the reactions involves a cost very similiar to the calculation of the total residual * The system is build in the following manner. A T matrix is assembled and constant vector g is assembled too. The T matrix contains the relations of all the dofs of the system, even the nodes with no master/slave relation. Then the size is n_total x n_red * The relation u = T u_red * Then: * A_red = T^t A T * b_red = T^t (b - A g) * @todo There is a more efficient way to asemble the system, but more costly, which is the following. 
In this case T will be only a relation matrix between master and slave dofs. Then n_slave x n_master: us = T um + g * Separating into independent dofs, master ans slave dofs: * u = uu * um * us * A = Auu Aum Aus * Amu Amm Ams * Asu Asm Ass * b = bu * bm * bs * Finally: * A_red = Auu Aum + Aus T * Amu + T^t Asu Amm + T^t Ams^t + Ams T + T^t Ass T * b_red = bu - Aus g * bm - Ams g * * This system requires extra care and is more complicated and requires to compute the blocks properly * @author Vicente Mataix Ferrandiz */ template <class TSparseSpace, class TDenseSpace, class TLinearSolver > class ResidualBasedEliminationBuilderAndSolverWithConstraints : public ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ /// Pointer definition of ResidualBasedEliminationBuilderAndSolverWithConstraints KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedEliminationBuilderAndSolverWithConstraints); /// Definition of the base class typedef ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; // The size_t types typedef std::size_t SizeType; typedef std::size_t IndexType; /// Definition of the classes from the base class typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef typename BaseType::NodeType NodeType; typedef typename BaseType::NodesArrayType NodesArrayType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename 
BaseType::ConditionsArrayType ConditionsArrayType; /// Additional definitions typedef PointerVectorSet<Element, IndexedObject> ElementsContainerType; typedef Element::EquationIdVectorType EquationIdVectorType; typedef Element::DofsVectorType DofsVectorType; typedef boost::numeric::ublas::compressed_matrix<double> CompressedMatrixType; /// DoF types definition typedef typename NodeType::DofType DofType; typedef typename DofType::Pointer DofPointerType; /// Set definition typedef std::unordered_set<IndexType> IndexSetType; /// Map definition typedef std::unordered_map<IndexType, IndexType> IndexMapType; /// MPC definitions typedef MasterSlaveConstraint MasterSlaveConstraintType; typedef typename MasterSlaveConstraint::Pointer MasterSlaveConstraintPointerType; typedef std::vector<IndexType> VectorIndexType; typedef Vector VectorType; ///@} ///@name Enum's ///@{ ///@} ///@name Life Cycle ///@{ /** * @brief Default constructor. (with parameters) */ explicit ResidualBasedEliminationBuilderAndSolverWithConstraints( typename TLinearSolver::Pointer pNewLinearSystemSolver, Parameters ThisParameters ) : BaseType(pNewLinearSystemSolver) { // Validate default parameters Parameters default_parameters = Parameters(R"( { "name" : "ResidualBasedEliminationBuilderAndSolverWithConstraints", "check_constraint_relation" : true, "reset_relation_matrix_each_iteration" : true })" ); ThisParameters.ValidateAndAssignDefaults(default_parameters); mCheckConstraintRelation = ThisParameters["check_constraint_relation"].GetBool(); mResetRelationMatrixEachIteration = ThisParameters["reset_relation_matrix_each_iteration"].GetBool(); } /** * @brief Default constructor */ explicit ResidualBasedEliminationBuilderAndSolverWithConstraints( typename TLinearSolver::Pointer pNewLinearSystemSolver, const bool CheckConstraintRelation = true, const bool ResetRelationMatrixEachIteration = false ) : BaseType(pNewLinearSystemSolver), mCheckConstraintRelation(CheckConstraintRelation), 
mResetRelationMatrixEachIteration(ResetRelationMatrixEachIteration) { } /** Destructor. */ ~ResidualBasedEliminationBuilderAndSolverWithConstraints() override { } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ void SetUpSystem(ModelPart& rModelPart) override { if(rModelPart.MasterSlaveConstraints().size() > 0) SetUpSystemWithConstraints(rModelPart); else BaseType::SetUpSystem(rModelPart); } /** * @brief Function to perform the build of the RHS. The vector could be sized as the total number * of dofs or as the number of unrestrained ones * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param rA The LHS matrix * @param rb The RHS vector */ void Build( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rb ) override { if(rModelPart.MasterSlaveConstraints().size() > 0) BuildWithConstraints(pScheme, rModelPart, rA, rb); else BaseType::Build(pScheme, rModelPart, rA, rb); } /** * @brief Function to perform the building and solving phase at the same time. * @details It is ideally the fastest and safer function to use when it is possible to solve * just after building * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void BuildAndSolve( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { if(rModelPart.MasterSlaveConstraints().size() > 0) BuildAndSolveWithConstraints(pScheme, rModelPart, A, Dx, b); else BaseType::BuildAndSolve(pScheme, rModelPart, A, Dx, b); } /** * @brief Function to perform the build of the RHS. 
* @details The vector could be sized as the total number of dofs or as the number of unrestrained ones * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve */ void BuildRHS( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& b) override { KRATOS_TRY if(rModelPart.MasterSlaveConstraints().size() > 0) BuildRHSWithConstraints(pScheme, rModelPart, b); else BaseType::BuildRHS(pScheme, rModelPart, b); KRATOS_CATCH("") } /** * @brief Builds the list of the DofSets involved in the problem by "asking" to each element * and condition its Dofs. * @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the * way the matrix and RHS are built * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve */ void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart ) override { if(rModelPart.MasterSlaveConstraints().size() > 0) SetUpDofSetWithConstraints(pScheme, rModelPart); else BaseType::SetUpDofSet(pScheme, rModelPart); } /** * @brief It applies certain operations at the system of equations at the begining of the solution step * @param rModelPart The model part to compute * @param rA The LHS matrix of the system of equations * @param rDx The vector of unkowns * @param rb The RHS vector of the system of equations */ void InitializeSolutionStep( ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) override { KRATOS_TRY BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb); // Getting process info const ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // Computing constraints const int n_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size()); auto constraints_begin = rModelPart.MasterSlaveConstraintsBegin(); #pragma omp parallel for schedule(guided, 512) firstprivate(n_constraints, constraints_begin) for (int k = 0; 
k < n_constraints; ++k) { auto it = constraints_begin + k; it->InitializeSolutionStep(r_process_info); // Here each constraint constructs and stores its T and C matrices. Also its equation slave_ids. } KRATOS_CATCH("ResidualBasedEliminationBuilderAndSolverWithConstraints failed to initialize solution step.") } /** * @brief It applies certain operations at the system of equations at the end of the solution step * @param rModelPart The model part to compute * @param rA The LHS matrix of the system of equations * @param rDx The vector of unkowns * @param rb The RHS vector of the system of equations */ void FinalizeSolutionStep( ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) override { KRATOS_TRY BaseType::FinalizeSolutionStep(rModelPart, rA, rDx, rb); // Getting process info const ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // Computing constraints const int n_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size()); const auto constraints_begin = rModelPart.MasterSlaveConstraintsBegin(); #pragma omp parallel for schedule(guided, 512) firstprivate(n_constraints, constraints_begin) for (int k = 0; k < n_constraints; ++k) { auto it = constraints_begin + k; it->FinalizeSolutionStep(r_process_info); } KRATOS_CATCH("ResidualBasedEliminationBuilderAndSolverWithConstraints failed to finalize solution step.") } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "ResidualBasedEliminationBuilderAndSolverWithConstraints"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. 
void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ TSystemMatrixPointerType mpTMatrix = NULL; /// This is matrix containing the global relation for the constraints TSystemMatrixPointerType mpOldAMatrix = NULL; /// This is matrix containing the old LHS structure TSystemVectorPointerType mpConstantVector = NULL; /// This is vector containing the rigid movement of the constraint TSystemVectorPointerType mpDeltaConstantVector = NULL; /// This is vector contains the effective constant displacement DofsArrayType mDoFMasterFixedSet; /// The set containing the fixed master DoF of the system DofsArrayType mDoFSlaveSet; /// The set containing the slave DoF of the system SizeType mDoFToSolveSystemSize = 0; /// Number of degrees of freedom of the problem to actually be solved IndexMapType mReactionEquationIdMap; /// In order to know the corresponding EquaionId for each component of the reaction vector bool mCheckConstraintRelation = false; /// If we do a constraint check relation bool mResetRelationMatrixEachIteration = false; /// If we reset the relation matrix at each iteration bool mComputeConstantContribution = false; /// If we compute the constant contribution of the MPC bool mCleared = true; /// If the system has been reseted ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This method assembles the global relation matrix (T matrix used to impose the MPC) * @param rT The global relation matrix * @param rTransformationMatrix The local transformation contribution * @param rSlaveEquationId The equation id of the slave dofs * @param rMasterEquationId The equation id of the master dofs */ void AssembleRelationMatrix( TSystemMatrixType& rT, const LocalSystemMatrixType& rTransformationMatrix, const EquationIdVectorType& rSlaveEquationId, const EquationIdVectorType& 
rMasterEquationId ) { const SizeType local_size_1 = rTransformationMatrix.size1(); for (IndexType i_local = 0; i_local < local_size_1; ++i_local) { IndexType i_global = rSlaveEquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { BaseType::AssembleRowContributionFreeDofs(rT, rTransformationMatrix, i_global, i_local, rMasterEquationId); } } } /** * @brief This method construcs the relationship between the DoF * @param pScheme The integration scheme * @param rA The LHS of the system * @param rModelPart The model part which defines the problem */ void ConstructMatrixStructure( typename TSchemeType::Pointer pScheme, TSystemMatrixType& rA, ModelPart& rModelPart ) override { if(rModelPart.MasterSlaveConstraints().size() > 0) ConstructMatrixStructureWithConstraints(pScheme, rA, rModelPart); else BaseType::ConstructMatrixStructure(pScheme, rA, rModelPart); } /** * @brief The same methods as the base class but with constraints * @param pScheme The pointer to the integration scheme * @param rModelPart The model part to compute * @param rA The LHS matrix of the system of equations * @param rDx The vector of unkowns * @param rb The RHS vector of the system of equations */ void BuildAndSolveWithConstraints( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) { KRATOS_TRY Timer::Start("Build"); // We apply the master/slave relationship before build ApplyMasterSlaveRelation(pScheme, rModelPart, rA, rDx, rb); // We compute the effective constant vector TSystemVectorType dummy_Dx(mDoFToSolveSystemSize); TSparseSpace::SetToZero(dummy_Dx); ComputeEffectiveConstant(pScheme, rModelPart, dummy_Dx); // We do the build (after that we resize the solution vector to avoid problems) BuildWithConstraints(pScheme, rModelPart, rA, rb); Timer::Stop("Build"); // Now we apply the BC rDx.resize(mDoFToSolveSystemSize, false); ApplyDirichletConditions(pScheme, rModelPart, rA, rDx, rb); 
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl; // We solve the system of equations const double start_solve = OpenMPUtils::GetCurrentTime(); Timer::Start("Solve"); SystemSolveWithPhysics(rA, rDx, rb, rModelPart); Timer::Stop("Solve"); const double stop_solve = OpenMPUtils::GetCurrentTime(); // We compute the effective constant vector ComputeEffectiveConstant(pScheme, rModelPart, rDx); // We reconstruct the Unknowns vector and the residual const double start_reconstruct_slaves = OpenMPUtils::GetCurrentTime(); ReconstructSlaveSolutionAfterSolve(pScheme, rModelPart, rA, rDx, rb); const double stop_reconstruct_slaves = OpenMPUtils::GetCurrentTime(); KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Reconstruct slaves time: " << stop_reconstruct_slaves - start_reconstruct_slaves << std::endl; // Some verbosity KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl; KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl; KRATOS_CATCH("") } /** * @brief The same methods as the base class but with constraints * @param pScheme The pointer to the integration scheme * @param rModelPart The model part to compute * @param rb The RHS vector of the system of equations */ void BuildRHSWithConstraints( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& rb ) { Timer::Start("Build RHS"); // Resetting to zero the 
vector of reactions if(BaseType::mCalculateReactionsFlag) { TSparseSpace::SetToZero(*(BaseType::mpReactionsVector)); } // Builing without BC BuildRHSNoDirichlet(pScheme,rModelPart,rb); Timer::Stop("Build RHS"); ApplyDirichletConditionsRHS(pScheme, rModelPart, rb); // We get the global T matrix const TSystemMatrixType& rTMatrix = *mpTMatrix; // Reconstruct the RHS TSystemVectorType rb_copy = rb; rb.resize(BaseType::mEquationSystemSize, false); TSparseSpace::Mult(rTMatrix, rb_copy, rb); // Adding contribution to reactions TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector; if (BaseType::mCalculateReactionsFlag) { for (auto& r_dof : BaseType::mDofSet) { const bool is_master_fixed = mDoFMasterFixedSet.find(r_dof) == mDoFMasterFixedSet.end() ? false : true; const bool is_slave = mDoFSlaveSet.find(r_dof) == mDoFSlaveSet.end() ? false : true; if (is_master_fixed || is_slave) { // Fixed or MPC dof const IndexType equation_id = r_dof.EquationId(); r_reactions_vector[mReactionEquationIdMap[equation_id]] += rb[equation_id]; } } } // Some verbosity KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nRHS vector = " << rb << std::endl; } /** * @brief Builds the list of the DofSets involved in the problem by "asking" to each element and condition its Dofs. * @details Equivalent to the ResidualBasedEliminationBuilderAndSolver but with constraints. 
The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the way the matrix and RHS are built * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve */ void SetUpDofSetWithConstraints( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart ) { KRATOS_TRY; KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Setting up the dofs" << std::endl; DofsVectorType dof_list, second_dof_list; // NOTE: The second dof list is only used on constraints to include master/slave relations typedef std::unordered_set < DofPointerType, DofPointerHasher> set_type; // Declaring temporal variables DofsArrayType dof_temp_all, dof_temp_solvable, dof_temp_slave; // We assign an empty dof array to our dof sets BaseType::mDofSet = DofsArrayType(); /// This corresponds with all the DoF of the system mDoFSlaveSet = DofsArrayType(); /// This corresponds with the slave (the ones not solved after compacting the system using MPC) /** * Here we declare three sets. 
* - The global set: Contains all the DoF of the system * - The slave set: The DoF that are not going to be solved, due to MPC formulation */ set_type dof_global_set, dof_global_slave_set; #pragma omp parallel firstprivate(dof_list, second_dof_list) { ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // We cleate the temporal set and we reserve some space on them set_type dofs_tmp_set, dof_temp_slave_set; dofs_tmp_set.reserve(20000); dof_temp_slave_set.reserve(200); // Gets the array of elements from the modeler ElementsArrayType& r_elements_array = rModelPart.Elements(); const int number_of_elements = static_cast<int>(r_elements_array.size()); #pragma omp for schedule(guided, 512) nowait for (int i = 0; i < number_of_elements; ++i) { auto it_elem = r_elements_array.begin() + i; // Gets list of Dof involved on every element pScheme->GetElementalDofList(*(it_elem.base()), dof_list, r_current_process_info); dofs_tmp_set.insert(dof_list.begin(), dof_list.end()); } // Gets the array of conditions from the modeler ConditionsArrayType& r_conditions_array = rModelPart.Conditions(); const int number_of_conditions = static_cast<int>(r_conditions_array.size()); #pragma omp for schedule(guided, 512) nowait for (int i = 0; i < number_of_conditions; ++i) { auto it_cond = r_conditions_array.begin() + i; // Gets list of Dof involved on every element pScheme->GetConditionDofList(*(it_cond.base()), dof_list, r_current_process_info); dofs_tmp_set.insert(dof_list.begin(), dof_list.end()); } // Gets the array of constraints from the modeler auto& r_constraints_array = rModelPart.MasterSlaveConstraints(); const int number_of_constraints = static_cast<int>(r_constraints_array.size()); #pragma omp for schedule(guided, 512) nowait for (int i = 0; i < number_of_constraints; ++i) { auto it_const = r_constraints_array.begin() + i; // Gets list of Dof involved on every element it_const->GetDofList(dof_list, second_dof_list, r_current_process_info); 
dofs_tmp_set.insert(dof_list.begin(), dof_list.end()); dofs_tmp_set.insert(second_dof_list.begin(), second_dof_list.end()); dof_temp_slave_set.insert(dof_list.begin(), dof_list.end()); } // We merge all the sets in one thread #pragma omp critical { dof_global_set.insert(dofs_tmp_set.begin(), dofs_tmp_set.end()); dof_global_slave_set.insert(dof_temp_slave_set.begin(), dof_temp_slave_set.end()); } } KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 2)) << "Initializing ordered array filling\n" << std::endl; /// We transfer the temporal sets to our DoF set dof_temp_all.reserve(dof_global_set.size()); for (auto& dof : dof_global_set) { dof_temp_all.push_back( dof.get() ); } dof_temp_all.Sort(); BaseType::mDofSet = dof_temp_all; dof_temp_slave.reserve(dof_global_slave_set.size()); for (auto& dof : dof_global_slave_set) { dof_temp_slave.push_back( dof.get() ); } dof_temp_slave.Sort(); mDoFSlaveSet = dof_temp_slave; // Throws an exception if there are no Degrees Of Freedom involved in the analysis KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl; KRATOS_WARNING_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", mDoFSlaveSet.size() == 0) << "No slave degrees of freedom to solve!" 
            << std::endl;

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 2)) << "Number of degrees of freedom:" << BaseType::mDofSet.size() << std::endl;

        // From this point on the dof set is considered frozen
        BaseType::mDofSetIsInitialized = true;

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished setting up the dofs" << std::endl;

#ifdef USE_LOCKS_IN_ASSEMBLY
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 2)) << "Initializing lock array" << std::endl;

        // Destroy any lock left over from a previous setup before resizing the lock array
        if (BaseType::mLockArray.size() != 0) {
            for (int i = 0; i < static_cast<int>(BaseType::mLockArray.size()); ++i) {
                omp_destroy_lock(&BaseType::mLockArray[i]);
            }
        }
        BaseType::mLockArray.resize(BaseType::mDofSet.size());

        // One lock per dof, used to serialize concurrent assembly into the same row
        for (int i = 0; i < static_cast<int>(BaseType::mLockArray.size()); ++i) {
            omp_init_lock(&BaseType::mLockArray[i]);
        }

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 2)) << "End of setup dof set\n" << std::endl;
#endif

        // If reactions are to be calculated, we check if all the dofs have reactions defined
        // This is to be done only in debug mode
#ifdef KRATOS_DEBUG
        if(BaseType::GetCalculateReactionsFlag()) {
            for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) {
                KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl
                    << "Node : " << dof_iterator->Id()<< std::endl
                    << "Dof : " << (*dof_iterator) << std::endl
                    << "Not possible to calculate reactions."
                    << std::endl;
            }
        }
#endif

        KRATOS_CATCH("");
    }

    /**
     * @brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
     * @param rA The LHS matrix
     * @param rDx The Unknowns vector
     * @param rb The RHS vector
     * @param rModelPart The model part of the problem to solve
     */
    void SystemSolveWithPhysics(
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb,
        ModelPart& rModelPart
        )
    {
        KRATOS_TRY

        // If the RHS norm is zero there is nothing to solve
        double norm_b = 0.0;
        if (TSparseSpace::Size(rb) > 0)
            norm_b = TSparseSpace::TwoNorm(rb);

        if (norm_b > 0.0) {
            // Create the auxiliary dof set: only the "solvable" dofs, i.e. those inside the
            // system (EquationId below mEquationSystemSize) that are not slave dofs
            DofsArrayType aux_dof_set;
            aux_dof_set.reserve(mDoFToSolveSystemSize);
            for (auto& r_dof : BaseType::mDofSet) {
                if (r_dof.EquationId() < BaseType::mEquationSystemSize) {
                    auto it = mDoFSlaveSet.find(r_dof);
                    if (it == mDoFSlaveSet.end())
                        aux_dof_set.push_back( &r_dof );
                }
            }
            aux_dof_set.Sort();

            // Consistency checks between the reduced dof set and the condensed system matrix
            KRATOS_ERROR_IF_NOT(aux_dof_set.size() == mDoFToSolveSystemSize) << "Inconsistency (I) in system size: " << mDoFToSolveSystemSize << " vs " << aux_dof_set.size() << "\n Size dof set " << BaseType::mDofSet.size() << " vs Size slave dof set " << mDoFSlaveSet.size() << std::endl;
            KRATOS_ERROR_IF_NOT(aux_dof_set.size() == rA.size1()) << "Inconsistency (II) in system size: " << rA.size1() << " vs " << aux_dof_set.size() << "\n Size dof set " << BaseType::mDofSet.size() << " vs Size slave dof set " << mDoFSlaveSet.size() << std::endl;

            // Provide physical data as needed (e.g. for AMG-type solvers)
            if(BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded())
                BaseType::mpLinearSystemSolver->ProvideAdditionalData(rA, rDx, rb, aux_dof_set, rModelPart);

            // Do solve
            BaseType::mpLinearSystemSolver->Solve(rA, rDx, rb);
        } else {
            TSparseSpace::SetToZero(rDx);
            KRATOS_WARNING_IF("ResidualBasedEliminationBuilderAndSolver", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!"
                << std::endl;
        }

        // Prints information about the current time
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl;

        KRATOS_CATCH("")
    }

    /**
     * @brief This function is exactly the same as the ConstructMatrixStructure() function in the base class except that
     * @details it has the call to ApplyConstraints once the elements and conditions compute their equation ids
     * @todo Move this method to a common class with block builder and solver with constraints
     */
    virtual void ConstructMatrixStructureWithConstraints(
        typename TSchemeType::Pointer pScheme,
        TSystemMatrixType& rA,
        ModelPart& rModelPart
        )
    {
        // Filling with zero the matrix (creating the structure)
        Timer::Start("MatrixStructure");

        // The total number of dofs of the system
        const SizeType equation_size = BaseType::mEquationSystemSize;

        // This vector contains the index sets for all rows (the sparsity graph)
        std::vector<IndexSetType> indices(equation_size);

        // We reserve some indexes on each row
        #pragma omp parallel for firstprivate(equation_size)
        for (int index = 0; index < static_cast<int>(equation_size); ++index)
            indices[index].reserve(40);

        /// Definition of the equation id vectors
        EquationIdVectorType ids(3, 0);
        EquationIdVectorType second_ids(3, 0); // NOTE: Used only on the constraints to take into account the master dofs

        #pragma omp parallel firstprivate(ids, second_ids)
        {
            // The process info
            ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

            // Per-thread copy of the row index sets, merged under a critical section at the end
            std::vector<IndexSetType> temp_indexes(equation_size);

            #pragma omp for
            for (int index = 0; index < static_cast<int>(equation_size); ++index)
                temp_indexes[index].reserve(30);

            // Getting the size of the array of elements from the model
            const int number_of_elements = static_cast<int>(rModelPart.Elements().size());

            // Element initial iterator
            const auto el_begin = rModelPart.ElementsBegin();
            // We iterate over the elements
            #pragma omp for schedule(guided, 512) nowait
            for (int i_elem = 0; i_elem<number_of_elements; ++i_elem) {
                auto it_elem = el_begin + i_elem;
                pScheme->EquationId( *(it_elem.base()), ids, r_current_process_info);

                // Insert the full coupling block of this element into the graph
                for (auto& id_i : ids) {
                    if (id_i < BaseType::mEquationSystemSize) {
                        auto& row_indices = temp_indexes[id_i];
                        for (auto& id_j : ids) {
                            if (id_j < BaseType::mEquationSystemSize) {
                                row_indices.insert(id_j);
                            }
                        }
                    }
                }
            }

            // Getting the size of the array of the conditions
            const int number_of_conditions = static_cast<int>(rModelPart.Conditions().size());

            // Condition initial iterator
            const auto cond_begin = rModelPart.ConditionsBegin();

            // We iterate over the conditions
            #pragma omp for schedule(guided, 512) nowait
            for (int i_cond = 0; i_cond<number_of_conditions; ++i_cond) {
                auto it_cond = cond_begin + i_cond;
                pScheme->Condition_EquationId( *(it_cond.base()), ids, r_current_process_info);

                // Same coupling-block insertion as for the elements
                for (auto& id_i : ids) {
                    if (id_i < BaseType::mEquationSystemSize) {
                        auto& row_indices = temp_indexes[id_i];
                        for (auto& id_j : ids) {
                            if (id_j < BaseType::mEquationSystemSize) {
                                row_indices.insert(id_j);
                            }
                        }
                    }
                }
            }

            // Getting the size of the array of the constraints
            const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());

            // Constraint initial iterator
            const auto const_begin = rModelPart.MasterSlaveConstraints().begin();

            // We iterate over the constraints
            #pragma omp for schedule(guided, 512) nowait
            for (int i_const = 0; i_const < number_of_constraints; ++i_const) {
                auto it_const = const_begin + i_const;

                // Detect if the constraint is active or not. If the user did not make any choice the
                // constraint is active by default
                bool constraint_is_active = true;
                if( it_const->IsDefined(ACTIVE) ) {
                    constraint_is_active = it_const->Is(ACTIVE);
                }

                if(constraint_is_active) {
                    it_const->EquationIdVector(ids, second_ids, r_current_process_info);

                    // Slave DoFs
                    for (auto& id_i : ids) {
                        if (id_i < BaseType::mEquationSystemSize) {
                            auto& row_indices = temp_indexes[id_i];
                            for (auto& id_j : ids) {
                                if (id_j < BaseType::mEquationSystemSize) {
                                    row_indices.insert(id_j);
                                }
                            }
                        }
                    }

                    // Master DoFs
                    for (auto& id_i : second_ids) {
                        if (id_i < BaseType::mEquationSystemSize) {
                            auto& row_indices = temp_indexes[id_i];
                            for (auto& id_j : second_ids) {
                                if (id_j < BaseType::mEquationSystemSize) {
                                    row_indices.insert(id_j);
                                }
                            }
                        }
                    }
                }
            }

            // Merging all the per-thread index sets into the shared graph
            #pragma omp critical
            {
                for (int i = 0; i < static_cast<int>(temp_indexes.size()); ++i) {
                    indices[i].insert(temp_indexes[i].begin(), temp_indexes[i].end());
                }
            }
        }

        // Count the row sizes
        SizeType nnz = 0;
        for (IndexType i = 0; i < indices.size(); ++i)
            nnz += indices[i].size();

        rA = CompressedMatrixType(indices.size(), indices.size(), nnz);

        // Raw access into the CSR storage of the uBLAS compressed matrix
        double *Avalues = rA.value_data().begin();
        IndexType *Arow_indices = rA.index1_data().begin();
        IndexType *Acol_indices = rA.index2_data().begin();

        // Filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
        // Each row offset is a prefix sum of the row sizes, hence the loop is inherently sequential
        Arow_indices[0] = 0;
        for (int i = 0; i < static_cast<int>(rA.size1()); i++)
            Arow_indices[i + 1] = Arow_indices[i] + indices[i].size();

        // Fill the column indices and zero-initialize the values, row by row
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(rA.size1()); ++i) {
            const IndexType row_begin = Arow_indices[i];
            const IndexType row_end = Arow_indices[i + 1];
            IndexType k = row_begin;
            for (auto it = indices[i].begin(); it != indices[i].end(); ++it) {
                Acol_indices[k] = *it;
                Avalues[k] = 0.0;
                k++;
            }

            indices[i].clear(); //deallocating the memory

            // CSR requires sorted column indices within each row
            std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
        }

        rA.set_filled(indices.size() + 1, nnz);

        Timer::Stop("MatrixStructure");
    }

    /**
     * @brief This function is exactly the same as the ConstructMatrixStructure() function in the base class except that
     * the function has the call to ApplyConstraints once the elements and conditions compute their equation slave ids
     * @param pScheme The pointer to the integration scheme
     * @param rT The global relation matrix
     * @param rModelPart The model part to compute
     */
    virtual void ConstructRelationMatrixStructure(
        typename TSchemeType::Pointer pScheme,
        TSystemMatrixType& rT,
        ModelPart& rModelPart
        )
    {
        // Filling with zero the matrix (creating the structure)
        Timer::Start("RelationMatrixStructure");

        // Maps each solvable dof EquationId to its (compacted) column in the relation matrix
        IndexMapType solvable_dof_reorder;
        // For each row (EquationId) the set of relation-matrix columns it couples to
        std::unordered_map<IndexType, IndexSetType> master_indices;

        // Filling with "ones" (the identity part of the relation matrix for solvable dofs)
        typedef std::pair<IndexType, IndexType> IndexIndexPairType;
        typedef std::pair<IndexType, IndexSetType> IndexIndexSetPairType;
        IndexType counter = 0;
        for (auto& dof : BaseType::mDofSet) {
            if (dof.EquationId() < BaseType::mEquationSystemSize) {
                const IndexType equation_id = dof.EquationId();
                auto it = mDoFSlaveSet.find(dof);
                if (it == mDoFSlaveSet.end()) {
                    // Solvable dof: gets a compacted column index and a diagonal entry
                    solvable_dof_reorder.insert(IndexIndexPairType(equation_id, counter));
                    master_indices.insert(IndexIndexSetPairType(equation_id, IndexSetType({counter})));
                    ++counter;
                } else {
                    // Slave dof: starts with an empty row, filled from its masters below
                    master_indices.insert(IndexIndexSetPairType(equation_id, IndexSetType({})));
                }
            }
        }

        // The process info
        ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        /// Definition of the equation id vectors
        EquationIdVectorType ids(3, 0);
        EquationIdVectorType second_ids(3, 0); // NOTE: Used only on the constraints to take into account the master dofs

        const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
        const auto it_const_begin = rModelPart.MasterSlaveConstraints().begin();
        // TODO: OMP
        for (int i_const = 0; i_const < number_of_constraints; ++i_const) {
            auto it_const = it_const_begin + i_const;

            // Detect if the constraint is active or not. If the user did not make any choice the
            // constraint is active by default
            bool constraint_is_active = true;
            if( it_const->IsDefined(ACTIVE) ) {
                constraint_is_active = it_const->Is(ACTIVE);
            }

            if(constraint_is_active) {
                it_const->EquationIdVector(ids, second_ids, r_current_process_info);

                // For every slave id (not present in the solvable map) add the columns of its masters
                for (auto& slave_id : ids) {
                    if (slave_id < BaseType::mEquationSystemSize) {
                        auto it_slave = solvable_dof_reorder.find(slave_id);
                        if (it_slave == solvable_dof_reorder.end()) {
                            for (auto& master_id : second_ids) {
                                if (master_id < BaseType::mEquationSystemSize) {
                                    auto& master_row_indices = master_indices[slave_id];
                                    master_row_indices.insert(solvable_dof_reorder[master_id]);
                                }
                            }
                        }
                    }
                }
            }
        }

        KRATOS_DEBUG_ERROR_IF_NOT(BaseType::mEquationSystemSize == master_indices.size()) << "Inconsistency in the dofs size: " << BaseType::mEquationSystemSize << "\t vs \t" << master_indices.size() << std::endl;

        // Count the row sizes
        SizeType nnz = 0;
        for (IndexType i = 0; i < BaseType::mEquationSystemSize; ++i) {
            nnz += master_indices[i].size();
        }

        rT = CompressedMatrixType(BaseType::mEquationSystemSize, mDoFToSolveSystemSize, nnz);

        // Raw access into the CSR storage of the relation matrix
        double *Tvalues = rT.value_data().begin();
        IndexType *Trow_indices = rT.index1_data().begin();
        IndexType *Tcol_indices = rT.index2_data().begin();

        // Filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
        // Prefix sum of row sizes; inherently sequential
        Trow_indices[0] = 0;
        for (IndexType i = 0; i < BaseType::mEquationSystemSize; ++i)
            Trow_indices[i + 1] = Trow_indices[i] + master_indices[i].size();

        KRATOS_DEBUG_ERROR_IF_NOT(Trow_indices[BaseType::mEquationSystemSize] == nnz) << "Nonzero values does not coincide with the row index definition: " << Trow_indices[BaseType::mEquationSystemSize] << " vs " << nnz << std::endl;

        // Fill the column indices and zero-initialize the values, row by row
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(rT.size1()); ++i) {
            const IndexType row_begin = Trow_indices[i];
            const IndexType row_end = Trow_indices[i + 1];
            IndexType k = row_begin;
            for (auto it = master_indices[i].begin(); it != master_indices[i].end(); ++it) {
                Tcol_indices[k] = *it;
                Tvalues[k] = 0.0;
                k++;
            }

            master_indices[i].clear(); //deallocating the memory

            // CSR requires sorted column indices within each row
            std::sort(&Tcol_indices[row_begin], &Tcol_indices[row_end]);
        }
        rT.set_filled(BaseType::mEquationSystemSize + 1, nnz);

        // Setting ones: the identity sub-block for the solvable dofs
        for (auto& solv_dof : solvable_dof_reorder) {
            rT(solv_dof.first, solv_dof.second) = 1.0;
        }

        Timer::Stop("RelationMatrixStructure");
    }

    /**
     * @brief This function is exactly the same as the Build() function in the base class except that
     * @details it has the call to ApplyConstraints once the LHS or RHS are computed by elements and conditions
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param rA The LHS matrix
     * @param rb The RHS vector
     * @param UseBaseBuild If the base Build function will be used
     */
    void BuildWithConstraints(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rb,
        const bool UseBaseBuild = true
        )
    {
        KRATOS_TRY

        // We build the original (unconstrained) system
        if (UseBaseBuild)
            BaseType::Build(pScheme, rModelPart, rA, rb);
        else
            BuildWithoutConstraints(pScheme, rModelPart, rA, rb);

        // Assemble the constraints
        const double start_build = OpenMPUtils::GetCurrentTime();

        // We get the global T (relation) matrix
        const TSystemMatrixType& rTMatrix = *mpTMatrix;

        // We compute the constraint contribution only once (or if cleared / reset each iteration)
        if (mCleared) {
            mCleared = false;
            ComputeConstraintContribution(pScheme, rModelPart, true, mComputeConstantContribution);
        } else if (mResetRelationMatrixEachIteration) {
            ResetConstraintSystem();
            ComputeConstraintContribution(pScheme, rModelPart, mResetRelationMatrixEachIteration, mComputeConstantContribution);
        }

        // We compute the transposed matrix of the global relation matrix
        TSystemMatrixType T_transpose_matrix(mDoFToSolveSystemSize, BaseType::mEquationSystemSize);
        SparseMatrixMultiplicationUtility::TransposeMatrix<TSystemMatrixType, TSystemMatrixType>(T_transpose_matrix, rTMatrix, 1.0);

        // The proper way to include the constants is in the RHS as T^t(f - A * g)
        TSystemVectorType rb_copy = rb;
        if (mComputeConstantContribution) {
            // We get the g constant vector
            TSystemVectorType& rDeltaConstantVector = *mpDeltaConstantVector;
            TSystemVectorType aux_constant_vector(rDeltaConstantVector);
            TSparseSpace::Mult(rA, rDeltaConstantVector, aux_constant_vector);
            TSparseSpace::UnaliasedAdd(rb_copy, -1.0, aux_constant_vector);
        }

        // The auxiliary matrix to store the intermediate matrix multiplication T^t * A
        TSystemMatrixType auxiliar_A_matrix(mDoFToSolveSystemSize, BaseType::mEquationSystemSize);
        SparseMatrixMultiplicationUtility::MatrixMultiplication(T_transpose_matrix, rA, auxiliar_A_matrix);

        // We do a backup of the matrix before applying the constraints (restored in ReconstructSlaveSolutionAfterSolve)
        if (mpOldAMatrix == NULL) {
            // If the pointer is not initialized initialize it to an empty matrix
            TSystemMatrixPointerType pNewOldAMatrix = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
            mpOldAMatrix.swap(pNewOldAMatrix);
        }
        (*mpOldAMatrix).swap(rA);

        // We resize the system of equations to the condensed (solvable) size
        rA.resize(mDoFToSolveSystemSize, mDoFToSolveSystemSize, false);
        rb.resize(mDoFToSolveSystemSize, false);

        // Final multiplication: A_condensed = (T^t A) T, b_condensed = T^t (f - A g)
        SparseMatrixMultiplicationUtility::MatrixMultiplication(auxiliar_A_matrix, rTMatrix, rA);
        TSparseSpace::Mult(T_transpose_matrix, rb_copy, rb);

        // Cleaning up memory
        auxiliar_A_matrix.resize(0, 0,
            false);
        T_transpose_matrix.resize(0, 0, false);

        const double stop_build = OpenMPUtils::GetCurrentTime();
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Constraint relation build time and multiplication: " << stop_build - start_build << std::endl;

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building with constraints" << std::endl;

        KRATOS_CATCH("")
    }

    /**
     * @brief Function to perform the build of the RHS.
     * @details The vector could be sized as the total number of dofs or as the number of unrestrained ones
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param rb The RHS of the system
     */
    void BuildRHSNoDirichlet(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemVectorType& rb
        )
    {
        KRATOS_TRY

        // Assemble the constraints
        const double start_build = OpenMPUtils::GetCurrentTime();

        // We get the global T (relation) matrix
        const TSystemMatrixType& rTMatrix = *mpTMatrix;

        // We compute the constraint contribution only once (or if cleared / reset each iteration)
        if (mCleared) {
            mCleared = false;
            ComputeConstraintContribution(pScheme, rModelPart, true, mComputeConstantContribution);
        } else if (mResetRelationMatrixEachIteration) {
            ResetConstraintSystem();
            ComputeConstraintContribution(pScheme, rModelPart, mResetRelationMatrixEachIteration, mComputeConstantContribution);
        }

        // We compute the transposed matrix of the global relation matrix
        TSystemMatrixType T_transpose_matrix(mDoFToSolveSystemSize, BaseType::mEquationSystemSize);
        SparseMatrixMultiplicationUtility::TransposeMatrix<TSystemMatrixType, TSystemMatrixType>(T_transpose_matrix, rTMatrix, 1.0);

        // We build the original system
        TSystemMatrixType A; // Dummy auxiliary matrix we need to build anyway because it is needed to impose the rigid displacements
        if (mComputeConstantContribution)
        {
            A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
            ConstructMatrixStructure(pScheme, A, rModelPart);
            BuildWithoutConstraints(pScheme, rModelPart, A, rb);
        } else {
            BuildRHSNoDirichletWithoutConstraints(pScheme, rModelPart, rb);
        }

        // The proper way to include the constants is in the RHS as T^t(f - A * g)
        TSystemVectorType rb_copy = rb;
        if (mComputeConstantContribution) {
            // We get the g constant vector
            TSystemVectorType& rDeltaConstantVector = *mpDeltaConstantVector;
            TSystemVectorType aux_constant_vector(rDeltaConstantVector);
            TSparseSpace::Mult(A, rDeltaConstantVector, aux_constant_vector);
            TSparseSpace::UnaliasedAdd(rb_copy, -1.0, aux_constant_vector);
        }

        rb.resize(mDoFToSolveSystemSize, false);

        // Final multiplication: b_condensed = T^t (f - A g)
        TSparseSpace::Mult(T_transpose_matrix, rb_copy, rb);

        const double stop_build = OpenMPUtils::GetCurrentTime();
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Constraint relation build time and multiplication: " << stop_build - start_build << std::endl;

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building with constraints" << std::endl;

        KRATOS_CATCH("")
    }

    /**
     * @brief This method resizes and initializes the system of equations
     * @details Additionally to what is done in the base class the constraint system is initialized
     * @param pA The pointer to the LHS matrix
     * @param pDx The pointer to the vector of Unknowns
     * @param pb The pointer to the RHS vector
     * @param rModelPart The model part to be computed
     */
    void ResizeAndInitializeVectors(
        typename TSchemeType::Pointer pScheme,
        TSystemMatrixPointerType& pA,
        TSystemVectorPointerType& pDx,
        TSystemVectorPointerType& pb,
        ModelPart& rModelPart
        ) override
    {
        // We resize the basic system
        BaseType::ResizeAndInitializeVectors(pScheme, pA, pDx, pb, rModelPart);
        // If needed resize the vector for the calculation of reactions
        if (BaseType::mCalculateReactionsFlag) {
            // Reactions are stored for the restrained dofs plus the fixed masters minus the solvable ones
            const SizeType reactions_vector_size = BaseType::mDofSet.size() - mDoFToSolveSystemSize + mDoFMasterFixedSet.size();
            TSystemVectorType& rReactionsVector = *(BaseType::mpReactionsVector);
            if (rReactionsVector.size() != reactions_vector_size)
                rReactionsVector.resize(reactions_vector_size, false);
        }

        // Now we resize the relation matrix used on the MPC solution
        if(rModelPart.MasterSlaveConstraints().size() > 0) {
            if (mpTMatrix == NULL) {
                // If the pointer is not initialized initialize it to an empty matrix
                TSystemMatrixPointerType pNewT = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
                mpTMatrix.swap(pNewT);
            }

            // The rigid movement
            if (mpConstantVector == NULL) {
                // If the pointer is not initialized initialize it to an empty vector
                TSystemVectorPointerType pNewConstantVector = TSystemVectorPointerType(new TSystemVectorType(0));
                mpConstantVector.swap(pNewConstantVector);
            }

            // The effective rigid movement
            if (mpDeltaConstantVector == NULL) {
                // If the pointer is not initialized initialize it to an empty vector
                TSystemVectorPointerType pNewConstantVector = TSystemVectorPointerType(new TSystemVectorType(0));
                mpDeltaConstantVector.swap(pNewConstantVector);
            }

            // System matrices/vectors
            TSystemMatrixType& rTMatrix = *mpTMatrix;
            TSystemVectorType& rConstantVector = *mpConstantVector;
            TSystemVectorType& rDeltaConstantVector = *mpDeltaConstantVector;

            // Resizing the system matrix
            if (rTMatrix.size1() == 0 || BaseType::GetReshapeMatrixFlag() || mCleared) { // If the matrix is not initialized
                rTMatrix.resize(BaseType::mEquationSystemSize, mDoFToSolveSystemSize, false);
                ConstructRelationMatrixStructure(pScheme, rTMatrix, rModelPart);
            } else {
                if (rTMatrix.size1() != BaseType::mEquationSystemSize || rTMatrix.size2() != mDoFToSolveSystemSize) {
                    KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
                    rTMatrix.resize(BaseType::mEquationSystemSize, mDoFToSolveSystemSize, false);
                    ConstructRelationMatrixStructure(pScheme, rTMatrix, rModelPart);
                }
            }

            // Resizing the system vector
            // The rigid movement
            if (rConstantVector.size() != BaseType::mEquationSystemSize || BaseType::GetReshapeMatrixFlag() || mCleared) {
                rConstantVector.resize(BaseType::mEquationSystemSize, false);
                mComputeConstantContribution = ComputeConstraintContribution(pScheme, rModelPart);
            } else {
                // NOTE(review): this else branch only runs when the size already matches, so the
                // inner check below can never trigger (dead code) — confirm the intended condition
                if (rConstantVector.size() != BaseType::mEquationSystemSize) {
                    KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
                    rConstantVector.resize(BaseType::mEquationSystemSize, false);
                    mComputeConstantContribution = ComputeConstraintContribution(pScheme, rModelPart);
                }
            }

            // The effective rigid movement
            if (mComputeConstantContribution) {
                if (rDeltaConstantVector.size() != BaseType::mEquationSystemSize || BaseType::GetReshapeMatrixFlag() || mCleared) {
                    rDeltaConstantVector.resize(BaseType::mEquationSystemSize, false);
                } else {
                    // NOTE(review): same dead-code pattern as above — unreachable given the outer condition
                    if (rDeltaConstantVector.size() != BaseType::mEquationSystemSize) {
                        KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
                        rDeltaConstantVector.resize(BaseType::mEquationSystemSize, false);
                    }
                }
            }
        }
    }

    /**
     * @brief It computes the reactions of the system
     * @param pScheme The pointer to the integration scheme
     * @param rModelPart The model part to compute
     * @param rA The LHS matrix of the system of equations
     * @param rDx The vector of unknowns
     * @param rb The RHS vector of the system of equations
     */
    void CalculateReactions(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY

        // Refresh RHS to have the correct reactions
        BuildRHS(pScheme, rModelPart, rb);

        // Adding contribution to reactions
        TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector;

        // Updating variables: fixed and slave dofs get their reaction from the reaction vector
        // via the EquationId -> reaction component map built in SetUpSystemWithConstraints
        for (auto& r_dof : BaseType::mDofSet) {
            if ((r_dof.IsFixed()) || mDoFSlaveSet.find(r_dof) != mDoFSlaveSet.end()) {
                r_dof.GetSolutionStepReactionValue() = -r_reactions_vector[mReactionEquationIdMap[r_dof.EquationId()]];
            }
        }

        KRATOS_CATCH("ResidualBasedEliminationBuilderAndSolverWithConstraints::CalculateReactions failed ..");
    }

    /**
     * @brief Applies the dirichlet conditions. This operation may be very heavy or completely unexpensive depending on the implementation chosen and on how the System Matrix is built.
     * @details In the base ResidualBasedEliminationBuilderAndSolver it does nothing, due to the fact that the BC are automatically managed with the elimination.
     * But in the constraints approach the slave DoFs depending on fixed DoFs must be reconstructed
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param rA The LHS matrix
     * @param rDx The Unknowns vector
     * @param rb The RHS vector
     */
    void ApplyDirichletConditions(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY;

        if (mDoFMasterFixedSet.size() > 0) {
            // We apply the same method as in the block builder and solver but instead of fixing the fixed Dofs, we just fix the master fixed Dofs
            std::vector<double> scaling_factors (mDoFToSolveSystemSize, 0.0);

            // NOTE: Dofs are assumed to be numbered consecutively
            const auto it_dof_begin = BaseType::mDofSet.begin();

            // scaling_factors[i] == 1.0 -> regular solvable dof; 0.0 -> master-fixed dof whose row/column is eliminated
            IndexType counter = 0;
            for (IndexType i = 0; i < BaseType::mDofSet.size(); ++i) {
                auto it_dof = it_dof_begin + i;
                const IndexType equation_id = it_dof->EquationId();
                if (equation_id < BaseType::mEquationSystemSize ) {
                    auto it_first_check = mDoFSlaveSet.find(*it_dof);
                    if (it_first_check == mDoFSlaveSet.end()) {
                        // NOTE(review): this second lookup repeats the first one on the same set
                        // (mDoFSlaveSet); it looks redundant — confirm whether a different set was intended
                        auto it_second_check = mDoFSlaveSet.find(*it_dof);
                        if (it_second_check == mDoFSlaveSet.end()) {
                            if(mDoFMasterFixedSet.find(*it_dof) == mDoFMasterFixedSet.end()) {
                                scaling_factors[counter] = 1.0;
                            }
                        }
                        counter += 1;
                    }
                }
            }

            double* Avalues = rA.value_data().begin();
            IndexType* Arow_indices = rA.index1_data().begin();
            IndexType* Acol_indices = rA.index2_data().begin();

            // Detect if there is a line of all zeros and set the diagonal to a 1 if this happens
            #pragma omp parallel for
            for(int k = 0; k < static_cast<int>(mDoFToSolveSystemSize); ++k) {
                const IndexType col_begin = Arow_indices[k];
                const IndexType col_end = Arow_indices[k+1];
                bool empty = true;
                for (IndexType j = col_begin; j < col_end; ++j) {
                    if(Avalues[j] != 0.0) {
                        empty = false;
                        break;
                    }
                }

                if(empty) {
                    rA(k,k) = 1.0;
                    rb[k] = 0.0;
                }
            }

            #pragma omp parallel for
            for (int k = 0; k < static_cast<int>(mDoFToSolveSystemSize); ++k) {
                const IndexType col_begin = Arow_indices[k];
                const IndexType col_end = Arow_indices[k+1];
                const double k_factor = scaling_factors[k];
                if (k_factor == 0) {
                    // Zero out the whole row, except the diagonal
                    for (IndexType j = col_begin; j < col_end; ++j)
                        if (static_cast<int>(Acol_indices[j]) != k )
                            Avalues[j] = 0.0;

                    // Zero out the RHS
                    rb[k] = 0.0;
                } else {
                    // Zero out the column which is associated with the zero'ed row
                    for (IndexType j = col_begin; j < col_end; ++j) {
                        if(scaling_factors[ Acol_indices[j] ] == 0 ) {
                            Avalues[j] = 0.0;
                        }
                    }
                }
            }
        }

        KRATOS_CATCH("");
    }

    /**
     * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
     */
    void Clear() override
    {
        BaseType::Clear();

        // Resetting auxiliary sets of dofs
        mDoFMasterFixedSet = DofsArrayType();
        mDoFSlaveSet = DofsArrayType();

        // Clearing the relation map
        mReactionEquationIdMap.clear();

        // Clear constraint system
        if (mpTMatrix != nullptr)
            TSparseSpace::Clear(mpTMatrix);
        if (mpConstantVector != nullptr)
            TSparseSpace::Clear(mpConstantVector);
        if (mpDeltaConstantVector != nullptr)
            TSparseSpace::Clear(mpDeltaConstantVector);

        // Set the flag so the constraint structures are rebuilt on the next solve
        mCleared = true;

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl;
    }

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    /**
     * @brief This method computes the equivalent counterpart of the SetUpSystem when using constraints
     * @param rModelPart The model part of the problem to solve
     */
    void SetUpSystemWithConstraints(ModelPart& rModelPart)
    {
        KRATOS_TRY

        // First we set up the system of equations without constraints
        // Set equation ids for the degrees of freedom: the free
        // degrees of freedom are positioned at the beginning of the system, while the fixed ones are at the end (in opposite order).
        //
        // That means that if the EquationId is greater than "mEquationSystemSize" the pointed degree of freedom is restrained
        // This is almost the same SetUpSystem from ResidualBasedEliminationBuilderAndSolver, but we don't discard from the system the fixed dofs that are part of a constraint at the same time

        /// First we detect the master fixed DoFs ///

        // The current process info
        ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        // Vector containing the localization in the system of the different terms
        DofsVectorType slave_dof_list, master_dof_list;

        // Declaring temporal variables
        DofsArrayType dof_temp_fixed_master;

        typedef std::unordered_set < DofPointerType, DofPointerHasher> set_type;
        set_type dof_global_fixed_master_set;

        // Iterate over constraints
        const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
        const auto it_const_begin = rModelPart.MasterSlaveConstraints().begin();
        #pragma omp parallel firstprivate(slave_dof_list, master_dof_list)
        {
            // We create the temporal set and we reserve some space on it
            set_type dof_temp_fixed_master_set;
            dof_temp_fixed_master_set.reserve(2000);

            #pragma omp for schedule(guided, 512) nowait
            for (int i_const = 0; i_const < number_of_constraints; ++i_const) {
                auto it_const = it_const_begin + i_const;

                // Detect if the constraint is active or not. If the user did not make any choice the
                // constraint is active by default
                bool constraint_is_active = true;
                if (it_const->IsDefined(ACTIVE))
                    constraint_is_active = it_const->Is(ACTIVE);

                if (constraint_is_active) {
                    it_const->GetDofList(slave_dof_list, master_dof_list, r_current_process_info);

                    // Filling the set of dofs that are master and fixed at the same time
                    for (auto& master_dof : master_dof_list) {
                        if (master_dof->IsFixed()) {
                            dof_temp_fixed_master_set.insert(master_dof);
                        }
                    }
                }
            }

            // We merge all the sets in one thread
            #pragma omp critical
            {
                dof_global_fixed_master_set.insert(dof_temp_fixed_master_set.begin(), dof_temp_fixed_master_set.end());
            }
        }

        dof_temp_fixed_master.reserve(dof_global_fixed_master_set.size());
        for (auto& dof : dof_global_fixed_master_set) {
            dof_temp_fixed_master.push_back( dof.get() );
        }
        dof_temp_fixed_master.Sort();
        mDoFMasterFixedSet = dof_temp_fixed_master;

        /// Now we compute as expected ///
        int free_id = 0;
        int fix_id = BaseType::mDofSet.size();

        // Fixed dofs are numbered from the end, EXCEPT fixed masters, which must stay inside
        // the system (their elimination is handled later in ApplyDirichletConditions)
        for (auto& dof : BaseType::mDofSet) {
            if (dof.IsFixed()) {
                auto it = mDoFMasterFixedSet.find(dof);
                if (it == mDoFMasterFixedSet.end()) {
                    dof.SetEquationId(--fix_id);
                } else {
                    dof.SetEquationId(free_id++);
                }
            } else {
                dof.SetEquationId(free_id++);
            }
        }

        BaseType::mEquationSystemSize = fix_id;

        // Add the computation of the global ids of the solvable dofs (inside the system and not slave)
        IndexType counter = 0;
        for (auto& dof : BaseType::mDofSet) {
            if (dof.EquationId() < BaseType::mEquationSystemSize) {
                auto it = mDoFSlaveSet.find(dof);
                if (it == mDoFSlaveSet.end()) {
                    ++counter;
                }
            }
        }

        // The total system of equations to be solved
        mDoFToSolveSystemSize = counter;

        // Finally we build the relation between the EquationID and the component of the reaction
        counter = 0;
        for (auto& r_dof : BaseType::mDofSet) {
            const bool is_master_fixed = mDoFMasterFixedSet.find(r_dof) == mDoFMasterFixedSet.end() ? false : true;
            const bool is_slave = mDoFSlaveSet.find(r_dof) == mDoFSlaveSet.end() ?
                false : true;
            if (is_master_fixed || is_slave) { // Fixed or MPC dof
                mReactionEquationIdMap.insert({r_dof.EquationId(), counter});
                ++counter;
            }
        }

        KRATOS_CATCH("ResidualBasedEliminationBuilderAndSolverWithConstraints::SetUpSystemWithConstraints failed ..");
    }

    /**
     * @brief This method initializes the DoFs using the master/slave relationship
     * @param pScheme The pointer to the integration scheme
     * @param rModelPart The model part to compute
     * @param rA The LHS matrix of the system of equations
     * @param rDx The vector of unknowns
     * @param rb The RHS vector of the system of equations
     */
    void ApplyMasterSlaveRelation(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        )
    {
        KRATOS_TRY

        // First we reset the slave dofs
        ConstraintUtilities::ResetSlaveDofs(rModelPart);

        // Now we apply the constraints
        ConstraintUtilities::ApplyConstraints(rModelPart);

        KRATOS_CATCH("");
    }

    /**
     * @brief This method checks that the master/slave relation is properly set
     * @param pScheme The pointer to the integration scheme
     * @param rModelPart The model part to compute
     * @param rDx The vector of unknowns
     * @param rDxSolved The vector of unknowns actually solved
     */
    bool CheckMasterSlaveRelation(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemVectorType& rDx,
        TSystemVectorType& rDxSolved
        )
    {
        KRATOS_TRY

        // Auxiliary values
        const auto it_dof_begin = BaseType::mDofSet.begin();
        TSystemVectorType current_solution(mDoFToSolveSystemSize);
        TSystemVectorType updated_solution(BaseType::mEquationSystemSize);
        TSystemVectorType residual_solution(BaseType::mEquationSystemSize);

        // Get current values of the solvable dofs (sequential: counter must advance in dof-set order)
        IndexType counter = 0;
        for (IndexType i = 0; i < BaseType::mDofSet.size(); ++i) {
            auto it_dof = it_dof_begin + i;
            const IndexType equation_id = it_dof->EquationId();
            if (equation_id < BaseType::mEquationSystemSize ) {
                auto it = mDoFSlaveSet.find(*it_dof);
                if (it == mDoFSlaveSet.end()) {
                    current_solution[counter] = it_dof->GetSolutionStepValue() + rDxSolved[counter];
                    counter += 1;
                }
            }
        }

        // Full-system values including slaves
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(BaseType::mDofSet.size()); ++i) {
            auto it_dof = it_dof_begin + i;
            const IndexType equation_id = it_dof->EquationId();
            if (equation_id < BaseType::mEquationSystemSize ) {
                residual_solution[equation_id] = it_dof->GetSolutionStepValue() + rDx[equation_id];
            }
        }

        // Apply master slave constraints: updated = T * current (+ constant vector if any)
        const TSystemMatrixType& rTMatrix = *mpTMatrix;
        TSparseSpace::Mult(rTMatrix, current_solution, updated_solution);
        if (mComputeConstantContribution) {
            ComputeConstraintContribution(pScheme, rModelPart, false, true);
            const TSystemVectorType& rConstantVector = *mpConstantVector;
            TSparseSpace::UnaliasedAdd(updated_solution, 1.0, rConstantVector);
        }
        TSparseSpace::UnaliasedAdd(residual_solution, -1.0, updated_solution);

        // Check database: any entry above machine epsilon means the relation is violated
        for(int k = 0; k < static_cast<int>(BaseType::mEquationSystemSize); ++k) {
            if (std::abs(residual_solution[k]) > std::numeric_limits<double>::epsilon()) return false;
        }

        return true;

        KRATOS_CATCH("");
    }

    /**
     * @brief This method reconstructs the slave solution after solving.
     * @param pScheme The pointer to the integration scheme
     * @param rModelPart Reference to the ModelPart containing the problem.
* @param rA System matrix * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void ReconstructSlaveSolutionAfterSolve( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) { KRATOS_TRY // We get the global T matrix and the constant vector const TSystemMatrixType& rTMatrix = *mpTMatrix; // We reconstruct the complete vector of Unknowns TSystemVectorType Dx_copy = rDx; rDx.resize(BaseType::mEquationSystemSize); TSparseSpace::Mult(rTMatrix, Dx_copy, rDx); // Add the constant vector if (mComputeConstantContribution) { const TSystemVectorType& rDeltaConstantVector = *mpDeltaConstantVector; TSparseSpace::UnaliasedAdd(rDx, 1.0, rDeltaConstantVector); } // We check the solution if (mCheckConstraintRelation) { KRATOS_ERROR_IF_NOT(CheckMasterSlaveRelation(pScheme, rModelPart, rDx, Dx_copy)) << "The relation between master/slave dofs is not respected" << std::endl; } // Simply restore old LHS (rA).swap(*mpOldAMatrix); mpOldAMatrix = NULL; // Reconstruct the RHS TSystemVectorType rb_copy = rb; rb.resize(BaseType::mEquationSystemSize, false); TSparseSpace::Mult(rTMatrix, rb_copy, rb); KRATOS_CATCH("ResidualBasedEliminationBuilderAndSolverWithConstraints::ReconstructSlaveSolutionAfterSolve failed .."); } /** * @brief Function to perform the build the system without constraints * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param rA The LHS matrix * @param rb The RHS vector */ void BuildWithoutConstraints( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rb ) { // The current process info ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // Getting the array of elements ElementsArrayType& r_elements_array = rModelPart.Elements(); // Getting the array of the conditions ConditionsArrayType& r_conditons_array = 
rModelPart.Conditions(); // Contributions to the system LocalSystemMatrixType lhs_contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType rhs_contribution = LocalSystemVectorType(0); // Vector containing the localization in the system of the different terms Element::EquationIdVectorType equation_id; // Assemble all elements and conditions #pragma omp parallel firstprivate( lhs_contribution, rhs_contribution, equation_id) { // Elements const auto it_elem_begin = r_elements_array.begin(); const int nelements = static_cast<int>(r_elements_array.size()); #pragma omp for schedule(guided, 512) nowait for (int i = 0; i<nelements; ++i) { auto it_elem = it_elem_begin + i; // Detect if the element is active or not. If the user did not make any choice the element is active by default bool element_is_active = true; if (it_elem->IsDefined(ACTIVE)) element_is_active = it_elem->Is(ACTIVE); if (element_is_active) { // Calculate elemental contribution pScheme->CalculateSystemContributions(*(it_elem.base()), lhs_contribution, rhs_contribution, equation_id, r_current_process_info); // Assemble the elemental contribution AssembleWithoutConstraints(rA, rb, lhs_contribution, rhs_contribution, equation_id); // Clean local elemental memory pScheme->CleanMemory(*(it_elem.base())); } } // Conditions const auto it_cond_begin = r_conditons_array.begin(); const int nconditions = static_cast<int>(r_conditons_array.size()); #pragma omp for schedule(guided, 512) for (int i = 0; i<nconditions; ++i) { auto it_cond = it_cond_begin + i; // Detect if the element is active or not. 
If the user did not make any choice the element is active by default bool condition_is_active = true; if (it_cond->IsDefined(ACTIVE)) condition_is_active = it_cond->Is(ACTIVE); if (condition_is_active) { // Calculate elemental contribution pScheme->Condition_CalculateSystemContributions(*(it_cond.base()), lhs_contribution, rhs_contribution, equation_id, r_current_process_info); // Assemble the elemental contribution AssembleWithoutConstraints(rA, rb, lhs_contribution, rhs_contribution, equation_id); // Clean local elemental memory pScheme->CleanMemory(*(it_cond.base())); } } } } /** * @brief Function to perform the build of the RHS without constraints * @details The vector could be sized as the total number of dofs or as the number of unrestrained ones * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param rb The RHS of the system */ void BuildRHSNoDirichletWithoutConstraints( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& rb ) { // The current process info ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // Getting the array of elements ElementsArrayType& r_elements_array = rModelPart.Elements(); // Getting the array of the conditions ConditionsArrayType& r_conditons_array = rModelPart.Conditions(); // Contributions to the system LocalSystemVectorType rhs_contribution = LocalSystemVectorType(0); // Vector containing the localization in the system of the different terms Element::EquationIdVectorType equation_id; // Assemble all elements and conditions #pragma omp parallel firstprivate( rhs_contribution, equation_id) { // Elements const auto it_elem_begin = r_elements_array.begin(); const int nelements = static_cast<int>(r_elements_array.size()); #pragma omp for schedule(guided, 512) nowait for (int i = 0; i<nelements; ++i) { auto it_elem = it_elem_begin + i; // Detect if the element is active or not. 
If the user did not make any choice the element is active by default bool element_is_active = true; if (it_elem->IsDefined(ACTIVE)) element_is_active = it_elem->Is(ACTIVE); if (element_is_active) { // Calculate elemental Right Hand Side Contribution pScheme->Calculate_RHS_Contribution(*(it_elem.base()), rhs_contribution, equation_id, r_current_process_info); // Assemble the elemental contribution AssembleRHSWithoutConstraints(rb, rhs_contribution, equation_id); } } // Conditions const auto it_cond_begin = r_conditons_array.begin(); const int nconditions = static_cast<int>(r_conditons_array.size()); #pragma omp for schedule(guided, 512) for (int i = 0; i<nconditions; ++i) { auto it_cond = it_cond_begin + i; // Detect if the element is active or not. If the user did not make any choice the element is active by default bool condition_is_active = true; if (it_cond->IsDefined(ACTIVE)) condition_is_active = it_cond->Is(ACTIVE); if (condition_is_active) { // Calculate elemental contribution pScheme->Condition_Calculate_RHS_Contribution(*(it_cond.base()), rhs_contribution, equation_id, r_current_process_info); // Assemble the elemental contribution AssembleRHSWithoutConstraints(rb, rhs_contribution, equation_id); } } } } /** * @brief This function does the assembling of the LHS and RHS * @note The main difference respect the block builder and solver is the fact that the fixed DoFs are not considered on the assembling */ void AssembleWithoutConstraints( TSystemMatrixType& rA, TSystemVectorType& rb, const LocalSystemMatrixType& rLHSContribution, const LocalSystemVectorType& rRHSContribution, const Element::EquationIdVectorType& rEquationId ) { const SizeType local_size = rLHSContribution.size1(); // Assemble RHS AssembleRHSWithoutConstraints(rb, rRHSContribution, rEquationId); // Assemble LHS for (IndexType i_local = 0; i_local < local_size; ++i_local) { const IndexType i_global = rEquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { 
BaseType::AssembleRowContributionFreeDofs(rA, rLHSContribution, i_global, i_local, rEquationId); } } } /** * @brief Assembling local contribution of nodes and elements in the RHS * @param rb The RHS vector */ void AssembleRHSWithoutConstraints( TSystemVectorType& rb, const LocalSystemVectorType& rRHSContribution, const Element::EquationIdVectorType& rEquationId ) { const SizeType local_size = rRHSContribution.size(); if (!BaseType::mCalculateReactionsFlag) { for (IndexType i_local = 0; i_local < local_size; ++i_local) { const IndexType i_global = rEquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { // free dof // ASSEMBLING THE SYSTEM VECTOR double& r_b_value = rb[i_global]; const double rhs_value = rRHSContribution[i_local]; #pragma omp atomic r_b_value += rhs_value; } } } else { TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector; for (IndexType i_local = 0; i_local < local_size; ++i_local) { const IndexType i_global = rEquationId[i_local]; auto it_dof = BaseType::mDofSet.begin() + i_global; const bool is_master_fixed = mDoFMasterFixedSet.find(*it_dof) == mDoFMasterFixedSet.end() ? false : true; const bool is_slave = mDoFSlaveSet.find(*it_dof) == mDoFSlaveSet.end() ? 
false : true; if (is_master_fixed || is_slave) { // Fixed or MPC dof double& r_b_value = r_reactions_vector[mReactionEquationIdMap[i_global]]; const double rhs_value = rRHSContribution[i_local]; #pragma omp atomic r_b_value += rhs_value; } else if (it_dof->IsFree()) { // Free dof not in the MPC // ASSEMBLING THE SYSTEM VECTOR double& r_b_value = rb[i_global]; const double& rhs_value = rRHSContribution[i_local]; #pragma omp atomic r_b_value += rhs_value; } } } } /** * @brief This method set to zero the relation matrix */ void ResetConstraintSystem() { TSystemMatrixType& rTMatrix = *mpTMatrix; double *Tvalues = rTMatrix.value_data().begin(); #pragma omp parallel for for (int i = 0; i < static_cast<int>(rTMatrix.nnz()); ++i) { Tvalues[i] = 0.0; } IndexMapType solvable_dof_reorder; // Filling with "ones" typedef std::pair<IndexType, IndexType> IndexIndexPairType; IndexType counter = 0; for (auto& dof : BaseType::mDofSet) { if (dof.EquationId() < BaseType::mEquationSystemSize) { const IndexType equation_id = dof.EquationId(); auto it = mDoFSlaveSet.find(dof); if (it == mDoFSlaveSet.end()) { solvable_dof_reorder.insert(IndexIndexPairType(equation_id, counter)); ++counter; } } } // Setting ones for (auto& solv_dof : solvable_dof_reorder) { rTMatrix(solv_dof.first, solv_dof.second) = 1.0; } if (mComputeConstantContribution) { TSystemVectorType& rConstantVector = *mpConstantVector; TSparseSpace::SetToZero(rConstantVector); } } /** * @brief This method applies the BC, only in the RHS * @param pScheme The pointer to the integration scheme * @param rModelPart The model part to compute * @param rb The RHS vector of the system of equations */ void ApplyDirichletConditionsRHS( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& rb ) { KRATOS_TRY; if (mDoFMasterFixedSet.size() > 0) { // NOTE: dofs are assumed to be numbered consecutively const auto it_dof_begin = BaseType::mDofSet.begin(); #pragma omp parallel for for(int k = 0; k < 
static_cast<int>(mDoFToSolveSystemSize); ++k) { auto it_dof = it_dof_begin + k; if (k < static_cast<int>(BaseType::mEquationSystemSize)) { auto it = mDoFSlaveSet.find(*it_dof); if (it == mDoFSlaveSet.end()) { if(mDoFMasterFixedSet.find(*it_dof) != mDoFMasterFixedSet.end()) { rb[k] = 0.0; } } } } } KRATOS_CATCH(""); } /** * @brief This method computes the absolute constant contribution of the MPC * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param ComputeTranslationMatrix If the translation matrix will be assembled * @param ComputeConstantVector If the constant vector will be assembled * @return If there are constant constraints */ bool ComputeConstraintContribution( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, const bool ComputeTranslationMatrix = false, const bool ComputeConstantVector = false ) { KRATOS_TRY; // We build the global T matrix and the g constant vector TSystemMatrixType& rTMatrix = *mpTMatrix; TSystemVectorType& rConstantVector = *mpConstantVector; // Filling constant vector if (ComputeConstantVector) { #pragma omp parallel for for (int i = 0; i < static_cast<int>(BaseType::mEquationSystemSize); ++i) { rConstantVector[i] = 0.0; } } // Auxiliar set to reorder master DoFs IndexMapType solvable_dof_reorder; // Filling with "ones" typedef std::pair<IndexType, IndexType> IndexIndexPairType; IndexType counter = 0; for (auto& dof : BaseType::mDofSet) { if (dof.EquationId() < BaseType::mEquationSystemSize) { const IndexType equation_id = dof.EquationId(); auto it = mDoFSlaveSet.find(dof); if (it == mDoFSlaveSet.end()) { solvable_dof_reorder.insert(IndexIndexPairType(equation_id, counter)); ++counter; } } } // The current process info ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // Initialize the constant vector double aux_constant_value = 0.0; // Contributions to the system LocalSystemMatrixType transformation_matrix = LocalSystemMatrixType(0, 0); 
LocalSystemVectorType constant_vector = LocalSystemVectorType(0); // Vector containing the localization in the system of the different terms EquationIdVectorType slave_equation_id, master_equation_id; const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size()); std::unordered_set<IndexType> auxiliar_constant_equations_ids; #pragma omp parallel firstprivate(transformation_matrix, constant_vector, slave_equation_id, master_equation_id) { std::unordered_set<IndexType> auxiliar_temp_constant_equations_ids; auxiliar_temp_constant_equations_ids.reserve(2000); #pragma omp for schedule(guided, 512) for (int i_const = 0; i_const < number_of_constraints; ++i_const) { auto it_const = rModelPart.MasterSlaveConstraints().begin() + i_const; // Detect if the constraint is active or not. If the user did not make any choice the constraint // It is active by default bool constraint_is_active = true; if (it_const->IsDefined(ACTIVE)) constraint_is_active = it_const->Is(ACTIVE); if (constraint_is_active) { it_const->CalculateLocalSystem(transformation_matrix, constant_vector, r_current_process_info); it_const->EquationIdVector(slave_equation_id, master_equation_id, r_current_process_info); // Reassign reordered dofs to the master side for (auto& id : master_equation_id) { id = solvable_dof_reorder[id]; } if (ComputeConstantVector) { for (IndexType i = 0; i < slave_equation_id.size(); ++i) { const IndexType i_global = slave_equation_id[i]; if (i_global < BaseType::mEquationSystemSize) { const double constant_value = constant_vector[i]; if (std::abs(constant_value) > 0.0) { auxiliar_temp_constant_equations_ids.insert(i_global); double& r_value = rConstantVector[i_global]; #pragma omp atomic r_value += constant_value; } } } } else { for (IndexType i = 0; i < slave_equation_id.size(); ++i) { const IndexType i_global = slave_equation_id[i]; if (i_global < BaseType::mEquationSystemSize) { const double constant_value = constant_vector[i]; #pragma omp atomic 
aux_constant_value += std::abs(constant_value); } } } if (ComputeTranslationMatrix) { // Assemble the constraint contribution AssembleRelationMatrix(rTMatrix, transformation_matrix, slave_equation_id, master_equation_id); } } } // We merge all the sets in one thread #pragma omp critical { auxiliar_constant_equations_ids.insert(auxiliar_temp_constant_equations_ids.begin(), auxiliar_temp_constant_equations_ids.end()); } } return aux_constant_value > std::numeric_limits<double>::epsilon() ? true : false; KRATOS_CATCH(""); } /** * @brief This method computes the efective constant * @param pScheme The pointer to the integration scheme * @param rModelPart The model part to compute * @param rDxSolved The vector of unkowns actually solved */ void ComputeEffectiveConstant( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& rDxSolved ) { if (mComputeConstantContribution) { // We get const TSystemMatrixType& rTMatrix = *mpTMatrix; TSystemVectorType& rConstantVector = *mpConstantVector; TSystemVectorType& rDeltaConstantVector = *mpDeltaConstantVector; TSparseSpace::Copy(rConstantVector, rDeltaConstantVector); // We reconstruct the complete vector of Unknowns TSystemVectorType Dx(BaseType::mEquationSystemSize); TSparseSpace::Mult(rTMatrix, rDxSolved, Dx); // Compute the effective constant vector // Auxiliar initial dof iterator const auto it_dof_begin = BaseType::mDofSet.begin(); TSystemVectorType u(BaseType::mEquationSystemSize); #pragma omp parallel for for (int i = 0; i < static_cast<int>(BaseType::mDofSet.size()); ++i) { auto it_dof = it_dof_begin + i; const IndexType equation_id = it_dof->EquationId(); if (equation_id < BaseType::mEquationSystemSize ) { u[equation_id] = it_dof->GetSolutionStepValue() + Dx[equation_id]; } } TSystemVectorType u_bar(mDoFToSolveSystemSize); IndexType counter = 0; for (IndexType i = 0; i < BaseType::mDofSet.size(); ++i) { auto it_dof = it_dof_begin + i; const IndexType equation_id = it_dof->EquationId(); if 
(equation_id < BaseType::mEquationSystemSize ) { auto it = mDoFSlaveSet.find(*it_dof); if (it == mDoFSlaveSet.end()) { u_bar[counter] = it_dof->GetSolutionStepValue() + rDxSolved[counter]; counter += 1; } } } TSystemVectorType u_bar_complete(BaseType::mEquationSystemSize); TSparseSpace::Mult(rTMatrix, u_bar, u_bar_complete); TSparseSpace::UnaliasedAdd(rDeltaConstantVector, 1.0, u_bar_complete); TSparseSpace::UnaliasedAdd(rDeltaConstantVector, -1.0, u); } } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class ResidualBasedEliminationBuilderAndSolverWithConstraints */ ///@} ///@name Type Definitions ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER defined */
pbkdf2-hmac-sha512_fmt_plug.c
/* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * Based on hmac-sha512 by magnum * * Minor fixes, format unification and OMP support done by Dhiru Kholia * <dhiru@openwall.com> * * Fixed for supporting $ml$ "dave" format as well as GRUB native format by * magnum 2013. Note: We support a binary size of >512 bits (64 bytes / 128 * chars of hex) but we currently do not calculate it even in cmp_exact(). The * chance for a 512-bit hash collision should be pretty dang slim. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_pbkdf2_hmac_sha512; #elif FMT_REGISTERS_H john_register_one(&fmt_pbkdf2_hmac_sha512); #else #include <ctype.h> #include <string.h> #include <assert.h> #include "misc.h" #include "arch.h" #include "common.h" #include "formats.h" #include "sha2.h" #include "johnswap.h" #include "stdint.h" #include "pbkdf2_hmac_common.h" #include "pbkdf2_hmac_sha512.h" #define FORMAT_LABEL "PBKDF2-HMAC-SHA512" #undef FORMAT_NAME #define FORMAT_NAME "GRUB2 / OS X 10.8+" #ifdef SIMD_COEF_64 #define ALGORITHM_NAME "PBKDF2-SHA512 " SHA512_ALGORITHM_NAME #else #if ARCH_BITS >= 64 #define ALGORITHM_NAME "PBKDF2-SHA512 64/" ARCH_BITS_STR " " SHA2_LIB #else #define ALGORITHM_NAME "PBKDF2-SHA512 32/" ARCH_BITS_STR " " SHA2_LIB #endif #endif #define SALT_SIZE sizeof(struct custom_salt) #ifdef SIMD_COEF_64 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 1 #endif #endif #include "memdbg.h" #define PAD_SIZE 128 #define PLAINTEXT_LENGTH 125 static struct custom_salt { uint8_t length; uint8_t salt[PBKDF2_64_MAX_SALT_SIZE]; uint32_t rounds; } *cur_salt; static char 
(*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[PBKDF2_SHA512_BINARY_SIZE / sizeof(ARCH_WORD_32)]; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static void *get_salt(char *ciphertext) { static struct custom_salt cs; char *p; int saltlen; char delim; memset(&cs, 0, sizeof(cs)); ciphertext += PBKDF2_SHA512_TAG_LEN; cs.rounds = atou(ciphertext); delim = strchr(ciphertext, '.') ? '.' : '$'; ciphertext = strchr(ciphertext, delim) + 1; p = strchr(ciphertext, delim); saltlen = 0; while (ciphertext < p) { /** extract salt **/ cs.salt[saltlen++] = atoi16[ARCH_INDEX(ciphertext[0])] * 16 + atoi16[ARCH_INDEX(ciphertext[1])]; ciphertext += 2; } cs.length = saltlen; return (void *)&cs; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { #ifdef SSE_GROUP_SZ_SHA512 int lens[SSE_GROUP_SZ_SHA512], i; unsigned char 
*pin[SSE_GROUP_SZ_SHA512]; union { ARCH_WORD_32 *pout[SSE_GROUP_SZ_SHA512]; unsigned char *poutc; } x; for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) { lens[i] = strlen(saved_key[index+i]); pin[i] = (unsigned char*)saved_key[index+i]; x.pout[i] = crypt_out[index+i]; } pbkdf2_sha512_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->length, cur_salt->rounds, &(x.poutc), PBKDF2_SHA512_BINARY_SIZE, 0); #else pbkdf2_sha512((const unsigned char*)(saved_key[index]), strlen(saved_key[index]), cur_salt->salt, cur_salt->length, cur_salt->rounds, (unsigned char*)crypt_out[index], PBKDF2_SHA512_BINARY_SIZE, 0); #endif } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], PBKDF2_SHA512_BINARY_SIZE); } static void set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } static int cmp_exact(char *source, int index) { return pbkdf2_hmac_sha512_cmp_exact(get_key(index), source, cur_salt->salt, cur_salt->length, cur_salt->rounds); } static unsigned int iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int) my_salt->rounds; } struct fmt_main fmt_pbkdf2_hmac_sha512 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, PBKDF2_SHA512_BINARY_SIZE, sizeof(ARCH_WORD_32), SALT_SIZE, sizeof(ARCH_WORD), MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE, { "iteration count", }, { PBKDF2_SHA512_FORMAT_TAG, FORMAT_TAG_ML, FORMAT_TAG_GRUB }, pbkdf2_hmac_sha512_common_tests }, { init, done, fmt_default_reset, 
pbkdf2_hmac_sha512_prepare, pbkdf2_hmac_sha512_valid, pbkdf2_hmac_sha512_split, pbkdf2_hmac_sha512_binary, get_salt, { iteration_count, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
GB_binop__minus_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__minus_int32) // A.*B function (eWiseMult): GB (_AemultB_08__minus_int32) // A.*B function (eWiseMult): GB (_AemultB_02__minus_int32) // A.*B function (eWiseMult): GB (_AemultB_04__minus_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_int32) // A*D function (colscale): GB (_AxD__minus_int32) // D*A function (rowscale): GB (_DxB__minus_int32) // C+=B function (dense accum): GB (_Cdense_accumB__minus_int32) // C+=b function (dense accum): GB (_Cdense_accumb__minus_int32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_int32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_int32) // C=scalar+B GB (_bind1st__minus_int32) // C=scalar+B' GB (_bind1st_tran__minus_int32) // C=A+scalar GB (_bind2nd__minus_int32) // C=A'+scalar GB (_bind2nd_tran__minus_int32) // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij - bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // 
true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x - y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_INT32 || GxB_NO_MINUS_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__minus_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__minus_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__minus_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__minus_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__minus_int32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool 
D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // NOTE(review): this is the continuation of the colscale kernel
    // (GB (_AxD__minus_int32)) whose opening lines precede this chunk.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    // the actual kernel loop lives in the shared template
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__minus_int32)
(
    GrB_Matrix C, const int C_sparsity,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace slicings are declared here and freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__minus_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__minus_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__minus_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    // each entry is independent, so the loop parallelizes trivially
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // GBB tests the bitmap; skip entries not present in B
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__minus_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x - aij) ; \
}

GrB_Info GB (_bind1st_tran__minus_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij - y) ; \
}

GrB_Info GB (_bind2nd_tran__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
portablegl.h
/* PortableGL 0.95 MIT licensed software renderer that closely mirrors OpenGL 3.x portablegl.com robertwinkler.com Do this: #define PORTABLEGL_IMPLEMENTATION before you include this file in *one* C or C++ file to create the implementation. If you plan on using your own 3D vector/matrix library rather than crsw_math that is built into PortableGL and your names are the standard glsl vec[2-4], mat[3-4] etc., define MANGLE_TYPES too before including portablegl to prefix all those builtin types with glinternal_ to avoid the clash. You can check all the C++ examples and demos, I use my C++ rsw_math library. // i.e. it should look like this: #include ... #include ... #include ... // if required #define MANGLE_TYPES #define PORTABLEGL_IMPLEMENTATION #include "portablegl.h" I use my CVector library for various types in PortableGL so you *can* #define CVEC_ASSERT, CVEC_MEMMOVE, and (mutually inclusive) CVEC_MALLOC, CVEC_REALLOC, and CVEC_FREE before the #include to avoid using the standard library versions. However, currently, I use at least malloc, realloc, and memcpy in PortableGL so doing so wouldn't actually avoid the standard library. Creating equivalent PortableGL macros (that would automagically apply to any internally used cvectors) is a TODO I suppose. QUICK NOTES: Primarily of interest to game/graphics developers and other people who just want to play with the graphics pipeline and don't need peak performance or the the entirety of OpenGL or Vulkan features. RGBA32 is the only currently supported format for textures Only GL_TEXTURE_MAG_FILTER is actually used internally but you can set the MIN_FILTER for a texture. 8-bit per channel RGBA is the only supported format for the framebuffer You can specify the order using the masks in init_glContext. Technically it'd be relatively trivial to add support for other formats but for now we use a u32* to access the buffer. 
Any PortableGL program has roughly this structure, with some things possibly declared globally or passed around in function parameters as needed: #define WIDTH 640 #define HEIGHT 480 // shaders are functions matching these prototypes void smooth_vs(float* vs_output, void* vertex_attribs, Shader_Builtins* builtins, void* uniforms); void smooth_fs(float* fs_input, Shader_Builtins* builtins, void* uniforms); typedef struct My_Uniforms { mat4 mvp_mat; vec4 v_color; } My_Uniforms; u32* backbuf; glContext the_context; if (!init_glContext(&the_context, &backbuf, WIDTH, HEIGHT, 32, 0x00FF0000, 0x0000FF00, 0x000000FF, 0xFF000000)) { puts("Failed to initialize glContext"); exit(0); } set_glContext(&the_context); // interpolation is an array with an entry of SMOOTH, FLAT or // NOPERSPECTIVE for each float being interpolated between the // vertex and fragment shaders // the last parameter is whether the fragment shader writes to // gl_FragDepth or discard, but it's not currently used. In the future I may // have a macro that enables early depth testing *if* that parameter is // false for a minor performance boost but canonicaly depth test happens // after the frag shader (and scissoring) GLenum interpolation[4] = { SMOOTH, SMOOTH, SMOOTH, SMOOTH }; GLuint myshader = pglCreateProgram(smooth_vs, smooth_fs, 4, interpolation, GL_FALSE); glUseProgram(myshader); My_Uniform the_uniforms; pglSetUniform(&the_uniforms); the_uniforms.v_color = Red; // not actually used, using per vert color memcpy(the_uniforms.mvp_mat, identity, sizeof(mat4)); // Your standard OpenGL buffer setup etc. here // Like the compatibility profile, we allow/enable a default // VAO. We also have a default shader program for the same reason, // something to fill index 0. // see implementation of init_glContext for details while (1) { // standard glDraw calls, switching shaders etc. 
// use backbuf however you want, whether that's blitting // it to some framebuffer in your GUI system, or even writing // it out to disk with something like stb_image_write. } free_glContext(&the_context); // compare with equivalent glsl below void smooth_vs(float* vs_output, void* vertex_attribs, Shader_Builtins* builtins, void* uniforms) { vec4* v_attribs = vertex_attribs; ((vec4*)vs_output)[0] = v_attribs[1]; //color builtins->gl_Position = mult_mat4_vec4(*((mat4*)uniforms), v_attribs[0]); } void smooth_fs(float* fs_input, Shader_Builtins* builtins, void* uniforms) { builtins->gl_FragColor = ((vec4*)fs_input)[0]; } // note smooth is the default so this is the same as smooth out vec4 vary_color // https://www.khronos.org/opengl/wiki/Type_Qualifier_(GLSL)#Interpolation_qualifiers uniform mvp_mat layout (location = 0) in vec4 in_vertex; layout (location = 1) in vec4 in_color; out vec4 vary_color; void main(void) { vary_color = in_color; gl_Position = mvp_mat * in_vertex; } in vec4 vary_color; out vec4 frag_color; void main(void) { frag_color = vary_color; } That's basically it. There are some other non-standard features like pglSetInterp that lets you change the interpolation of a shader whenever you want. In real OpenGL you'd have to have 2 (or more) separate but almost identical shaders to do that. There are also these predefined maximums which, considering the performance limitations of PortableGL, are probably more than enough. MAX_DRAW_BUFFERS isn't used since they're not currently supported anyway. 
#define MAX_VERTICES 500000 #define GL_MAX_VERTEX_ATTRIBS 16 #define GL_MAX_VERTEX_OUTPUT_COMPONENTS 64 #define GL_MAX_DRAW_BUFFERS 8 MIT License Copyright (c) 2011-2022 Robert Winkler Copyright (c) 1997-2022 Fabrice Bellard (clipping code from TinyGL) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #ifdef MANGLE_TYPES #define vec2 glinternal_vec2 #define vec3 glinternal_vec3 #define vec4 glinternal_vec4 #define dvec2 glinternal_dvec2 #define dvec3 glinternal_dvec3 #define dvec4 glinternal_dvec4 #define ivec2 glinternal_ivec2 #define ivec3 glinternal_ivec3 #define ivec4 glinternal_ivec4 #define uvec2 glinternal_uvec2 #define uvec3 glinternal_uvec3 #define uvec4 glinternal_uvec4 #define mat2 glinternal_mat2 #define mat3 glinternal_mat3 #define mat4 glinternal_mat4 #define Color glinternal_Color #define Line glinternal_Line #define Plane glinternal_Plane #endif #ifndef GL_H #define GL_H #ifdef __cplusplus extern "C" { #endif #ifndef CRSW_MATH_H #define CRSW_MATH_H #include <math.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #define RM_PI (3.14159265358979323846) #define RM_2PI (2.0 * RM_PI) #define PI_DIV_180 (0.017453292519943296) #define INV_PI_DIV_180 (57.2957795130823229) #define DEG_TO_RAD(x) ((x)*PI_DIV_180) #define RAD_TO_DEG(x) ((x)*INV_PI_DIV_180) /* Hour angles */ #define HR_TO_DEG(x) ((x) * (1.0 / 15.0)) #define HR_TO_RAD(x) DEG_TO_RAD(HR_TO_DEG(x)) #define DEG_TO_HR(x) ((x) * 15.0) #define RAD_TO_HR(x) DEG_TO_HR(RAD_TO_DEG(x)) // TODO rename RM_MAX? make proper inline functions? #ifndef MAX #define MAX(a, b) ((a) > (b)) ? (a) : (b) #endif #ifndef MIN #define MIN(a, b) ((a) < (b)) ? 
(a) : (b)
#endif

// linearly remap X from range [A,B] to range [C,D]
#define MAP(X, A, B, C, D) ((X)-(A))/((B)-(A)) * ((D)-(C)) + (C)

// fixed-width integer shorthands
typedef uint8_t   u8;
typedef uint16_t  u16;
typedef uint32_t  u32;
typedef uint64_t  u64;
typedef int8_t    i8;
typedef int16_t   i16;
typedef int32_t   i32;
typedef int64_t   i64;

// returns float [0,1)
inline float rsw_randf() { return rand() / (RAND_MAX + 1.0f); }

// returns float in [min, max)
inline float rsw_randf_range(float min, float max) { return min + (max-min) * rsw_randf(); }

// float vector types (glsl-style names)
typedef struct vec2 { float x; float y; } vec2;
typedef struct vec3 { float x; float y; float z; } vec3;
typedef struct vec4 { float x; float y; float z; float w; } vec4;

// component-wise setters; do/while(0) makes them statement-safe
#define SET_VEC2(v, _x, _y) \
	do {\
	(v).x = _x;\
	(v).y = _y;\
	} while (0)

#define SET_VEC3(v, _x, _y, _z) \
	do {\
	(v).x = _x;\
	(v).y = _y;\
	(v).z = _z;\
	} while (0)

#define SET_VEC4(v, _x, _y, _z, _w) \
	do {\
	(v).x = _x;\
	(v).y = _y;\
	(v).z = _z;\
	(v).w = _w;\
	} while (0)

// constructors
inline vec2 make_vec2(float x, float y) { vec2 v = { x, y }; return v; }
inline vec3 make_vec3(float x, float y, float z) { vec3 v = { x, y, z }; return v; }
inline vec4 make_vec4(float x, float y, float z, float w) { vec4 v = { x, y, z, w }; return v; }

// component-wise negation
inline vec2 negate_vec2(vec2 v) { vec2 r = { -v.x, -v.y }; return r; }
inline vec3 negate_vec3(vec3 v) { vec3 r = { -v.x, -v.y, -v.z }; return r; }
inline vec4 negate_vec4(vec4 v) { vec4 r = { -v.x, -v.y, -v.z, -v.w }; return r; }

// formatted output; 'append' is printed after the closing paren (e.g. "\n")
inline void fprint_vec2(FILE* f, vec2 v, const char* append) { fprintf(f, "(%f, %f)%s", v.x, v.y, append); }
inline void fprint_vec3(FILE* f, vec3 v, const char* append) { fprintf(f, "(%f, %f, %f)%s", v.x, v.y, v.z, append); }
inline void fprint_vec4(FILE* f, vec4 v, const char* append) { fprintf(f, "(%f, %f, %f, %f)%s", v.x, v.y, v.z, v.w, append); }

inline void print_vec2(vec2 v, const char* append) { printf("(%f, %f)%s", v.x, v.y, append); }
inline void print_vec3(vec3 v, const char* append) { printf("(%f, %f, %f)%s", v.x, v.y, v.z, append); }
inline void print_vec4(vec4 v, const char* append) { printf("(%f, %f, %f, %f)%s", v.x, v.y, v.z, v.w, append); }

// parse back what fprint_* wrote; return 1 on success, 0 on failure
inline int fread_vec2(FILE* f, vec2* v) { int tmp = fscanf(f, " (%f, %f)", &v->x, &v->y); return (tmp == 2); }
inline int fread_vec3(FILE* f, vec3* v) { int tmp = fscanf(f, " (%f, %f, %f)", &v->x, &v->y, &v->z); return (tmp == 3); }
inline int fread_vec4(FILE* f, vec4* v) { int tmp = fscanf(f, " (%f, %f, %f, %f)", &v->x, &v->y, &v->z, &v->w); return (tmp == 4); }

// double-precision vector types
typedef struct dvec2 { double x; double y; } dvec2;
typedef struct dvec3 { double x; double y; double z; } dvec3;
typedef struct dvec4 { double x; double y; double z; double w; } dvec4;

// NOTE(review): %f in fprintf prints doubles fine (default promotion),
// but fscanf below must (and does) use %lf for doubles
inline void fprint_dvec2(FILE* f, dvec2 v, const char* append) { fprintf(f, "(%f, %f)%s", v.x, v.y, append); }
inline void fprint_dvec3(FILE* f, dvec3 v, const char* append) { fprintf(f, "(%f, %f, %f)%s", v.x, v.y, v.z, append); }
inline void fprint_dvec4(FILE* f, dvec4 v, const char* append) { fprintf(f, "(%f, %f, %f, %f)%s", v.x, v.y, v.z, v.w, append); }

inline int fread_dvec2(FILE* f, dvec2* v) { int tmp = fscanf(f, " (%lf, %lf)", &v->x, &v->y); return (tmp == 2); }
inline int fread_dvec3(FILE* f, dvec3* v) { int tmp = fscanf(f, " (%lf, %lf, %lf)", &v->x, &v->y, &v->z); return (tmp == 3); }
inline int fread_dvec4(FILE* f, dvec4* v) { int tmp = fscanf(f, " (%lf, %lf, %lf, %lf)", &v->x, &v->y, &v->z, &v->w); return (tmp == 4); }

// integer vector types
typedef struct ivec2 { int x; int y; } ivec2;
typedef struct ivec3 { int x; int y; int z; } ivec3;
typedef struct ivec4 { int x; int y; int z; int w; } ivec4;

inline ivec2 make_ivec2(int x, int y) { ivec2 v = { x, y }; return v; }
inline ivec3 make_ivec3(int x, int y, int z) { ivec3 v = { x, y, z }; return v; }
inline ivec4 make_ivec4(int x, int y, int z, int w) { ivec4 v = { x, y, z, w }; return v; }

inline void fprint_ivec2(FILE* f, ivec2 v, const char* append) { fprintf(f, "(%d, %d)%s", v.x, v.y, append); }
inline void fprint_ivec3(FILE* f, ivec3 v, const char* append) { fprintf(f, "(%d, %d, %d)%s", v.x, v.y, v.z, append); }
inline void fprint_ivec4(FILE* f, ivec4 v, const char* append) { fprintf(f, "(%d, %d, %d, %d)%s", v.x, v.y, v.z, v.w, append); }

inline int fread_ivec2(FILE* f, ivec2* v) { int tmp = fscanf(f, " (%d, %d)", &v->x, &v->y); return (tmp == 2); }
inline int fread_ivec3(FILE* f, ivec3* v) { int tmp = fscanf(f, " (%d, %d, %d)", &v->x, &v->y, &v->z); return (tmp == 3); }
inline int fread_ivec4(FILE* f, ivec4* v) { int tmp = fscanf(f, " (%d, %d, %d, %d)", &v->x, &v->y, &v->z, &v->w); return (tmp == 4); }

// unsigned integer vector types
typedef struct uvec2 { unsigned int x; unsigned int y; } uvec2;
typedef struct uvec3 { unsigned int x; unsigned int y; unsigned int z; } uvec3;
typedef struct uvec4 { unsigned int x; unsigned int y; unsigned int z; unsigned int w; } uvec4;

inline void fprint_uvec2(FILE* f, uvec2 v, const char* append) { fprintf(f, "(%u, %u)%s", v.x, v.y, append); }
inline void fprint_uvec3(FILE* f, uvec3 v, const char* append) { fprintf(f, "(%u, %u, %u)%s", v.x, v.y, v.z, append); }
inline void fprint_uvec4(FILE* f, uvec4 v, const char* append) { fprintf(f, "(%u, %u, %u, %u)%s", v.x, v.y, v.z, v.w, append); }

inline int fread_uvec2(FILE* f, uvec2* v) { int tmp = fscanf(f, " (%u, %u)", &v->x, &v->y); return (tmp == 2); }
inline int fread_uvec3(FILE* f, uvec3* v) { int tmp = fscanf(f, " (%u, %u, %u)", &v->x, &v->y, &v->z); return (tmp == 3); }
inline int fread_uvec4(FILE* f, uvec4* v) { int tmp = fscanf(f, " (%u, %u, %u, %u)", &v->x, &v->y, &v->z, &v->w); return (tmp == 4); }

// Euclidean length (uses double sqrt, truncated to float on return)
inline float length_vec2(vec2 a) { return sqrt(a.x * a.x + a.y * a.y); }
inline float length_vec3(vec3 a) { return sqrt(a.x * a.x + a.y * a.y + a.z * a.z); }

// unit-length copy; NOTE(review): no guard for zero-length input (div by 0)
inline vec2 norm_vec2(vec2 a) { float l = length_vec2(a); vec2 c = { a.x/l, a.y/l }; return c; }
inline vec3 norm_vec3(vec3 a) { float l = length_vec3(a); vec3 c = { a.x/l, a.y/l, a.z/l }; return c; }

// in-place normalization
inline void normalize_vec2(vec2* a) { float l = length_vec2(*a); a->x /= l; a->y /= l; }
inline void normalize_vec3(vec3* a) { float l = length_vec3(*a); a->x /= l; a->y /= l; a->z /= l; }

// component-wise arithmetic
inline vec2 add_vec2s(vec2 a, vec2 b) { vec2 c = { a.x + b.x, a.y + b.y }; return c; }
inline vec3 add_vec3s(vec3 a, vec3 b) { vec3 c = { a.x + b.x, a.y + b.y, a.z + b.z }; return c; }
inline vec4 add_vec4s(vec4 a, vec4 b) { vec4 c = { a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w }; return c; }

inline vec2 sub_vec2s(vec2 a, vec2 b) { vec2 c = { a.x - b.x, a.y - b.y }; return c; }
inline vec3 sub_vec3s(vec3 a, vec3 b) { vec3 c = { a.x - b.x, a.y - b.y, a.z - b.z }; return c; }
inline vec4 sub_vec4s(vec4 a, vec4 b) { vec4 c = { a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w }; return c; }

inline vec2 mult_vec2s(vec2 a, vec2 b) { vec2 c = { a.x * b.x, a.y * b.y }; return c; }
inline vec3 mult_vec3s(vec3 a, vec3 b) { vec3 c = { a.x * b.x, a.y * b.y, a.z * b.z }; return c; }
inline vec4 mult_vec4s(vec4 a, vec4 b) { vec4 c = { a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w }; return c; }

inline vec2 div_vec2s(vec2 a, vec2 b) { vec2 c = { a.x / b.x, a.y / b.y }; return c; }
inline vec3 div_vec3s(vec3 a, vec3 b) { vec3 c = { a.x / b.x, a.y / b.y, a.z / b.z }; return c; }
inline vec4 div_vec4s(vec4 a, vec4 b) { vec4 c = { a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w }; return c; }

// dot products
inline float dot_vec2s(vec2 a, vec2 b) { return a.x*b.x + a.y*b.y; }
inline float dot_vec3s(vec3 a, vec3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }
inline float dot_vec4s(vec4 a, vec4 b) { return a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w; }

// scalar scale
inline vec2 scale_vec2(vec2 a, float s) { vec2 b = { a.x * s, a.y * s }; return b; }
inline vec3 scale_vec3(vec3 a, float s) { vec3 b = { a.x * s, a.y * s, a.z * s }; return b; }
inline vec4 scale_vec4(vec4 a, float s) { vec4 b = { a.x * s, a.y * s, a.z * s, a.w * s }; return b; }

// exact float equality (intentional; use the epsilon variants for tolerance)
inline int equal_vec2s(vec2 a, vec2 b) { return (a.x == b.x && a.y == b.y); }
inline int equal_vec3s(vec3 a, vec3 b) { return (a.x == b.x && a.y == b.y && a.z == b.z); }
inline int equal_vec4s(vec4 a, vec4 b) { return (a.x == b.x && a.y == b.y && a.z == b.z && a.w == b.w); }

inline int equal_epsilon_vec2s(vec2 a, vec2 b, float epsilon) { return (fabs(a.x-b.x) < epsilon && fabs(a.y - b.y) < epsilon); }
inline int equal_epsilon_vec3s(vec3 a, vec3 b, float epsilon) { return (fabs(a.x-b.x) < epsilon && fabs(a.y - b.y) < epsilon && fabs(a.z - b.z) < epsilon); }
inline int equal_epsilon_vec4s(vec4 a, vec4 b, float epsilon) { return (fabs(a.x-b.x) < epsilon && fabs(a.y - b.y) < epsilon && fabs(a.z - b.z) < epsilon && fabs(a.w - b.w) < epsilon); }

// truncating conversions (drop trailing components)
inline vec2 vec4_to_vec2(vec4 a) { vec2 v = { a.x, a.y }; return v; }
inline vec3 vec4_to_vec3(vec4 a) { vec3 v = { a.x, a.y, a.z }; return v; }

// homogeneous conversions (perspective divide by w)
inline vec2 vec4_to_vec2h(vec4 a) { vec2 v = { a.x/a.w, a.y/a.w }; return v; }
inline vec3 vec4_to_vec3h(vec4 a) { vec3 v = { a.x/a.w, a.y/a.w, a.z/a.w }; return v; }

inline vec3 cross_product(const vec3 u, const vec3 v)
{
	vec3 result;
	result.x = u.y*v.z - v.y*u.z;
	result.y = -u.x*v.z + v.x*u.z;
	result.z = u.x*v.y - v.x*u.y;
	return result;
}

// NOTE(review): assumes u and v are already unit length (no normalization here)
inline float angle_between_vec3(const vec3 u, const vec3 v)
{
	return acos(dot_vec3s(u, v));
}

/* matrices **************/

// flat float arrays; layout (column- vs row-major) chosen by ROW_MAJOR below
typedef float mat2[4];
typedef float mat3[9];
typedef float mat4[16];

#define IDENTITY_MAT2() { 1, 0, 0, 1 }
#define IDENTITY_MAT3() { 1, 0, 0, 0, 1, 0, 0, 0, 1 }
#define IDENTITY_MAT4() { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 }

#define SET_IDENTITY_MAT2(m) \
	do { \
	m[1] = m[2] = 0; \
	m[0] = m[3] = 1; \
	} while (0)

#define SET_IDENTITY_MAT3(m) \
	do { \
	memset(m, 0, sizeof(float)*9); \
	m[0] = m[4] = m[8] = 1; \
	} while (0)

#define SET_IDENTITY_MAT4(m) \
	do { \
	memset(m, 0, sizeof(float)*16); \
	m[0] = m[5] = m[10] = m[15] = 1; \
	} while (0)

// x_/y_/z_/w_ extract ROWS, c1_..c4_ extract COLUMNS; the index tables
// below differ between column-major (default) and ROW_MAJOR storage
#ifndef ROW_MAJOR
inline vec2 x_mat2(mat2 m) { return make_vec2(m[0], m[2]); }
inline vec2 y_mat2(mat2 m) { return make_vec2(m[1], m[3]); }
inline vec2 c1_mat2(mat2 m) { return make_vec2(m[0], m[1]); }
inline vec2 c2_mat2(mat2 m) { return make_vec2(m[2], m[3]); }

inline void setc1_mat2(mat2 m, vec2 v) { m[0]=v.x, m[1]=v.y; }
inline void setc2_mat2(mat2 m, vec2 v) { m[2]=v.x, m[3]=v.y; }
inline void setx_mat2(mat2 m, vec2 v) { m[0]=v.x, m[2]=v.y; }
inline void sety_mat2(mat2 m, vec2 v) { m[1]=v.x, m[3]=v.y; }
#else
inline vec2 x_mat2(mat2 m) { return make_vec2(m[0], m[1]); }
inline vec2 y_mat2(mat2 m) { return make_vec2(m[2], m[3]); }
inline vec2 c1_mat2(mat2 m) { return make_vec2(m[0], m[2]); }
inline vec2 c2_mat2(mat2 m) { return make_vec2(m[1], m[3]); }

inline void setc1_mat2(mat2 m, vec2 v) { m[0]=v.x, m[2]=v.y; }
inline void setc2_mat2(mat2 m, vec2 v) { m[1]=v.x, m[3]=v.y; }
inline void setx_mat2(mat2 m, vec2 v) { m[0]=v.x, m[1]=v.y; }
inline void sety_mat2(mat2 m, vec2 v) { m[2]=v.x, m[3]=v.y; }
#endif

#ifndef ROW_MAJOR
inline vec3 x_mat3(mat3 m) { return make_vec3(m[0], m[3], m[6]); }
inline vec3 y_mat3(mat3 m) { return make_vec3(m[1], m[4], m[7]); }
inline vec3 z_mat3(mat3 m) { return make_vec3(m[2], m[5], m[8]); }
inline vec3 c1_mat3(mat3 m) { return make_vec3(m[0], m[1], m[2]); }
inline vec3 c2_mat3(mat3 m) { return make_vec3(m[3], m[4], m[5]); }
inline vec3 c3_mat3(mat3 m) { return make_vec3(m[6], m[7], m[8]); }

inline void setc1_mat3(mat3 m, vec3 v) { m[0]=v.x, m[1]=v.y, m[2]=v.z; }
inline void setc2_mat3(mat3 m, vec3 v) { m[3]=v.x, m[4]=v.y, m[5]=v.z; }
inline void setc3_mat3(mat3 m, vec3 v) { m[6]=v.x, m[7]=v.y, m[8]=v.z; }

inline void setx_mat3(mat3 m, vec3 v) { m[0]=v.x, m[3]=v.y, m[6]=v.z; }
inline void sety_mat3(mat3 m, vec3 v) { m[1]=v.x, m[4]=v.y, m[7]=v.z; }
inline void setz_mat3(mat3 m, vec3 v) { m[2]=v.x, m[5]=v.y, m[8]=v.z; }
#else
inline vec3 x_mat3(mat3 m) { return make_vec3(m[0], m[1], m[2]); }
inline vec3 y_mat3(mat3 m) { return make_vec3(m[3], m[4], m[5]); }
inline vec3 z_mat3(mat3 m) { return make_vec3(m[6], m[7], m[8]); }
inline vec3 c1_mat3(mat3 m) { return make_vec3(m[0], m[3], m[6]); }
inline vec3 c2_mat3(mat3 m) { return make_vec3(m[1], m[4], m[7]); }
inline vec3 c3_mat3(mat3 m) { return make_vec3(m[2], m[5], m[8]); }

inline void setc1_mat3(mat3 m, vec3 v) { m[0]=v.x, m[3]=v.y, m[6]=v.z; }
inline void setc2_mat3(mat3 m, vec3 v) { m[1]=v.x, m[4]=v.y, m[7]=v.z; }
inline void setc3_mat3(mat3 m, vec3 v) { m[2]=v.x, m[5]=v.y, m[8]=v.z; }

inline void setx_mat3(mat3 m, vec3 v) { m[0]=v.x, m[1]=v.y, m[2]=v.z; }
inline void sety_mat3(mat3 m, vec3 v) { m[3]=v.x, m[4]=v.y, m[5]=v.z; }
inline void setz_mat3(mat3 m, vec3 v) { m[6]=v.x, m[7]=v.y, m[8]=v.z; }
#endif

#ifndef ROW_MAJOR
inline vec4 c1_mat4(mat4 m) { return make_vec4(m[ 0], m[ 1], m[ 2], m[ 3]); }
inline vec4 c2_mat4(mat4 m) { return make_vec4(m[ 4], m[ 5], m[ 6], m[ 7]); }
inline vec4 c3_mat4(mat4 m) { return make_vec4(m[ 8], m[ 9], m[10], m[11]); }
inline vec4 c4_mat4(mat4 m) { return make_vec4(m[12], m[13], m[14], m[15]); }

inline vec4 x_mat4(mat4 m) { return make_vec4(m[0], m[4], m[8], m[12]); }
inline vec4 y_mat4(mat4 m) { return make_vec4(m[1], m[5], m[9], m[13]); }
inline vec4 z_mat4(mat4 m) { return make_vec4(m[2], m[6], m[10], m[14]); }
inline vec4 w_mat4(mat4 m) { return make_vec4(m[3], m[7], m[11], m[15]); }

//sets 4th row to 0 0 0 1
inline void setc1_mat4v3(mat4 m, vec3 v) { m[ 0]=v.x, m[ 1]=v.y, m[ 2]=v.z, m[ 3]=0; }
inline void setc2_mat4v3(mat4 m, vec3 v) { m[ 4]=v.x, m[ 5]=v.y, m[ 6]=v.z, m[ 7]=0; }
inline void setc3_mat4v3(mat4 m, vec3 v) { m[ 8]=v.x, m[ 9]=v.y, m[10]=v.z, m[11]=0; }
inline void setc4_mat4v3(mat4 m, vec3 v) { m[12]=v.x, m[13]=v.y, m[14]=v.z, m[15]=1; }

inline void setc1_mat4v4(mat4 m, vec4 v) { m[ 0]=v.x, m[ 1]=v.y, m[ 2]=v.z, m[ 3]=v.w; }
inline void setc2_mat4v4(mat4 m, vec4 v) { m[ 4]=v.x, m[ 5]=v.y, m[ 6]=v.z, m[ 7]=v.w; }
inline void setc3_mat4v4(mat4 m, vec4 v) { m[ 8]=v.x, m[ 9]=v.y, m[10]=v.z, m[11]=v.w; }
inline void setc4_mat4v4(mat4 m, vec4 v) { m[12]=v.x, m[13]=v.y, m[14]=v.z, m[15]=v.w; }

//sets 4th column to 0 0 0 1
inline void setx_mat4v3(mat4 m, vec3 v) { m[0]=v.x, m[4]=v.y, m[ 8]=v.z, m[12]=0; }
inline void sety_mat4v3(mat4 m, vec3 v) { m[1]=v.x, m[5]=v.y, m[ 9]=v.z, m[13]=0; }
inline void setz_mat4v3(mat4 m, vec3 v) { m[2]=v.x, m[6]=v.y, m[10]=v.z, m[14]=0; }
inline void setw_mat4v3(mat4 m, vec3 v) { m[3]=v.x, m[7]=v.y, m[11]=v.z, m[15]=1; }

inline void setx_mat4v4(mat4 m, vec4 v) { m[0]=v.x, m[4]=v.y, m[ 8]=v.z, m[12]=v.w; }
inline void sety_mat4v4(mat4 m, vec4 v) { m[1]=v.x, m[5]=v.y, m[ 9]=v.z, m[13]=v.w; }
inline void setz_mat4v4(mat4 m, vec4 v) { m[2]=v.x, m[6]=v.y, m[10]=v.z, m[14]=v.w; }
inline void setw_mat4v4(mat4 m, vec4 v) { m[3]=v.x, m[7]=v.y, m[11]=v.z, m[15]=v.w; }
#else
inline vec4 c1_mat4(mat4 m) { return make_vec4(m[0], m[4], m[8], m[12]); }
inline vec4 c2_mat4(mat4 m) { return make_vec4(m[1], m[5], m[9], m[13]); }
inline vec4 c3_mat4(mat4 m) { return make_vec4(m[2], m[6], m[10], m[14]); }
inline vec4 c4_mat4(mat4 m) { return make_vec4(m[3], m[7], m[11], m[15]); }

inline vec4 x_mat4(mat4 m) { return make_vec4(m[0], m[1], m[2], m[3]); }
inline vec4 y_mat4(mat4 m) { return make_vec4(m[4], m[5], m[6], m[7]); }
inline vec4 z_mat4(mat4 m) { return make_vec4(m[8], m[9], m[10], m[11]); }
inline vec4 w_mat4(mat4 m) { return make_vec4(m[12], m[13], m[14], m[15]); }

//sets 4th row to 0 0 0 1
inline void setc1_mat4v3(mat4 m, vec3 v) { m[0]=v.x, m[4]=v.y, m[8]=v.z, m[12]=0; }
inline void setc2_mat4v3(mat4 m, vec3 v) { m[1]=v.x, m[5]=v.y, m[9]=v.z, m[13]=0; }
inline void setc3_mat4v3(mat4 m, vec3 v) { m[2]=v.x, m[6]=v.y, m[10]=v.z, m[14]=0; }
inline void setc4_mat4v3(mat4 m, vec3 v) { m[3]=v.x, m[7]=v.y, m[11]=v.z, m[15]=1; }

inline void setc1_mat4v4(mat4 m, vec4 v) { m[0]=v.x, m[4]=v.y, m[8]=v.z, m[12]=v.w; }
inline void setc2_mat4v4(mat4 m, vec4 v) { m[1]=v.x, m[5]=v.y, m[9]=v.z, m[13]=v.w; }
inline void setc3_mat4v4(mat4 m, vec4 v) { m[2]=v.x, m[6]=v.y, m[10]=v.z, m[14]=v.w; }
inline void setc4_mat4v4(mat4 m, vec4 v) { m[3]=v.x, m[7]=v.y, m[11]=v.z, m[15]=v.w; }

//sets 4th column to 0 0 0 1
inline void setx_mat4v3(mat4 m, vec3 v) { m[0]=v.x, m[1]=v.y, m[2]=v.z, m[3]=0; }
inline void sety_mat4v3(mat4 m, vec3 v) { m[4]=v.x, m[5]=v.y, m[6]=v.z, m[7]=0; }
inline void setz_mat4v3(mat4 m, vec3 v) { m[8]=v.x, m[9]=v.y, m[10]=v.z, m[11]=0; }
inline void setw_mat4v3(mat4 m, vec3 v) { m[12]=v.x, m[13]=v.y, m[14]=v.z, m[15]=1; }

inline void setx_mat4v4(mat4 m, vec4 v) { m[0]=v.x, m[1]=v.y, m[2]=v.z, m[3]=v.w; }
inline void sety_mat4v4(mat4 m, vec4 v) { m[4]=v.x, m[5]=v.y, m[6]=v.z, m[7]=v.w; }
inline void setz_mat4v4(mat4 m, vec4 v) { m[8]=v.x, m[9]=v.y, m[10]=v.z, m[11]=v.w; }
inline void setw_mat4v4(mat4 m, vec4 v) { m[12]=v.x, m[13]=v.y, m[14]=v.z, m[15]=v.w; }
#endif

// matrices are printed row by row regardless of storage order
inline void fprint_mat2(FILE* f, mat2 m, const char* append)
{
#ifndef ROW_MAJOR
	fprintf(f, "[(%f, %f)\n (%f, %f)]%s", m[0], m[2], m[1], m[3], append);
#else
	fprintf(f, "[(%f, %f)\n (%f, %f)]%s", m[0], m[1], m[2], m[3], append);
#endif
}

inline void fprint_mat3(FILE* f, mat3 m, const char* append)
{
#ifndef ROW_MAJOR
	fprintf(f, "[(%f, %f, %f)\n (%f, %f, %f)\n (%f, %f, %f)]%s", m[0], m[3], m[6], m[1], m[4], m[7], m[2], m[5], m[8], append);
#else
	fprintf(f, "[(%f, %f, %f)\n (%f, %f, %f)\n (%f, %f, %f)]%s", m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8], append);
#endif
}

inline void fprint_mat4(FILE* f, mat4 m, const char* append)
{
#ifndef ROW_MAJOR
	fprintf(f, "[(%f, %f, %f, %f)\n(%f, %f, %f, %f)\n(%f, %f, %f, %f)\n(%f, %f, %f, %f)]%s", m[0], m[4], m[8], m[12], m[1], m[5], m[9], m[13], m[2], m[6], m[10], m[14], m[3], m[7], m[11], m[15], append);
#else
	fprintf(f, "[(%f, %f, %f, %f)\n(%f, %f, %f, %f)\n(%f, %f, %f, %f)\n(%f, %f, %f, %f)]%s", m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8], m[9], m[10], m[11], m[12], m[13], m[14], m[15], append);
#endif
}

// macros?
inline void print_mat2(mat2 m, const char* append) { fprint_mat2(stdout, m, append); } inline void print_mat3(mat3 m, const char* append) { fprint_mat3(stdout, m, append); } inline void print_mat4(mat4 m, const char* append) { fprint_mat4(stdout, m, append); } //TODO define macros for doing array version inline vec2 mult_mat2_vec2(mat2 m, vec2 v) { vec2 r; #ifndef ROW_MAJOR r.x = m[0]*v.x + m[2]*v.y; r.y = m[1]*v.x + m[3]*v.y; #else r.x = m[0]*v.x + m[1]*v.y; r.y = m[3]*v.x + m[3]*v.y; #endif return r; } inline vec3 mult_mat3_vec3(mat3 m, vec3 v) { vec3 r; #ifndef ROW_MAJOR r.x = m[0]*v.x + m[3]*v.y + m[6]*v.z; r.y = m[1]*v.x + m[4]*v.y + m[7]*v.z; r.z = m[2]*v.x + m[5]*v.y + m[8]*v.z; #else r.x = m[0]*v.x + m[1]*v.y + m[2]*v.z; r.y = m[3]*v.x + m[4]*v.y + m[5]*v.z; r.z = m[6]*v.x + m[7]*v.y + m[8]*v.z; #endif return r; } inline vec4 mult_mat4_vec4(mat4 m, vec4 v) { vec4 r; #ifndef ROW_MAJOR r.x = m[0]*v.x + m[4]*v.y + m[8]*v.z + m[12]*v.w; r.y = m[1]*v.x + m[5]*v.y + m[9]*v.z + m[13]*v.w; r.z = m[2]*v.x + m[6]*v.y + m[10]*v.z + m[14]*v.w; r.w = m[3]*v.x + m[7]*v.y + m[11]*v.z + m[15]*v.w; #else r.x = m[0]*v.x + m[1]*v.y + m[2]*v.z + m[3]*v.w; r.y = m[4]*v.x + m[5]*v.y + m[6]*v.z + m[7]*v.w; r.z = m[8]*v.x + m[9]*v.y + m[10]*v.z + m[11]*v.w; r.w = m[12]*v.x + m[13]*v.y + m[14]*v.z + m[15]*v.w; #endif return r; } void mult_mat4_mat4(mat4 c, mat4 a, mat4 b); void load_rotation_mat3(mat3 mat, vec3 v, float angle); void load_rotation_mat4(mat4 mat, vec3 vec, float angle); //void invert_mat4(mat4 mInverse, const mat4 m); void make_perspective_matrix(mat4 mat, float fFov, float aspect, float near, float far); void make_pers_matrix(mat4 mat, float z_near, float z_far); void make_perspective_proj_matrix(mat4 mat, float left, float right, float bottom, float top, float near, float far); void make_orthographic_matrix(mat4 mat, float left, float right, float bottom, float top, float near, float far); void make_viewport_matrix(mat4 mat, int x, int y, unsigned int width, 
unsigned int height, int opengl);
void lookAt(mat4 mat, vec3 eye, vec3 center, vec3 up);

///////////Matrix transformation functions

// Load a pure axis-aligned scale matrix into m (all 9 elements overwritten).
inline void scale_mat3(mat3 m, float x, float y, float z)
{
#ifndef ROW_MAJOR
	m[0] = x; m[3] = 0; m[6] = 0;
	m[1] = 0; m[4] = y; m[7] = 0;
	m[2] = 0; m[5] = 0; m[8] = z;
#else
	m[0] = x; m[1] = 0; m[2] = 0;
	m[3] = 0; m[4] = y; m[5] = 0;
	m[6] = 0; m[7] = 0; m[8] = z;
#endif
}

// Load a pure scale matrix into m (all 16 elements overwritten; w row/col = identity).
inline void scale_mat4(mat4 m, float x, float y, float z)
{
#ifndef ROW_MAJOR
	m[ 0] = x; m[ 4] = 0; m[ 8] = 0; m[12] = 0;
	m[ 1] = 0; m[ 5] = y; m[ 9] = 0; m[13] = 0;
	m[ 2] = 0; m[ 6] = 0; m[10] = z; m[14] = 0;
	m[ 3] = 0; m[ 7] = 0; m[11] = 0; m[15] = 1;
#else
	m[ 0] = x; m[ 1] = 0; m[ 2] = 0; m[ 3] = 0;
	m[ 4] = 0; m[ 5] = y; m[ 6] = 0; m[ 7] = 0;
	m[ 8] = 0; m[ 9] = 0; m[10] = z; m[11] = 0;
	m[12] = 0; m[13] = 0; m[14] = 0; m[15] = 1;
#endif
}

// Create a Translation matrix. Only 4x4 matrices have translation components
inline void translation_mat4(mat4 m, float x, float y, float z)
{
#ifndef ROW_MAJOR
	m[ 0] = 1; m[ 4] = 0; m[ 8] = 0; m[12] = x;
	m[ 1] = 0; m[ 5] = 1; m[ 9] = 0; m[13] = y;
	m[ 2] = 0; m[ 6] = 0; m[10] = 1; m[14] = z;
	m[ 3] = 0; m[ 7] = 0; m[11] = 0; m[15] = 1;
#else
	m[ 0] = 1; m[ 1] = 0; m[ 2] = 0; m[ 3] = x;
	m[ 4] = 0; m[ 5] = 1; m[ 6] = 0; m[ 7] = y;
	m[ 8] = 0; m[ 9] = 0; m[10] = 1; m[11] = z;
	m[12] = 0; m[13] = 0; m[14] = 0; m[15] = 1;
#endif
}

// Extract a rotation matrix from a 4x4 matrix
// Extracts the rotation matrix (3x3) from a 4x4 matrix
//
// M44/M33: (row, col) element access that hides the storage-order difference.
#ifndef ROW_MAJOR
#define M44(m, row, col) m[col*4 + row]
#define M33(m, row, col) m[col*3 + row]
#else
#define M44(m, row, col) m[row*4 + col]
#define M33(m, row, col) m[row*3 + col]
#endif

// Copy the upper-left 3x3 of src into dst; if 'normalize' is nonzero each
// basis column is normalized first (strips scale, keeps rotation).
inline void extract_rotation_mat4(mat3 dst, mat4 src, int normalize)
{
	vec3 tmp;
	if (normalize) {
		// column 0
		tmp.x = M44(src, 0, 0);
		tmp.y = M44(src, 1, 0);
		tmp.z = M44(src, 2, 0);
		normalize_vec3(&tmp);
		M33(dst, 0, 0) = tmp.x;
		M33(dst, 1, 0) = tmp.y;
		M33(dst, 2, 0) = tmp.z;
		// column 1 (continues on the next chunk line)
		tmp.x = M44(src, 0, 1);
		tmp.y = M44(src, 1, 1);
		tmp.z =
M44(src, 2, 1);
		normalize_vec3(&tmp);
		M33(dst, 0, 1) = tmp.x;
		M33(dst, 1, 1) = tmp.y;
		M33(dst, 2, 1) = tmp.z;
		// column 2
		tmp.x = M44(src, 0, 2);
		tmp.y = M44(src, 1, 2);
		tmp.z = M44(src, 2, 2);
		normalize_vec3(&tmp);
		M33(dst, 0, 2) = tmp.x;
		M33(dst, 1, 2) = tmp.y;
		M33(dst, 2, 2) = tmp.z;
	} else {
		// straight copy of the upper-left 3x3, no normalization
		M33(dst, 0, 0) = M44(src, 0, 0);
		M33(dst, 1, 0) = M44(src, 1, 0);
		M33(dst, 2, 0) = M44(src, 2, 0);
		M33(dst, 0, 1) = M44(src, 0, 1);
		M33(dst, 1, 1) = M44(src, 1, 1);
		M33(dst, 2, 1) = M44(src, 2, 1);
		M33(dst, 0, 2) = M44(src, 0, 2);
		M33(dst, 1, 2) = M44(src, 1, 2);
		M33(dst, 2, 2) = M44(src, 2, 2);
	}
}
#undef M33
#undef M44

#ifndef EXCLUDE_GLSL
// GLSL functions
//

// Clamp f to [0, 1] (GLSL saturate-style helper).
static inline float clamp_01(float f)
{
	if (f < 0.0f) return 0.0f;
	if (f > 1.0f) return 1.0f;
	return f;
}

// Clamp x to [minVal, maxVal] (GLSL clamp()).
static inline float clamp(float x, float minVal, float maxVal)
{
	if (x < minVal) return minVal;
	if (x > maxVal) return maxVal;
	return x;
}

// Generate a component-wise vec2/vec3/vec4 wrapper around a scalar function.
#define PGL_VECTORIZE2_VEC(func) \
inline vec2 func##_vec2(vec2 v) \
{ \
	return make_vec2(func(v.x), func(v.y)); \
}
#define PGL_VECTORIZE3_VEC(func) \
inline vec3 func##_vec3(vec3 v) \
{ \
	return make_vec3(func(v.x), func(v.y), func(v.z)); \
}
#define PGL_VECTORIZE4_VEC(func) \
inline vec4 func##_vec4(vec4 v) \
{ \
	return make_vec4(func(v.x), func(v.y), func(v.z), func(v.w)); \
}
#define PGL_VECTORIZE_VEC(func) \
	PGL_VECTORIZE2_VEC(func) \
	PGL_VECTORIZE3_VEC(func) \
	PGL_VECTORIZE4_VEC(func)

// Same generators but for static inline wrappers (used for file-local funcs).
#define PGL_STATIC_VECTORIZE2_VEC(func) \
static inline vec2 func##_vec2(vec2 v) \
{ \
	return make_vec2(func(v.x), func(v.y)); \
}
#define PGL_STATIC_VECTORIZE3_VEC(func) \
static inline vec3 func##_vec3(vec3 v) \
{ \
	return make_vec3(func(v.x), func(v.y), func(v.z)); \
}
#define PGL_STATIC_VECTORIZE4_VEC(func) \
static inline vec4 func##_vec4(vec4 v) \
{ \
	return make_vec4(func(v.x), func(v.y), func(v.z), func(v.w)); \
}
#define PGL_STATIC_VECTORIZE_VEC(func) \
	PGL_STATIC_VECTORIZE2_VEC(func) \
	PGL_STATIC_VECTORIZE3_VEC(func) \
	PGL_STATIC_VECTORIZE4_VEC(func)

static inline vec2 clamp_vec2(vec2 x,
float minVal, float maxVal) { return make_vec2(clamp(x.x, minVal, maxVal), clamp(x.y, minVal, maxVal)); } static inline vec3 clamp_vec3(vec3 x, float minVal, float maxVal) { return make_vec3(clamp(x.x, minVal, maxVal), clamp(x.y, minVal, maxVal), clamp(x.z, minVal, maxVal)); } static inline vec4 clamp_vec4(vec4 x, float minVal, float maxVal) { return make_vec4(clamp(x.x, minVal, maxVal), clamp(x.y, minVal, maxVal), clamp(x.z, minVal, maxVal), clamp(x.w, minVal, maxVal)); } static float distance_vec2(vec2 a, vec2 b) { return length_vec2(sub_vec2s(a, b)); } static float distance_vec3(vec3 a, vec3 b) { return length_vec3(sub_vec3s(a, b)); } static inline vec3 reflect_vec3(vec3 i, vec3 n) { return sub_vec3s(i, scale_vec3(n, 2 * dot_vec3s(i, n))); } static inline float smoothstep(float edge0, float edge1, float x) { float t = clamp_01((x-edge0)/(edge1-edge0)); return t*t*(3 - 2*t); } static inline float mix(float x, float y, float a) { return x*(1-a) + y*a; } static inline vec2 mix_vec2s(vec2 x, vec2 y, float a) { return add_vec2s(scale_vec2(x, (1-a)), scale_vec2(y, a)); } static inline vec3 mix_vec3s(vec3 x, vec3 y, float a) { return add_vec3s(scale_vec3(x, (1-a)), scale_vec3(y, a)); } static inline vec4 mix_vec4s(vec4 x, vec4 y, float a) { return add_vec4s(scale_vec4(x, (1-a)), scale_vec4(y, a)); } // TODO should I use the float versions or the double versions for slightly // increased accuracy? 
// Component-wise vector wrappers for the single-precision <math.h> functions,
// e.g. fabsf_vec2/3/4, sinf_vec2/3/4, ...
PGL_VECTORIZE_VEC(fabsf)
PGL_VECTORIZE_VEC(floorf)
PGL_VECTORIZE_VEC(ceilf)
PGL_VECTORIZE_VEC(sinf)
PGL_VECTORIZE_VEC(cosf)
PGL_VECTORIZE_VEC(tanf)
PGL_VECTORIZE_VEC(asinf)
PGL_VECTORIZE_VEC(acosf)
PGL_VECTORIZE_VEC(atanf)
PGL_VECTORIZE_VEC(sinhf)
PGL_VECTORIZE_VEC(coshf)
PGL_VECTORIZE_VEC(tanhf)

// GLSL radians()/degrees()/fract() equivalents.
static inline float radians(float degrees) { return DEG_TO_RAD(degrees); }
static inline float degrees(float radians) { return RAD_TO_DEG(radians); }
// NOTE(review): uses double-precision floor() on a float argument; result is
// the same as floorf here, see the file's own TODO above about float vs double.
static inline float fract(float x) { return x - floor(x); }
PGL_STATIC_VECTORIZE_VEC(radians)
PGL_STATIC_VECTORIZE_VEC(degrees)
PGL_STATIC_VECTORIZE_VEC(fract)

#endif

// 8-bit-per-channel RGBA color, matching the framebuffer's pixel layout.
typedef struct Color
{
	u8 r;
	u8 g;
	u8 b;
	u8 a;
} Color;

/*
Color make_Color()
{
	r = g = b = 0;
	a = 255;
}
*/

inline Color make_Color(u8 red, u8 green, u8 blue, u8 alpha)
{
	Color c = { red, green, blue, alpha };
	return c;
}

inline void print_Color(Color c, const char* append)
{
	printf("(%d, %d, %d, %d)%s", c.r, c.g, c.b, c.a, append);
}

// Convert a [0,1] float color to 8-bit channels; truncates rather than rounds.
inline Color vec4_to_Color(vec4 v)
{
	Color c;
	//assume all in the range of [0, 1]
	//TODO(rswinkle): round like HH? ie (u8)(v.x * 255.0f + 0.5f)
	c.r = v.x * 255;
	c.g = v.y * 255;
	c.b = v.z * 255;
	c.a = v.w * 255;
	return c;
}

inline vec4 Color_to_vec4(Color c)
{
	vec4 v = { (float)c.r/255.0f, (float)c.g/255.0f, (float)c.b/255.0f, (float)c.a/255.0f };
	return v;
}

// Implicit 2D line A*x + B*y + C = 0 (used by the rasterizer's edge functions).
typedef struct Line
{
	float A, B, C;
} Line;

// Build the implicit line through (x1,y1) and (x2,y2).
inline Line make_Line(float x1, float y1, float x2, float y2)
{
	Line l;
	l.A = y1 - y2;
	l.B = x2 - x1;
	l.C = x1*y2 - x2*y1;
	return l;
}

// Evaluate the line equation at (x, y); sign tells which side the point is on.
inline float line_func(Line* line, float x, float y)
{
	return line->A*x + line->B*y + line->C;
}
// Solve for y at a given x (divides by B; undefined for horizontal-normal lines).
inline float line_findy(Line* line, float x)
{
	return -(line->A*x + line->C)/line->B;
}
// Solve for x at a given y (divides by A).
inline float line_findx(Line* line, float y)
{
	return -(line->B*y + line->C)/line->A;
}

typedef struct Plane
{
	vec3 n;	//normal points x on plane satisfy n dot x = d
	float d; //d = n dot p
} Plane;

/*
Plane() {}
Plane(vec3 a, vec3 b, vec3 c)	//ccw winding
{
	n = cross_product(b-a, c-a).norm();
	d = n * a;
}
*/

//int intersect_segment_plane(vec3 a, vec3 b, Plane p, float* t, vec3* q);

// TODO hmm would have to change mat3 and mat4 to proper
// structures to have operators return them since our
// current mat*mat functions take the output mat as a parameter
#ifdef __cplusplus
// C++-only convenience operator: mat3 * vec3, same math as mult_mat3_vec3.
inline vec3 operator*(mat3 m, vec3& v)
{
	vec3 r;
#ifndef ROW_MAJOR
	r.x = m[0]*v.x + m[3]*v.y + m[6]*v.z;
	r.y = m[1]*v.x + m[4]*v.y + m[7]*v.z;
	r.z = m[2]*v.x + m[5]*v.y + m[8]*v.z;
#else
	r.x = m[0]*v.x + m[1]*v.y + m[2]*v.z;
	r.y = m[3]*v.x + m[4]*v.y + m[5]*v.z;
	r.z = m[6]*v.x + m[7]*v.y + m[8]*v.z;
#endif
	return r;
}
#endif

/* CRSW_MATH_H */
#endif

// --- generated cvector template instantiation for element type 'float' ---
#ifndef CVECTOR_float_H
#define CVECTOR_float_H

#include <stdlib.h>

#ifdef __cplusplus
extern "C" {
#endif

/** Data structure for float vector. */
typedef struct cvector_float
{
	float* a;        /**< Array. */
	size_t size;     /**< Current size (amount you use when manipulating array directly). */
	size_t capacity; /**< Allocated size of array; always >= size.
*/
} cvector_float;

// Default extra capacity added on growth; tunable at runtime.
extern size_t CVEC_float_SZ;

// Initialize an existing struct (stack or embedded) / heap-allocate a new one.
int cvec_float(cvector_float* vec, size_t size, size_t capacity);
int cvec_init_float(cvector_float* vec, float* vals, size_t num);

cvector_float* cvec_float_heap(size_t size, size_t capacity);
cvector_float* cvec_init_float_heap(float* vals, size_t num);

int cvec_copyc_float(void* dest, void* src);
int cvec_copy_float(cvector_float* dest, cvector_float* src);

int cvec_push_float(cvector_float* vec, float a);
float cvec_pop_float(cvector_float* vec);

int cvec_extend_float(cvector_float* vec, size_t num);
int cvec_insert_float(cvector_float* vec, size_t i, float a);
int cvec_insert_array_float(cvector_float* vec, size_t i, float* a, size_t num);
float cvec_replace_float(cvector_float* vec, size_t i, float a);
void cvec_erase_float(cvector_float* vec, size_t start, size_t end);
int cvec_reserve_float(cvector_float* vec, size_t size);
int cvec_set_cap_float(cvector_float* vec, size_t size);
void cvec_set_val_sz_float(cvector_float* vec, float val);
void cvec_set_val_cap_float(cvector_float* vec, float val);

float* cvec_back_float(cvector_float* vec);

void cvec_clear_float(cvector_float* vec);
void cvec_free_float_heap(void* vec);
void cvec_free_float(void* vec);

#ifdef __cplusplus
}
#endif

/* CVECTOR_float_H */
#endif


#ifdef CVECTOR_float_IMPLEMENTATION

size_t CVEC_float_SZ = 50;

// Growth policy: roughly double the capacity on reallocation.
#define CVEC_float_ALLOCATOR(x) ((x+1) * 2)

// The allocator hooks must be overridden as a complete set or not at all.
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif

// Default allocator/memmove/assert hooks (overridable before inclusion).
#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif

#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif

#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif

// Allocate and initialize a vector on the heap; returns NULL on allocation
// failure.  Capacity is honored if > size, otherwise size + CVEC_float_SZ.
cvector_float* cvec_float_heap(size_t size, size_t capacity)
{
	cvector_float* vec;
	if (!(vec = (cvector_float*)CVEC_MALLOC(sizeof(cvector_float)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}

	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_float_SZ;

	if (!(vec->a = (float*)CVEC_MALLOC(vec->capacity*sizeof(float)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}

	return vec;
}

// Heap-allocate a vector and copy num elements from vals into it.
cvector_float* cvec_init_float_heap(float* vals, size_t num)
{
	cvector_float* vec;
	if (!(vec = (cvector_float*)CVEC_MALLOC(sizeof(cvector_float)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}

	vec->capacity = num + CVEC_float_SZ;
	vec->size = num;
	if (!(vec->a = (float*)CVEC_MALLOC(vec->capacity*sizeof(float)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}

	CVEC_MEMMOVE(vec->a, vals, sizeof(float)*num);

	return vec;
}

// Initialize a caller-owned struct; returns 0 on allocation failure, 1 on success.
int cvec_float(cvector_float* vec, size_t size, size_t capacity)
{
	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ?
capacity : vec->size + CVEC_float_SZ;

	if (!(vec->a = (float*)CVEC_MALLOC(vec->capacity*sizeof(float)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}

	return 1;
}

// Initialize a caller-owned struct and copy num elements from vals into it.
int cvec_init_float(cvector_float* vec, float* vals, size_t num)
{
	vec->capacity = num + CVEC_float_SZ;
	vec->size = num;
	if (!(vec->a = (float*)CVEC_MALLOC(vec->capacity*sizeof(float)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}

	CVEC_MEMMOVE(vec->a, vals, sizeof(float)*num);

	return 1;
}

// void*-typed copy-construct wrapper (for use as a generic callback);
// clears dest's fields first so cvec_copy doesn't realloc a garbage pointer.
int cvec_copyc_float(void* dest, void* src)
{
	cvector_float* vec1 = (cvector_float*)dest;
	cvector_float* vec2 = (cvector_float*)src;

	vec1->a = NULL;
	vec1->size = 0;
	vec1->capacity = 0;

	return cvec_copy_float(vec1, vec2);
}

// Deep-copy src into dest (dest->a may be NULL or an existing allocation).
int cvec_copy_float(cvector_float* dest, cvector_float* src)
{
	float* tmp = NULL;
	if (!(tmp = (float*)CVEC_REALLOC(dest->a, src->capacity*sizeof(float)))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	dest->a = tmp;

	CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(float));
	dest->size = src->size;
	dest->capacity = src->capacity;
	return 1;
}

// Append a; grows via CVEC_float_ALLOCATOR when full.  Returns 0 on OOM.
int cvec_push_float(cvector_float* vec, float a)
{
	float* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		vec->a[vec->size++] = a;
	} else {
		tmp_sz = CVEC_float_ALLOCATOR(vec->capacity);
		if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->a[vec->size++] = a;
		vec->capacity = tmp_sz;
	}
	return 1;
}

// Remove and return the last element (no bounds check, mirrors std behavior).
float cvec_pop_float(cvector_float* vec)
{
	return vec->a[--vec->size];
}

// Pointer to the last element.
float* cvec_back_float(cvector_float* vec)
{
	return &vec->a[vec->size-1];
}

// Grow size by num (elements uninitialized), reallocating if needed.
int cvec_extend_float(cvector_float* vec, size_t num)
{
	float* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_float_SZ;
		if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = tmp_sz;
	}

	vec->size += num;
	return 1;
}

// Insert a at index i, shifting the tail up by one.
int cvec_insert_float(cvector_float* vec, size_t i, float a)
{
float* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(float));
		vec->a[i] = a;
	} else {
		// full: grow first, then shift and insert
		tmp_sz = CVEC_float_ALLOCATOR(vec->capacity);
		if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(float));
		vec->a[i] = a;
		vec->capacity = tmp_sz;
	}

	vec->size++;
	return 1;
}

// Insert num elements from a at index i, shifting the tail up by num.
int cvec_insert_array_float(cvector_float* vec, size_t i, float* a, size_t num)
{
	float* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_float_SZ;
		if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = tmp_sz;
	}

	CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(float));
	CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(float));
	vec->size += num;
	return 1;
}

// Overwrite element i with a and return the old value.
float cvec_replace_float(cvector_float* vec, size_t i, float a)
{
	float tmp = vec->a[i];
	vec->a[i] = a;
	return tmp;
}

// Erase the inclusive range [start, end], shifting the tail down.
void cvec_erase_float(cvector_float* vec, size_t start, size_t end)
{
	size_t d = end - start + 1;
	CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(float));
	vec->size -= d;
}

// Ensure capacity >= size (plus slack); never shrinks.
int cvec_reserve_float(cvector_float* vec, size_t size)
{
	float* tmp;
	if (vec->capacity < size) {
		if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*(size+CVEC_float_SZ)))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = size + CVEC_float_SZ;
	}
	return 1;
}

// Set capacity exactly, truncating size if necessary.
int cvec_set_cap_float(cvector_float* vec, size_t size)
{
	float* tmp;
	if (size < vec->size) {
		vec->size = size;
	}

	if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*size))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	vec->a = tmp;
	vec->capacity = size;
	return 1;
}

// Fill the first 'size' elements with val.
void cvec_set_val_sz_float(cvector_float* vec, float val)
{
	size_t i;
	for
(i=0; i<vec->capacity; i++) { vec->a[i] = val; } } void cvec_clear_float(cvector_float* vec) { vec->size = 0; } void cvec_free_float_heap(void* vec) { cvector_float* tmp = (cvector_float*)vec; if (!tmp) return; CVEC_FREE(tmp->a); CVEC_FREE(tmp); } void cvec_free_float(void* vec) { cvector_float* tmp = (cvector_float*)vec; CVEC_FREE(tmp->a); tmp->size = 0; tmp->capacity = 0; } #endif #include <stdint.h> typedef uint32_t GLuint; typedef int32_t GLint; typedef int64_t GLint64; typedef uint64_t GLuint64; typedef uint16_t GLushort; typedef int16_t GLshort; typedef uint8_t GLubyte; typedef int8_t GLbyte; typedef char GLchar; typedef int32_t GLsizei; //they use plain int not unsigned like you'd think typedef int GLenum; typedef int GLbitfield; typedef float GLfloat; typedef float GLclampf; typedef double GLdouble; typedef void GLvoid; typedef uint8_t GLboolean; enum { //gl error codes GL_NO_ERROR = 0, GL_INVALID_ENUM, GL_INVALID_VALUE, GL_INVALID_OPERATION, GL_INVALID_FRAMEBUFFER_OPERATION, GL_OUT_OF_MEMORY, //buffer types GL_ARRAY_BUFFER, GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, GL_ELEMENT_ARRAY_BUFFER, GL_PIXEL_PACK_BUFFER, GL_PIXEL_UNPACK_BUFFER, GL_TEXTURE_BUFFER, GL_TRANSFORM_FEEDBACK_BUFFER, GL_UNIFORM_BUFFER, GL_NUM_BUFFER_TYPES, // Framebuffer stuff (unused/supported yet) GL_FRAMEBUFFER, GL_DRAW_FRAMEBUFFER, GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1, GL_COLOR_ATTACHMENT2, GL_COLOR_ATTACHMENT3, GL_COLOR_ATTACHMENT4, GL_COLOR_ATTACHMENT5, GL_COLOR_ATTACHMENT6, GL_COLOR_ATTACHMENT7, GL_DEPTH_ATTACHMENT, GL_STENCIL_ATTACHMENT, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, //buffer use hints (not used currently) GL_STREAM_DRAW, GL_STREAM_READ, GL_STREAM_COPY, GL_STATIC_DRAW, GL_STATIC_READ, GL_STATIC_COPY, GL_DYNAMIC_DRAW, GL_DYNAMIC_READ, GL_DYNAMIC_COPY, // mapped buffer access GL_READ_ONLY, GL_WRITE_ONLY, GL_READ_WRITE, //polygon modes GL_POINT, GL_LINE, GL_FILL, //primitive types GL_POINTS, GL_LINES, GL_LINE_STRIP, GL_LINE_LOOP, 
	// (continuation of the PGL GLenum value enum)
	GL_TRIANGLES,
	GL_TRIANGLE_STRIP,
	GL_TRIANGLE_FAN,

	// unsupported primitives because I don't support the geometry shader
	GL_LINE_STRIP_AJACENCY,
	GL_LINES_AJACENCY,
	GL_TRIANGLES_AJACENCY,
	GL_TRIANGLE_STRIP_AJACENCY,

	//depth functions (and stencil funcs)
	GL_LESS,
	GL_LEQUAL,
	GL_GREATER,
	GL_GEQUAL,
	GL_EQUAL,
	GL_NOTEQUAL,
	GL_ALWAYS,
	GL_NEVER,

	//blend functions
	GL_ZERO,
	GL_ONE,
	GL_SRC_COLOR,
	GL_ONE_MINUS_SRC_COLOR,
	GL_DST_COLOR,
	GL_ONE_MINUS_DST_COLOR,
	GL_SRC_ALPHA,
	GL_ONE_MINUS_SRC_ALPHA,
	GL_DST_ALPHA,
	GL_ONE_MINUS_DST_ALPHA,
	GL_CONSTANT_COLOR,
	GL_ONE_MINUS_CONSTANT_COLOR,
	GL_CONSTANT_ALPHA,
	GL_ONE_MINUS_CONSTANT_ALPHA,
	GL_SRC_ALPHA_SATURATE,
	NUM_BLEND_FUNCS,
	// NOTE(review): the SRC1 dual-source functions sit *after* NUM_BLEND_FUNCS,
	// so range checks against NUM_BLEND_FUNCS deliberately exclude them.
	GL_SRC1_COLOR,
	GL_ONE_MINUS_SRC1_COLOR,
	GL_SRC1_ALPHA,
	GL_ONE_MINUS_SRC1_ALPHA,
	//NUM_BLEND_FUNCS

	//blend equations
	GL_FUNC_ADD,
	GL_FUNC_SUBTRACT,
	GL_FUNC_REVERSE_SUBTRACT,
	GL_MIN,
	GL_MAX,
	NUM_BLEND_EQUATIONS,

	//texture types
	GL_TEXTURE_UNBOUND,
	GL_TEXTURE_1D,
	GL_TEXTURE_2D,
	GL_TEXTURE_3D,
	GL_TEXTURE_1D_ARRAY,
	GL_TEXTURE_2D_ARRAY,
	GL_TEXTURE_RECTANGLE,
	GL_TEXTURE_CUBE_MAP,
	GL_NUM_TEXTURE_TYPES,
	GL_TEXTURE_CUBE_MAP_POSITIVE_X,
	GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
	GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
	GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
	GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
	GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,

	//texture parameters i
	GL_TEXTURE_BASE_LEVEL,
	GL_TEXTURE_BORDER_COLOR, // doesn't actually do anything
	GL_TEXTURE_COMPARE_FUNC,
	GL_TEXTURE_COMPARE_MODE,
	GL_TEXTURE_LOD_BIAS,
	GL_TEXTURE_MIN_FILTER,
	GL_TEXTURE_MAG_FILTER,
	GL_TEXTURE_MIN_LOD,
	GL_TEXTURE_MAX_LOD,
	GL_TEXTURE_MAX_LEVEL,
	GL_TEXTURE_SWIZZLE_R,
	GL_TEXTURE_SWIZZLE_G,
	GL_TEXTURE_SWIZZLE_B,
	GL_TEXTURE_SWIZZLE_A,
	GL_TEXTURE_SWIZZLE_RGBA,
	GL_TEXTURE_WRAP_S,
	GL_TEXTURE_WRAP_T,
	GL_TEXTURE_WRAP_R,

	//texture parameter values
	GL_REPEAT,
	GL_CLAMP_TO_EDGE,
	GL_CLAMP_TO_BORDER,  // not supported, alias to CLAMP_TO_EDGE
	GL_MIRRORED_REPEAT,
	GL_NEAREST,
	GL_LINEAR,
	GL_NEAREST_MIPMAP_NEAREST,
	GL_NEAREST_MIPMAP_LINEAR,
	GL_LINEAR_MIPMAP_NEAREST,
	GL_LINEAR_MIPMAP_LINEAR,

	//texture/depth/stencil formats
	// (continuation of the PGL GLenum value enum)
	GL_RED,
	GL_RG,
	GL_RGB,
	GL_BGR,
	GL_RGBA,
	GL_BGRA,
	GL_COMPRESSED_RED,
	GL_COMPRESSED_RG,
	GL_COMPRESSED_RGB,
	GL_COMPRESSED_RGBA,
	//lots more go here but not important

	// None of these are used currently just to help porting
	GL_DEPTH_COMPONENT16,
	GL_DEPTH_COMPONENT24,
	GL_DEPTH_COMPONENT32,
	GL_DEPTH_COMPONENT32F, // PGL uses a float depth buffer
	GL_DEPTH24_STENCIL8,
	GL_DEPTH32F_STENCIL8,  // <- we do this
	GL_STENCIL_INDEX1,
	GL_STENCIL_INDEX4,
	GL_STENCIL_INDEX8, // this
	GL_STENCIL_INDEX16,

	//PixelStore parameters
	GL_UNPACK_ALIGNMENT,
	GL_PACK_ALIGNMENT,

	// Texture unit's (not used but eases porting)
	// but I'm not doing 80 or bothering with GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS
	GL_TEXTURE0,
	GL_TEXTURE1,
	GL_TEXTURE2,
	GL_TEXTURE3,
	GL_TEXTURE4,
	GL_TEXTURE5,
	GL_TEXTURE6,
	GL_TEXTURE7,

	//implemented glEnable options
	GL_CULL_FACE,
	GL_DEPTH_TEST,
	GL_DEPTH_CLAMP,
	GL_LINE_SMOOTH,  // TODO correctly
	GL_BLEND,
	GL_COLOR_LOGIC_OP,
	GL_POLYGON_OFFSET_FILL,
	GL_SCISSOR_TEST,
	GL_STENCIL_TEST,

	//provoking vertex
	GL_FIRST_VERTEX_CONVENTION,
	GL_LAST_VERTEX_CONVENTION,

	//point sprite stuff
	GL_POINT_SPRITE_COORD_ORIGIN,
	GL_UPPER_LEFT,
	GL_LOWER_LEFT,

	//front face determination/culling
	GL_FRONT,
	GL_BACK,
	GL_FRONT_AND_BACK,
	GL_CCW,
	GL_CW,

	// glLogicOp logic ops
	GL_CLEAR,
	GL_SET,
	GL_COPY,
	GL_COPY_INVERTED,
	GL_NOOP,
	GL_AND,
	GL_NAND,
	GL_OR,
	GL_NOR,
	GL_XOR,
	GL_EQUIV,
	GL_AND_REVERSE,
	GL_AND_INVERTED,
	GL_OR_REVERSE,
	GL_OR_INVERTED,
	GL_INVERT,

	// glStencilOp
	GL_KEEP,
	//GL_ZERO, already defined in blend functions aggh
	GL_REPLACE,
	GL_INCR,
	GL_INCR_WRAP,
	GL_DECR,
	GL_DECR_WRAP,
	//GL_INVERT,   // already defined in LogicOps

	//data types
	GL_UNSIGNED_BYTE,
	GL_BYTE,
	GL_BITMAP,
	GL_UNSIGNED_SHORT,
	GL_SHORT,
	GL_UNSIGNED_INT,
	GL_INT,
	GL_FLOAT,

	//glGetString info
	GL_VENDOR,
	GL_RENDERER,
	GL_VERSION,
	GL_SHADING_LANGUAGE_VERSION,

	// glGet enums
	GL_POLYGON_OFFSET_FACTOR,
	GL_POLYGON_OFFSET_UNITS,
	GL_POINT_SIZE,
	GL_DEPTH_CLEAR_VALUE,
	GL_DEPTH_RANGE,
	GL_STENCIL_WRITE_MASK,
	GL_STENCIL_REF,
	GL_STENCIL_VALUE_MASK,
	GL_STENCIL_FUNC,
	// (continuation of the PGL GLenum value enum)
	GL_STENCIL_FAIL,
	GL_STENCIL_PASS_DEPTH_FAIL,
	GL_STENCIL_PASS_DEPTH_PASS,

	GL_STENCIL_BACK_WRITE_MASK,
	GL_STENCIL_BACK_REF,
	GL_STENCIL_BACK_VALUE_MASK,
	GL_STENCIL_BACK_FUNC,
	GL_STENCIL_BACK_FAIL,
	GL_STENCIL_BACK_PASS_DEPTH_FAIL,
	GL_STENCIL_BACK_PASS_DEPTH_PASS,

	GL_LOGIC_OP_MODE,
	GL_BLEND_SRC_RGB,
	GL_BLEND_SRC_ALPHA,
	GL_BLEND_DST_RGB,
	GL_BLEND_DST_ALPHA,

	GL_BLEND_EQUATION_RGB,
	GL_BLEND_EQUATION_ALPHA,

	GL_CULL_FACE_MODE,
	GL_FRONT_FACE,
	GL_DEPTH_FUNC,
	//GL_POINT_SPRITE_COORD_ORIGIN,
	GL_PROVOKING_VERTEX,

	GL_POLYGON_MODE,

	//shader types etc. not used, just here for compatibility add what you
	//need so you can use your OpenGL code with PortableGL with minimal changes
	GL_COMPUTE_SHADER,
	GL_VERTEX_SHADER,
	GL_TESS_CONTROL_SHADER,
	GL_TESS_EVALUATION_SHADER,
	GL_GEOMETRY_SHADER,
	GL_FRAGMENT_SHADER,

	GL_INFO_LOG_LENGTH,
	GL_COMPILE_STATUS,
	GL_LINK_STATUS,

	// buffer clearing selections are a mask so can't have overlap
	// choosing arbitrary bits higher than all other constants in enum
	GL_COLOR_BUFFER_BIT = 1 << 10,
	GL_DEPTH_BUFFER_BIT = 1 << 11,
	GL_STENCIL_BUFFER_BIT = 1 << 12
};

#define GL_FALSE 0
#define GL_TRUE 1

// Hard implementation limits for this software rasterizer.
#define MAX_VERTICES 500000
#define GL_MAX_VERTEX_ATTRIBS 16
#define GL_MAX_VERTEX_OUTPUT_COMPONENTS 64
#define GL_MAX_DRAW_BUFFERS 8
#define GL_MAX_COLOR_ATTACHMENTS 8

//TODO use prefix like GL_SMOOTH? PGL_SMOOTH?
enum { SMOOTH, FLAT, NOPERSPECTIVE }; //TODO NOT USED YET typedef struct PerVertex { vec4 gl_Position; float gl_PointSize; float gl_ClipDistance[6]; } PerVertex; typedef struct Shader_Builtins { //PerVertex gl_PerVertex; vec4 gl_Position; GLint gl_InstanceID; vec2 gl_PointCoord; GLboolean gl_FrontFacing; vec4 gl_FragCoord; vec4 gl_FragColor; //vec4 gl_FragData[GL_MAX_DRAW_BUFFERS]; float gl_FragDepth; GLboolean discard; } Shader_Builtins; typedef void (*vert_func)(float* vs_output, void* vertex_attribs, Shader_Builtins* builtins, void* uniforms); typedef void (*frag_func)(float* fs_input, Shader_Builtins* builtins, void* uniforms); typedef struct glProgram { vert_func vertex_shader; frag_func fragment_shader; void* uniform; int vs_output_size; GLenum interpolation[GL_MAX_VERTEX_OUTPUT_COMPONENTS]; // Need to come up with a better name to mean "I write to glFragDepth or discard // pixels in this shader so you can't do pre-shader depth testing... not that I currently // support that anyway at this point but maybe eventually GLboolean fragdepth_or_discard; GLboolean deleted; } glProgram; typedef struct glBuffer { /* GLenum usage; GLenum access; GLint access_flags; void* map_pointer; GLsizei map_offset; GLsizei map_length; */ GLsizei size; GLenum type; u8* data; GLboolean deleted; // true if the user uses one of the pgl data extension functions that // doesn't copy the data. 
// If true, PGL does not free it when deleting the buffer GLboolean user_owned; } glBuffer; typedef struct glVertex_Attrib { GLint size; // number of components 1-4 GLenum type; // GL_FLOAT, default GLsizei stride; // GLsizei offset; // GLboolean normalized; unsigned int buf; GLboolean enabled; GLuint divisor; } glVertex_Attrib; void init_glVertex_Attrib(glVertex_Attrib* v); //void init_glVertex_Attrib(glVertex_Attrib* v, GLint size, GLenum type, GLsizei stride, GLsizei offset, GLboolean normalized, Buffer* buf); typedef struct glVertex_Array { glVertex_Attrib vertex_attribs[GL_MAX_VERTEX_ATTRIBS]; //GLuint n_array_bufs; GLuint element_buffer; GLboolean deleted; } glVertex_Array; void init_glVertex_Array(glVertex_Array* v); typedef struct glTexture { unsigned int w; unsigned int h; unsigned int d; int base_level; // vec4 border_color; // no longer support borders not worth it GLenum mag_filter; GLenum min_filter; GLenum wrap_s; GLenum wrap_t; GLenum wrap_r; // TODO? //GLenum datatype; // only support GL_UNSIGNED_BYTE so not worth having yet GLenum format; // GL_RED, GL_RG, GL_RGB/BGR, GL_RGBA/BGRA GLenum type; // GL_TEXTURE_UNBOUND, GL_TEXTURE_2D etc. GLboolean deleted; // TODO same meaning as in glBuffer GLboolean user_owned; u8* data; } glTexture; typedef struct glVertex { vec4 clip_space; vec4 screen_space; int clip_code; int edge_flag; float* vs_out; } glVertex; typedef struct glFramebuffer { u8* buf; u8* lastrow; //better or worse than + h-1 every pixel draw? size_t w; size_t h; } glFramebuffer; typedef struct Vertex_Shader_output { int size; GLenum* interpolation; // TODO Should this be a vector? or just a pointer? // All I currently use is the constructor, reserve and free... // I could remove the rest of the cvector_float functions to save on bloat // but still easily add back functions as needed... 
// // or like comment in init_glContext says just allocate to the max size and be done cvector_float output_buf; } Vertex_Shader_output; typedef void (*draw_triangle_func)(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke); #ifndef CVECTOR_glVertex_Array_H #define CVECTOR_glVertex_Array_H #include <stdlib.h> #ifdef __cplusplus extern "C" { #endif /** Data structure for glVertex_Array vector. */ typedef struct cvector_glVertex_Array { glVertex_Array* a; /**< Array. */ size_t size; /**< Current size (amount you use when manipulating array directly). */ size_t capacity; /**< Allocated size of array; always >= size. */ } cvector_glVertex_Array; extern size_t CVEC_glVertex_Array_SZ; int cvec_glVertex_Array(cvector_glVertex_Array* vec, size_t size, size_t capacity); int cvec_init_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array* vals, size_t num); cvector_glVertex_Array* cvec_glVertex_Array_heap(size_t size, size_t capacity); cvector_glVertex_Array* cvec_init_glVertex_Array_heap(glVertex_Array* vals, size_t num); int cvec_copyc_glVertex_Array(void* dest, void* src); int cvec_copy_glVertex_Array(cvector_glVertex_Array* dest, cvector_glVertex_Array* src); int cvec_push_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array a); glVertex_Array cvec_pop_glVertex_Array(cvector_glVertex_Array* vec); int cvec_extend_glVertex_Array(cvector_glVertex_Array* vec, size_t num); int cvec_insert_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array a); int cvec_insert_array_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array* a, size_t num); glVertex_Array cvec_replace_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array a); void cvec_erase_glVertex_Array(cvector_glVertex_Array* vec, size_t start, size_t end); int cvec_reserve_glVertex_Array(cvector_glVertex_Array* vec, size_t size); int cvec_set_cap_glVertex_Array(cvector_glVertex_Array* vec, size_t size); void 
cvec_set_val_sz_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array val);
void cvec_set_val_cap_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array val);

glVertex_Array* cvec_back_glVertex_Array(cvector_glVertex_Array* vec);

void cvec_clear_glVertex_Array(cvector_glVertex_Array* vec);
void cvec_free_glVertex_Array_heap(void* vec);
void cvec_free_glVertex_Array(void* vec);

#ifdef __cplusplus
}
#endif

/* CVECTOR_glVertex_Array_H */
#endif


#ifdef CVECTOR_glVertex_Array_IMPLEMENTATION

size_t CVEC_glVertex_Array_SZ = 50;

// Growth policy: roughly double the capacity on reallocation.
#define CVEC_glVertex_Array_ALLOCATOR(x) ((x+1) * 2)

// The allocator hooks must be overridden as a complete set or not at all.
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif

#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif

#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif

#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif

// Allocate and initialize a vector on the heap; returns NULL on allocation failure.
cvector_glVertex_Array* cvec_glVertex_Array_heap(size_t size, size_t capacity)
{
	cvector_glVertex_Array* vec;
	if (!(vec = (cvector_glVertex_Array*)CVEC_MALLOC(sizeof(cvector_glVertex_Array)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}

	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ?
capacity : vec->size + CVEC_glVertex_Array_SZ;

	if (!(vec->a = (glVertex_Array*)CVEC_MALLOC(vec->capacity*sizeof(glVertex_Array)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}

	return vec;
}

// Heap-allocate a vector and copy num elements from vals into it.
cvector_glVertex_Array* cvec_init_glVertex_Array_heap(glVertex_Array* vals, size_t num)
{
	cvector_glVertex_Array* vec;
	if (!(vec = (cvector_glVertex_Array*)CVEC_MALLOC(sizeof(cvector_glVertex_Array)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}

	vec->capacity = num + CVEC_glVertex_Array_SZ;
	vec->size = num;
	if (!(vec->a = (glVertex_Array*)CVEC_MALLOC(vec->capacity*sizeof(glVertex_Array)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}

	CVEC_MEMMOVE(vec->a, vals, sizeof(glVertex_Array)*num);

	return vec;
}

// Initialize a caller-owned struct; returns 0 on allocation failure, 1 on success.
int cvec_glVertex_Array(cvector_glVertex_Array* vec, size_t size, size_t capacity)
{
	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glVertex_Array_SZ;

	if (!(vec->a = (glVertex_Array*)CVEC_MALLOC(vec->capacity*sizeof(glVertex_Array)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}

	return 1;
}

// Initialize a caller-owned struct and copy num elements from vals into it.
int cvec_init_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array* vals, size_t num)
{
	vec->capacity = num + CVEC_glVertex_Array_SZ;
	vec->size = num;
	if (!(vec->a = (glVertex_Array*)CVEC_MALLOC(vec->capacity*sizeof(glVertex_Array)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}

	CVEC_MEMMOVE(vec->a, vals, sizeof(glVertex_Array)*num);

	return 1;
}

// void*-typed copy-construct wrapper; clears dest's fields so cvec_copy
// doesn't realloc a garbage pointer.
int cvec_copyc_glVertex_Array(void* dest, void* src)
{
	cvector_glVertex_Array* vec1 = (cvector_glVertex_Array*)dest;
	cvector_glVertex_Array* vec2 = (cvector_glVertex_Array*)src;

	vec1->a = NULL;
	vec1->size = 0;
	vec1->capacity = 0;

	return cvec_copy_glVertex_Array(vec1, vec2);
}

// Deep-copy src into dest (dest->a may be NULL or an existing allocation).
int cvec_copy_glVertex_Array(cvector_glVertex_Array* dest, cvector_glVertex_Array* src)
{
	glVertex_Array* tmp = NULL;
	if (!(tmp = (glVertex_Array*)CVEC_REALLOC(dest->a,
src->capacity*sizeof(glVertex_Array)))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	dest->a = tmp;

	CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(glVertex_Array));
	dest->size = src->size;
	dest->capacity = src->capacity;
	return 1;
}

// Append a; grows via CVEC_glVertex_Array_ALLOCATOR when full.  0 on OOM.
int cvec_push_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array a)
{
	glVertex_Array* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		vec->a[vec->size++] = a;
	} else {
		tmp_sz = CVEC_glVertex_Array_ALLOCATOR(vec->capacity);
		if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->a[vec->size++] = a;
		vec->capacity = tmp_sz;
	}
	return 1;
}

// Remove and return the last element (no bounds check).
glVertex_Array cvec_pop_glVertex_Array(cvector_glVertex_Array* vec)
{
	return vec->a[--vec->size];
}

// Pointer to the last element.
glVertex_Array* cvec_back_glVertex_Array(cvector_glVertex_Array* vec)
{
	return &vec->a[vec->size-1];
}

// Grow size by num (elements uninitialized), reallocating if needed.
int cvec_extend_glVertex_Array(cvector_glVertex_Array* vec, size_t num)
{
	glVertex_Array* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_glVertex_Array_SZ;
		if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = tmp_sz;
	}

	vec->size += num;
	return 1;
}

// Insert a at index i, shifting the tail up by one.
int cvec_insert_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array a)
{
	glVertex_Array* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glVertex_Array));
		vec->a[i] = a;
	} else {
		// full: grow first, then shift and insert
		tmp_sz = CVEC_glVertex_Array_ALLOCATOR(vec->capacity);
		if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glVertex_Array));
		vec->a[i] = a;
		vec->capacity = tmp_sz;
	}

	vec->size++;
	return 1;
}

// Insert num elements from a at index i, shifting the tail up by num.
int cvec_insert_array_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array* a, size_t num)
{
	glVertex_Array* tmp;
	size_t
tmp_sz; if (vec->capacity < vec->size + num) { tmp_sz = vec->capacity + num + CVEC_glVertex_Array_SZ; if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*tmp_sz))) { CVEC_ASSERT(tmp != NULL); return 0; } vec->a = tmp; vec->capacity = tmp_sz; } CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(glVertex_Array)); CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(glVertex_Array)); vec->size += num; return 1; } glVertex_Array cvec_replace_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array a) { glVertex_Array tmp = vec->a[i]; vec->a[i] = a; return tmp; } void cvec_erase_glVertex_Array(cvector_glVertex_Array* vec, size_t start, size_t end) { size_t d = end - start + 1; CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(glVertex_Array)); vec->size -= d; } int cvec_reserve_glVertex_Array(cvector_glVertex_Array* vec, size_t size) { glVertex_Array* tmp; if (vec->capacity < size) { if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*(size+CVEC_glVertex_Array_SZ)))) { CVEC_ASSERT(tmp != NULL); return 0; } vec->a = tmp; vec->capacity = size + CVEC_glVertex_Array_SZ; } return 1; } int cvec_set_cap_glVertex_Array(cvector_glVertex_Array* vec, size_t size) { glVertex_Array* tmp; if (size < vec->size) { vec->size = size; } if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*size))) { CVEC_ASSERT(tmp != NULL); return 0; } vec->a = tmp; vec->capacity = size; return 1; } void cvec_set_val_sz_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array val) { size_t i; for (i=0; i<vec->size; i++) { vec->a[i] = val; } } void cvec_set_val_cap_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array val) { size_t i; for (i=0; i<vec->capacity; i++) { vec->a[i] = val; } } void cvec_clear_glVertex_Array(cvector_glVertex_Array* vec) { vec->size = 0; } void cvec_free_glVertex_Array_heap(void* vec) { cvector_glVertex_Array* tmp = (cvector_glVertex_Array*)vec; if (!tmp) return; CVEC_FREE(tmp->a); 
CVEC_FREE(tmp); } void cvec_free_glVertex_Array(void* vec) { cvector_glVertex_Array* tmp = (cvector_glVertex_Array*)vec; CVEC_FREE(tmp->a); tmp->size = 0; tmp->capacity = 0; } #endif #ifndef CVECTOR_glBuffer_H #define CVECTOR_glBuffer_H #include <stdlib.h> #ifdef __cplusplus extern "C" { #endif /** Data structure for glBuffer vector. */ typedef struct cvector_glBuffer { glBuffer* a; /**< Array. */ size_t size; /**< Current size (amount you use when manipulating array directly). */ size_t capacity; /**< Allocated size of array; always >= size. */ } cvector_glBuffer; extern size_t CVEC_glBuffer_SZ; int cvec_glBuffer(cvector_glBuffer* vec, size_t size, size_t capacity); int cvec_init_glBuffer(cvector_glBuffer* vec, glBuffer* vals, size_t num); cvector_glBuffer* cvec_glBuffer_heap(size_t size, size_t capacity); cvector_glBuffer* cvec_init_glBuffer_heap(glBuffer* vals, size_t num); int cvec_copyc_glBuffer(void* dest, void* src); int cvec_copy_glBuffer(cvector_glBuffer* dest, cvector_glBuffer* src); int cvec_push_glBuffer(cvector_glBuffer* vec, glBuffer a); glBuffer cvec_pop_glBuffer(cvector_glBuffer* vec); int cvec_extend_glBuffer(cvector_glBuffer* vec, size_t num); int cvec_insert_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer a); int cvec_insert_array_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer* a, size_t num); glBuffer cvec_replace_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer a); void cvec_erase_glBuffer(cvector_glBuffer* vec, size_t start, size_t end); int cvec_reserve_glBuffer(cvector_glBuffer* vec, size_t size); int cvec_set_cap_glBuffer(cvector_glBuffer* vec, size_t size); void cvec_set_val_sz_glBuffer(cvector_glBuffer* vec, glBuffer val); void cvec_set_val_cap_glBuffer(cvector_glBuffer* vec, glBuffer val); glBuffer* cvec_back_glBuffer(cvector_glBuffer* vec); void cvec_clear_glBuffer(cvector_glBuffer* vec); void cvec_free_glBuffer_heap(void* vec); void cvec_free_glBuffer(void* vec); #ifdef __cplusplus } #endif /* CVECTOR_glBuffer_H */ #endif 
/* ---- Implementation of the generated glBuffer vector ---- */
#ifdef CVECTOR_glBuffer_IMPLEMENTATION

/* Default slack added to requested capacities; tunable at runtime. */
size_t CVEC_glBuffer_SZ = 50;

/* Growth policy used on push/insert when the vector is full. */
#define CVEC_glBuffer_ALLOCATOR(x) ((x+1) * 2)

/* The three allocator hooks must be overridden together or not at all. */
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif

#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif

#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif

#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif

/* Heap-allocate a vector with the given size and a capacity of at least
 * size. Returns NULL on allocation failure. */
cvector_glBuffer* cvec_glBuffer_heap(size_t size, size_t capacity)
{
	cvector_glBuffer* vec;
	if (!(vec = (cvector_glBuffer*)CVEC_MALLOC(sizeof(cvector_glBuffer)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}

	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glBuffer_SZ;

	if (!(vec->a = (glBuffer*)CVEC_MALLOC(vec->capacity*sizeof(glBuffer)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}

	return vec;
}

/* Heap-allocate a vector initialized with a copy of vals[0..num-1].
 * Returns NULL on allocation failure. */
cvector_glBuffer* cvec_init_glBuffer_heap(glBuffer* vals, size_t num)
{
	cvector_glBuffer* vec;

	if (!(vec = (cvector_glBuffer*)CVEC_MALLOC(sizeof(cvector_glBuffer)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}

	vec->capacity = num + CVEC_glBuffer_SZ;
	vec->size = num;
	if (!(vec->a = (glBuffer*)CVEC_MALLOC(vec->capacity*sizeof(glBuffer)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}

	CVEC_MEMMOVE(vec->a, vals, sizeof(glBuffer)*num);

	return vec;
}

/* Initialize vec in place with the given size and a capacity of at least
 * size. Returns 0 on allocation failure (size/capacity reset to 0),
 * 1 on success. */
int cvec_glBuffer(cvector_glBuffer* vec, size_t size, size_t capacity)
{
	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glBuffer_SZ;

	if (!(vec->a = (glBuffer*)CVEC_MALLOC(vec->capacity*sizeof(glBuffer)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}

	return 1;
}

/* Initialize vec in place with a copy of vals[0..num-1]. Returns 0 on
 * allocation failure, 1 on success. */
int cvec_init_glBuffer(cvector_glBuffer* vec, glBuffer* vals, size_t num)
{
	vec->capacity = num + CVEC_glBuffer_SZ;
	vec->size = num;
	if (!(vec->a = (glBuffer*)CVEC_MALLOC(vec->capacity*sizeof(glBuffer)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}

	CVEC_MEMMOVE(vec->a, vals, sizeof(glBuffer)*num);

	return 1;
}

/* Generic (void*) wrapper around cvec_copy_glBuffer; dest is treated as
 * uninitialized and zeroed first. */
int cvec_copyc_glBuffer(void* dest, void* src)
{
	cvector_glBuffer* vec1 = (cvector_glBuffer*)dest;
	cvector_glBuffer* vec2 = (cvector_glBuffer*)src;

	vec1->a = NULL;
	vec1->size = 0;
	vec1->capacity = 0;

	return cvec_copy_glBuffer(vec1, vec2);
}

/* Deep-copy src into dest (dest->a is realloc'd to src's capacity).
 * Returns 0 on allocation failure leaving dest unchanged, 1 on success. */
int cvec_copy_glBuffer(cvector_glBuffer* dest, cvector_glBuffer* src)
{
	glBuffer* tmp = NULL;
	if (!(tmp = (glBuffer*)CVEC_REALLOC(dest->a, src->capacity*sizeof(glBuffer)))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	dest->a = tmp;

	CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(glBuffer));
	dest->size = src->size;
	dest->capacity = src->capacity;
	return 1;
}

/* Append a to the end, growing via CVEC_glBuffer_ALLOCATOR when full.
 * Returns 0 on allocation failure, 1 on success. */
int cvec_push_glBuffer(cvector_glBuffer* vec, glBuffer a)
{
	glBuffer* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		vec->a[vec->size++] = a;
	} else {
		tmp_sz = CVEC_glBuffer_ALLOCATOR(vec->capacity);
		if (!(tmp = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->a[vec->size++] = a;
		vec->capacity = tmp_sz;
	}
	return 1;
}

/* Remove and return the last element (no empty check). */
glBuffer cvec_pop_glBuffer(cvector_glBuffer* vec)
{
	return vec->a[--vec->size];
}

/* Pointer to the last element (no empty check). */
glBuffer* cvec_back_glBuffer(cvector_glBuffer* vec)
{
	return &vec->a[vec->size-1];
}

/* Grow size by num; new slots are uninitialized. Returns 0 on allocation
 * failure, 1 on success. */
int cvec_extend_glBuffer(cvector_glBuffer* vec, size_t num)
{
	glBuffer* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_glBuffer_SZ;
		if (!(tmp = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = tmp_sz;
	}

	vec->size += num;
	return 1;
}

/* Insert a at index i, shifting the tail up by one. Returns 0 on
 * allocation failure, 1 on success. */
int cvec_insert_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer a)
{
	glBuffer* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glBuffer));
		vec->a[i] = a;
	} else {
		tmp_sz = CVEC_glBuffer_ALLOCATOR(vec->capacity);
		if (!(tmp = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glBuffer));
		vec->a[i] = a;
		vec->capacity = tmp_sz;
	}

	vec->size++;
	return 1;
}

/* Insert a[0..num-1] at index i, shifting the tail up by num. Returns 0
 * on allocation failure, 1 on success. */
int cvec_insert_array_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer* a, size_t num)
{
	glBuffer* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_glBuffer_SZ;
		if (!(tmp = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = tmp_sz;
	}

	CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(glBuffer));
	CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(glBuffer));
	vec->size += num;
	return 1;
}

/* Overwrite element i with a and return the previous value. */
glBuffer cvec_replace_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer a)
{
	glBuffer tmp = vec->a[i];
	vec->a[i] = a;
	return tmp;
}

/* Erase the inclusive range [start, end], shifting the tail down. */
void cvec_erase_glBuffer(cvector_glBuffer* vec, size_t start, size_t end)
{
	size_t d = end - start + 1;
	CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(glBuffer));
	vec->size -= d;
}

/* Ensure capacity >= size (allocates size + CVEC_glBuffer_SZ when
 * growing). Returns 0 on allocation failure, 1 on success. */
int cvec_reserve_glBuffer(cvector_glBuffer* vec, size_t size)
{
	glBuffer* tmp;
	if (vec->capacity < size) {
		if (!(tmp = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*(size+CVEC_glBuffer_SZ)))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = size + CVEC_glBuffer_SZ;
	}
	return 1;
}

/* Set capacity to exactly size, truncating size if necessary. Returns 0
 * on realloc failure, 1 on success. */
int cvec_set_cap_glBuffer(cvector_glBuffer* vec, size_t size)
{
	glBuffer* tmp;
	if (size < vec->size) {
		vec->size = size;
	}

	if (!(tmp = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*size))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	vec->a = tmp;
	vec->capacity = size;
	return 1;
}

/* Fill the first size elements with val. */
void cvec_set_val_sz_glBuffer(cvector_glBuffer* vec, glBuffer val)
{
	size_t i;
	for (i=0; i<vec->size; i++) {
		vec->a[i] = val;
	}
}

/* Fill the entire allocated capacity with val. */
void cvec_set_val_cap_glBuffer(cvector_glBuffer* vec, glBuffer val)
{
	size_t i;
	for (i=0; i<vec->capacity; i++) {
		vec->a[i] = val;
	}
}

/* Logical clear; keeps the allocation for reuse. */
void cvec_clear_glBuffer(cvector_glBuffer* vec)
{
	vec->size = 0;
}

/* Free a heap-allocated vector (array and struct). NULL-safe. */
void cvec_free_glBuffer_heap(void* vec)
{
	cvector_glBuffer* tmp = (cvector_glBuffer*)vec;
	if (!tmp) return;
	CVEC_FREE(tmp->a);
	CVEC_FREE(tmp);
}

/* Free the backing array of a non-heap vector and reset its counters. */
void cvec_free_glBuffer(void* vec)
{
	cvector_glBuffer* tmp = (cvector_glBuffer*)vec;
	CVEC_FREE(tmp->a);
	tmp->size = 0;
	tmp->capacity = 0;
}
#endif

/* ---- Public interface of the generated glTexture vector ----
 * NOTE(review): this chunk boundary cuts the struct's trailing doc
 * comment; it is closed on the next source line. */
#ifndef CVECTOR_glTexture_H
#define CVECTOR_glTexture_H

#include <stdlib.h>

#ifdef __cplusplus
extern "C" {
#endif

/** Data structure for glTexture vector. */
typedef struct cvector_glTexture
{
	glTexture* a;      /**< Array. */
	size_t size;       /**< Current size (amount you use when manipulating array directly). */
	size_t capacity;   /**< Allocated size of array; always >= size.
 */
} cvector_glTexture;

extern size_t CVEC_glTexture_SZ;

int cvec_glTexture(cvector_glTexture* vec, size_t size, size_t capacity);
int cvec_init_glTexture(cvector_glTexture* vec, glTexture* vals, size_t num);

cvector_glTexture* cvec_glTexture_heap(size_t size, size_t capacity);
cvector_glTexture* cvec_init_glTexture_heap(glTexture* vals, size_t num);

int cvec_copyc_glTexture(void* dest, void* src);
int cvec_copy_glTexture(cvector_glTexture* dest, cvector_glTexture* src);

int cvec_push_glTexture(cvector_glTexture* vec, glTexture a);
glTexture cvec_pop_glTexture(cvector_glTexture* vec);

int cvec_extend_glTexture(cvector_glTexture* vec, size_t num);
int cvec_insert_glTexture(cvector_glTexture* vec, size_t i, glTexture a);
int cvec_insert_array_glTexture(cvector_glTexture* vec, size_t i, glTexture* a, size_t num);
glTexture cvec_replace_glTexture(cvector_glTexture* vec, size_t i, glTexture a);
void cvec_erase_glTexture(cvector_glTexture* vec, size_t start, size_t end);
int cvec_reserve_glTexture(cvector_glTexture* vec, size_t size);
int cvec_set_cap_glTexture(cvector_glTexture* vec, size_t size);
void cvec_set_val_sz_glTexture(cvector_glTexture* vec, glTexture val);
void cvec_set_val_cap_glTexture(cvector_glTexture* vec, glTexture val);

glTexture* cvec_back_glTexture(cvector_glTexture* vec);

void cvec_clear_glTexture(cvector_glTexture* vec);
void cvec_free_glTexture_heap(void* vec);
void cvec_free_glTexture(void* vec);

#ifdef __cplusplus
}
#endif

/* CVECTOR_glTexture_H */
#endif

/* ---- Implementation of the generated glTexture vector ---- */
#ifdef CVECTOR_glTexture_IMPLEMENTATION

/* Default slack added to requested capacities; tunable at runtime. */
size_t CVEC_glTexture_SZ = 50;

/* Growth policy used on push/insert when the vector is full. */
#define CVEC_glTexture_ALLOCATOR(x) ((x+1) * 2)

/* The three allocator hooks must be overridden together or not at all. */
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif

#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif

#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif

#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif

/* Heap-allocate a vector with the given size and a capacity of at least
 * size. Returns NULL on allocation failure. */
cvector_glTexture* cvec_glTexture_heap(size_t size, size_t capacity)
{
	cvector_glTexture* vec;
	if (!(vec = (cvector_glTexture*)CVEC_MALLOC(sizeof(cvector_glTexture)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}

	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glTexture_SZ;

	if (!(vec->a = (glTexture*)CVEC_MALLOC(vec->capacity*sizeof(glTexture)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}

	return vec;
}

/* Heap-allocate a vector initialized with a copy of vals[0..num-1].
 * Returns NULL on allocation failure. */
cvector_glTexture* cvec_init_glTexture_heap(glTexture* vals, size_t num)
{
	cvector_glTexture* vec;

	if (!(vec = (cvector_glTexture*)CVEC_MALLOC(sizeof(cvector_glTexture)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}

	vec->capacity = num + CVEC_glTexture_SZ;
	vec->size = num;
	if (!(vec->a = (glTexture*)CVEC_MALLOC(vec->capacity*sizeof(glTexture)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}

	CVEC_MEMMOVE(vec->a, vals, sizeof(glTexture)*num);

	return vec;
}

/* Initialize vec in place with the given size and a capacity of at least
 * size. Returns 0 on allocation failure (size/capacity reset to 0),
 * 1 on success. */
int cvec_glTexture(cvector_glTexture* vec, size_t size, size_t capacity)
{
	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glTexture_SZ;

	if (!(vec->a = (glTexture*)CVEC_MALLOC(vec->capacity*sizeof(glTexture)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}

	return 1;
}

/* Initialize vec in place with a copy of vals[0..num-1]. Returns 0 on
 * allocation failure, 1 on success. */
int cvec_init_glTexture(cvector_glTexture* vec, glTexture* vals, size_t num)
{
	vec->capacity = num + CVEC_glTexture_SZ;
	vec->size = num;
	if (!(vec->a = (glTexture*)CVEC_MALLOC(vec->capacity*sizeof(glTexture)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}

	CVEC_MEMMOVE(vec->a, vals, sizeof(glTexture)*num);

	return 1;
}

/* Generic (void*) wrapper around cvec_copy_glTexture; dest is treated as
 * uninitialized and zeroed first. */
int cvec_copyc_glTexture(void* dest, void* src)
{
	cvector_glTexture* vec1 = (cvector_glTexture*)dest;
	cvector_glTexture* vec2 = (cvector_glTexture*)src;

	vec1->a = NULL;
	vec1->size = 0;
	vec1->capacity = 0;

	return cvec_copy_glTexture(vec1, vec2);
}

/* Deep-copy src into dest (dest->a is realloc'd to src's capacity).
 * Returns 0 on allocation failure leaving dest unchanged, 1 on success. */
int cvec_copy_glTexture(cvector_glTexture* dest, cvector_glTexture* src)
{
	glTexture* tmp = NULL;
	if (!(tmp = (glTexture*)CVEC_REALLOC(dest->a, src->capacity*sizeof(glTexture)))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	dest->a = tmp;

	CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(glTexture));
	dest->size = src->size;
	dest->capacity = src->capacity;
	return 1;
}

/* Append a to the end, growing via CVEC_glTexture_ALLOCATOR when full.
 * Returns 0 on allocation failure, 1 on success. */
int cvec_push_glTexture(cvector_glTexture* vec, glTexture a)
{
	glTexture* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		vec->a[vec->size++] = a;
	} else {
		tmp_sz = CVEC_glTexture_ALLOCATOR(vec->capacity);
		if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->a[vec->size++] = a;
		vec->capacity = tmp_sz;
	}
	return 1;
}

/* Remove and return the last element (no empty check). */
glTexture cvec_pop_glTexture(cvector_glTexture* vec)
{
	return vec->a[--vec->size];
}

/* Pointer to the last element (no empty check). */
glTexture* cvec_back_glTexture(cvector_glTexture* vec)
{
	return &vec->a[vec->size-1];
}

/* Grow size by num; new slots are uninitialized. Returns 0 on allocation
 * failure, 1 on success. */
int cvec_extend_glTexture(cvector_glTexture* vec, size_t num)
{
	glTexture* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_glTexture_SZ;
		if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = tmp_sz;
	}

	vec->size += num;
	return 1;
}

/* Insert a at index i, shifting the tail up by one. Returns 0 on
 * allocation failure, 1 on success. */
int cvec_insert_glTexture(cvector_glTexture* vec, size_t i, glTexture a)
{
	glTexture* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glTexture));
		vec->a[i] = a;
	} else {
		tmp_sz = CVEC_glTexture_ALLOCATOR(vec->capacity);
		if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glTexture));
		vec->a[i] = a;
		vec->capacity = tmp_sz;
	}

	vec->size++;
	return 1;
}

/* Insert a[0..num-1] at index i, shifting the tail up by num. Returns 0
 * on allocation failure, 1 on success. */
int cvec_insert_array_glTexture(cvector_glTexture* vec, size_t i, glTexture* a, size_t num)
{
	glTexture* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_glTexture_SZ;
		if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = tmp_sz;
	}

	CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(glTexture));
	CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(glTexture));
	vec->size += num;
	return 1;
}

/* Overwrite element i with a and return the previous value. */
glTexture cvec_replace_glTexture(cvector_glTexture* vec, size_t i, glTexture a)
{
	glTexture tmp = vec->a[i];
	vec->a[i] = a;
	return tmp;
}

/* Erase the inclusive range [start, end], shifting the tail down. */
void cvec_erase_glTexture(cvector_glTexture* vec, size_t start, size_t end)
{
	size_t d = end - start + 1;
	CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(glTexture));
	vec->size -= d;
}

/* Ensure capacity >= size (allocates size + CVEC_glTexture_SZ when
 * growing). Returns 0 on allocation failure, 1 on success. */
int cvec_reserve_glTexture(cvector_glTexture* vec, size_t size)
{
	glTexture* tmp;
	if (vec->capacity < size) {
		if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*(size+CVEC_glTexture_SZ)))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = size + CVEC_glTexture_SZ;
	}
	return 1;
}

/* Set capacity to exactly size, truncating size if necessary. Returns 0
 * on realloc failure, 1 on success. */
int cvec_set_cap_glTexture(cvector_glTexture* vec, size_t size)
{
	glTexture* tmp;
	if (size < vec->size) {
		vec->size = size;
	}

	if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*size))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	vec->a = tmp;
	vec->capacity = size;
	return 1;
}

/* Fill the first size elements with val. */
void cvec_set_val_sz_glTexture(cvector_glTexture* vec, glTexture val)
{
	size_t i;
	for (i=0; i<vec->size; i++) {
		vec->a[i] = val;
	}
}

/* Fill the entire allocated capacity with val. */
void cvec_set_val_cap_glTexture(cvector_glTexture* vec, glTexture val)
{
	size_t i;
	for (i=0; i<vec->capacity; i++) {
		vec->a[i] = val;
	}
}

/* Logical clear; keeps the allocation for reuse. */
void cvec_clear_glTexture(cvector_glTexture* vec)
{
	vec->size = 0;
}

/* Free a heap-allocated vector (array and struct). NULL-safe. */
void cvec_free_glTexture_heap(void* vec)
{
	cvector_glTexture* tmp = (cvector_glTexture*)vec;
	if (!tmp) return;
	CVEC_FREE(tmp->a);
	CVEC_FREE(tmp);
}

/* Free the backing array of a non-heap vector and reset its counters. */
void cvec_free_glTexture(void* vec)
{
	cvector_glTexture* tmp = (cvector_glTexture*)vec;
	CVEC_FREE(tmp->a);
	tmp->size = 0;
	tmp->capacity = 0;
}
#endif

/* ---- Public interface of the generated glProgram vector ----
 * NOTE(review): this chunk boundary cuts the struct's trailing doc
 * comment; it is closed on the next source line. */
#ifndef CVECTOR_glProgram_H
#define CVECTOR_glProgram_H

#include <stdlib.h>

#ifdef __cplusplus
extern "C" {
#endif

/** Data structure for glProgram vector. */
typedef struct cvector_glProgram
{
	glProgram* a;      /**< Array. */
	size_t size;       /**< Current size (amount you use when manipulating array directly). */
	size_t capacity;   /**< Allocated size of array; always >= size.
 */
} cvector_glProgram;

extern size_t CVEC_glProgram_SZ;

int cvec_glProgram(cvector_glProgram* vec, size_t size, size_t capacity);
int cvec_init_glProgram(cvector_glProgram* vec, glProgram* vals, size_t num);

cvector_glProgram* cvec_glProgram_heap(size_t size, size_t capacity);
cvector_glProgram* cvec_init_glProgram_heap(glProgram* vals, size_t num);

int cvec_copyc_glProgram(void* dest, void* src);
int cvec_copy_glProgram(cvector_glProgram* dest, cvector_glProgram* src);

int cvec_push_glProgram(cvector_glProgram* vec, glProgram a);
glProgram cvec_pop_glProgram(cvector_glProgram* vec);

int cvec_extend_glProgram(cvector_glProgram* vec, size_t num);
int cvec_insert_glProgram(cvector_glProgram* vec, size_t i, glProgram a);
int cvec_insert_array_glProgram(cvector_glProgram* vec, size_t i, glProgram* a, size_t num);
glProgram cvec_replace_glProgram(cvector_glProgram* vec, size_t i, glProgram a);
void cvec_erase_glProgram(cvector_glProgram* vec, size_t start, size_t end);
int cvec_reserve_glProgram(cvector_glProgram* vec, size_t size);
int cvec_set_cap_glProgram(cvector_glProgram* vec, size_t size);
void cvec_set_val_sz_glProgram(cvector_glProgram* vec, glProgram val);
void cvec_set_val_cap_glProgram(cvector_glProgram* vec, glProgram val);

glProgram* cvec_back_glProgram(cvector_glProgram* vec);

void cvec_clear_glProgram(cvector_glProgram* vec);
void cvec_free_glProgram_heap(void* vec);
void cvec_free_glProgram(void* vec);

#ifdef __cplusplus
}
#endif

/* CVECTOR_glProgram_H */
#endif

/* ---- Implementation of the generated glProgram vector ---- */
#ifdef CVECTOR_glProgram_IMPLEMENTATION

/* Default slack added to requested capacities; tunable at runtime. */
size_t CVEC_glProgram_SZ = 50;

/* Growth policy used on push/insert when the vector is full. */
#define CVEC_glProgram_ALLOCATOR(x) ((x+1) * 2)

/* The three allocator hooks must be overridden together or not at all. */
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif

#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif

#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif

#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif

/* Heap-allocate a vector with the given size and a capacity of at least
 * size. Returns NULL on allocation failure. */
cvector_glProgram* cvec_glProgram_heap(size_t size, size_t capacity)
{
	cvector_glProgram* vec;
	if (!(vec = (cvector_glProgram*)CVEC_MALLOC(sizeof(cvector_glProgram)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}

	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glProgram_SZ;

	if (!(vec->a = (glProgram*)CVEC_MALLOC(vec->capacity*sizeof(glProgram)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}

	return vec;
}

/* Heap-allocate a vector initialized with a copy of vals[0..num-1].
 * Returns NULL on allocation failure. */
cvector_glProgram* cvec_init_glProgram_heap(glProgram* vals, size_t num)
{
	cvector_glProgram* vec;

	if (!(vec = (cvector_glProgram*)CVEC_MALLOC(sizeof(cvector_glProgram)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}

	vec->capacity = num + CVEC_glProgram_SZ;
	vec->size = num;
	if (!(vec->a = (glProgram*)CVEC_MALLOC(vec->capacity*sizeof(glProgram)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}

	CVEC_MEMMOVE(vec->a, vals, sizeof(glProgram)*num);

	return vec;
}

/* Initialize vec in place with the given size and a capacity of at least
 * size. Returns 0 on allocation failure (size/capacity reset to 0),
 * 1 on success. */
int cvec_glProgram(cvector_glProgram* vec, size_t size, size_t capacity)
{
	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glProgram_SZ;

	if (!(vec->a = (glProgram*)CVEC_MALLOC(vec->capacity*sizeof(glProgram)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}

	return 1;
}

/* Initialize vec in place with a copy of vals[0..num-1]. Returns 0 on
 * allocation failure, 1 on success. */
int cvec_init_glProgram(cvector_glProgram* vec, glProgram* vals, size_t num)
{
	vec->capacity = num + CVEC_glProgram_SZ;
	vec->size = num;
	if (!(vec->a = (glProgram*)CVEC_MALLOC(vec->capacity*sizeof(glProgram)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}

	CVEC_MEMMOVE(vec->a, vals, sizeof(glProgram)*num);

	return 1;
}

/* Generic (void*) wrapper around cvec_copy_glProgram; dest is treated as
 * uninitialized and zeroed first. */
int cvec_copyc_glProgram(void* dest, void* src)
{
	cvector_glProgram* vec1 = (cvector_glProgram*)dest;
	cvector_glProgram* vec2 = (cvector_glProgram*)src;

	vec1->a = NULL;
	vec1->size = 0;
	vec1->capacity = 0;

	return cvec_copy_glProgram(vec1, vec2);
}

/* Deep-copy src into dest (dest->a is realloc'd to src's capacity).
 * Returns 0 on allocation failure leaving dest unchanged, 1 on success. */
int cvec_copy_glProgram(cvector_glProgram* dest, cvector_glProgram* src)
{
	glProgram* tmp = NULL;
	if (!(tmp = (glProgram*)CVEC_REALLOC(dest->a, src->capacity*sizeof(glProgram)))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	dest->a = tmp;

	CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(glProgram));
	dest->size = src->size;
	dest->capacity = src->capacity;
	return 1;
}

/* Append a to the end, growing via CVEC_glProgram_ALLOCATOR when full.
 * Returns 0 on allocation failure, 1 on success. */
int cvec_push_glProgram(cvector_glProgram* vec, glProgram a)
{
	glProgram* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		vec->a[vec->size++] = a;
	} else {
		tmp_sz = CVEC_glProgram_ALLOCATOR(vec->capacity);
		if (!(tmp = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->a[vec->size++] = a;
		vec->capacity = tmp_sz;
	}
	return 1;
}

/* Remove and return the last element (no empty check). */
glProgram cvec_pop_glProgram(cvector_glProgram* vec)
{
	return vec->a[--vec->size];
}

/* Pointer to the last element (no empty check). */
glProgram* cvec_back_glProgram(cvector_glProgram* vec)
{
	return &vec->a[vec->size-1];
}

/* Grow size by num; new slots are uninitialized. Returns 0 on allocation
 * failure, 1 on success. */
int cvec_extend_glProgram(cvector_glProgram* vec, size_t num)
{
	glProgram* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_glProgram_SZ;
		if (!(tmp = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = tmp_sz;
	}

	vec->size += num;
	return 1;
}

/* Insert a at index i, shifting the tail up by one. Returns 0 on
 * allocation failure, 1 on success. */
int cvec_insert_glProgram(cvector_glProgram* vec, size_t i, glProgram a)
{
	glProgram* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glProgram));
		vec->a[i] = a;
	} else {
		tmp_sz = CVEC_glProgram_ALLOCATOR(vec->capacity);
		if (!(tmp = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glProgram));
		vec->a[i] = a;
		vec->capacity = tmp_sz;
	}

	vec->size++;
	return 1;
}

/* Insert a[0..num-1] at index i, shifting the tail up by num. Returns 0
 * on allocation failure, 1 on success. */
int cvec_insert_array_glProgram(cvector_glProgram* vec, size_t i, glProgram* a, size_t num)
{
	glProgram* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_glProgram_SZ;
		if (!(tmp = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = tmp_sz;
	}

	CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(glProgram));
	CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(glProgram));
	vec->size += num;
	return 1;
}

/* Overwrite element i with a and return the previous value. */
glProgram cvec_replace_glProgram(cvector_glProgram* vec, size_t i, glProgram a)
{
	glProgram tmp = vec->a[i];
	vec->a[i] = a;
	return tmp;
}

/* Erase the inclusive range [start, end], shifting the tail down. */
void cvec_erase_glProgram(cvector_glProgram* vec, size_t start, size_t end)
{
	size_t d = end - start + 1;
	CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(glProgram));
	vec->size -= d;
}

/* Ensure capacity >= size (allocates size + CVEC_glProgram_SZ when
 * growing). Returns 0 on allocation failure, 1 on success. */
int cvec_reserve_glProgram(cvector_glProgram* vec, size_t size)
{
	glProgram* tmp;
	if (vec->capacity < size) {
		if (!(tmp = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*(size+CVEC_glProgram_SZ)))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = size + CVEC_glProgram_SZ;
	}
	return 1;
}

/* Set capacity to exactly size, truncating size if necessary. Returns 0
 * on realloc failure, 1 on success. */
int cvec_set_cap_glProgram(cvector_glProgram* vec, size_t size)
{
	glProgram* tmp;
	if (size < vec->size) {
		vec->size = size;
	}

	if (!(tmp = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*size))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	vec->a = tmp;
	vec->capacity = size;
	return 1;
}

/* Fill the first size elements with val. */
void cvec_set_val_sz_glProgram(cvector_glProgram* vec, glProgram val)
{
	size_t i;
	for (i=0; i<vec->size; i++) {
		vec->a[i] = val;
	}
}

/* Fill the entire allocated capacity with val. */
void cvec_set_val_cap_glProgram(cvector_glProgram* vec, glProgram val)
{
	size_t i;
	for (i=0; i<vec->capacity; i++) {
		vec->a[i] = val;
	}
}

/* Logical clear; keeps the allocation for reuse. */
void cvec_clear_glProgram(cvector_glProgram* vec)
{
	vec->size = 0;
}

/* Free a heap-allocated vector (array and struct). NULL-safe. */
void cvec_free_glProgram_heap(void* vec)
{
	cvector_glProgram* tmp = (cvector_glProgram*)vec;
	if (!tmp) return;
	CVEC_FREE(tmp->a);
	CVEC_FREE(tmp);
}

/* Free the backing array of a non-heap vector and reset its counters. */
void cvec_free_glProgram(void* vec)
{
	cvector_glProgram* tmp = (cvector_glProgram*)vec;
	CVEC_FREE(tmp->a);
	tmp->size = 0;
	tmp->capacity = 0;
}
#endif

/* ---- Public interface of the generated glVertex vector ----
 * NOTE(review): this chunk boundary cuts the struct's trailing doc
 * comment; it is closed on the next source line. */
#ifndef CVECTOR_glVertex_H
#define CVECTOR_glVertex_H

#include <stdlib.h>

#ifdef __cplusplus
extern "C" {
#endif

/** Data structure for glVertex vector. */
typedef struct cvector_glVertex
{
	glVertex* a;       /**< Array. */
	size_t size;       /**< Current size (amount you use when manipulating array directly). */
	size_t capacity;   /**< Allocated size of array; always >= size.
 */
} cvector_glVertex;

extern size_t CVEC_glVertex_SZ;

int cvec_glVertex(cvector_glVertex* vec, size_t size, size_t capacity);
int cvec_init_glVertex(cvector_glVertex* vec, glVertex* vals, size_t num);

cvector_glVertex* cvec_glVertex_heap(size_t size, size_t capacity);
cvector_glVertex* cvec_init_glVertex_heap(glVertex* vals, size_t num);

int cvec_copyc_glVertex(void* dest, void* src);
int cvec_copy_glVertex(cvector_glVertex* dest, cvector_glVertex* src);

int cvec_push_glVertex(cvector_glVertex* vec, glVertex a);
glVertex cvec_pop_glVertex(cvector_glVertex* vec);

int cvec_extend_glVertex(cvector_glVertex* vec, size_t num);
int cvec_insert_glVertex(cvector_glVertex* vec, size_t i, glVertex a);
int cvec_insert_array_glVertex(cvector_glVertex* vec, size_t i, glVertex* a, size_t num);
glVertex cvec_replace_glVertex(cvector_glVertex* vec, size_t i, glVertex a);
void cvec_erase_glVertex(cvector_glVertex* vec, size_t start, size_t end);
int cvec_reserve_glVertex(cvector_glVertex* vec, size_t size);
int cvec_set_cap_glVertex(cvector_glVertex* vec, size_t size);
void cvec_set_val_sz_glVertex(cvector_glVertex* vec, glVertex val);
void cvec_set_val_cap_glVertex(cvector_glVertex* vec, glVertex val);

glVertex* cvec_back_glVertex(cvector_glVertex* vec);

void cvec_clear_glVertex(cvector_glVertex* vec);
void cvec_free_glVertex_heap(void* vec);
void cvec_free_glVertex(void* vec);

#ifdef __cplusplus
}
#endif

/* CVECTOR_glVertex_H */
#endif

/* ---- Implementation of the generated glVertex vector ---- */
#ifdef CVECTOR_glVertex_IMPLEMENTATION

/* Default slack added to requested capacities; tunable at runtime. */
size_t CVEC_glVertex_SZ = 50;

/* Growth policy used on push/insert when the vector is full. */
#define CVEC_glVertex_ALLOCATOR(x) ((x+1) * 2)

/* The three allocator hooks must be overridden together or not at all. */
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif

#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif

#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif

#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif

/* Heap-allocate a vector with the given size and a capacity of at least
 * size. Returns NULL on allocation failure. */
cvector_glVertex* cvec_glVertex_heap(size_t size, size_t capacity)
{
	cvector_glVertex* vec;
	if (!(vec = (cvector_glVertex*)CVEC_MALLOC(sizeof(cvector_glVertex)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}

	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glVertex_SZ;

	if (!(vec->a = (glVertex*)CVEC_MALLOC(vec->capacity*sizeof(glVertex)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}

	return vec;
}

/* Heap-allocate a vector initialized with a copy of vals[0..num-1].
 * Returns NULL on allocation failure. */
cvector_glVertex* cvec_init_glVertex_heap(glVertex* vals, size_t num)
{
	cvector_glVertex* vec;

	if (!(vec = (cvector_glVertex*)CVEC_MALLOC(sizeof(cvector_glVertex)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}

	vec->capacity = num + CVEC_glVertex_SZ;
	vec->size = num;
	if (!(vec->a = (glVertex*)CVEC_MALLOC(vec->capacity*sizeof(glVertex)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}

	CVEC_MEMMOVE(vec->a, vals, sizeof(glVertex)*num);

	return vec;
}

/* Initialize vec in place with the given size and a capacity of at least
 * size. Returns 0 on allocation failure (size/capacity reset to 0),
 * 1 on success. */
int cvec_glVertex(cvector_glVertex* vec, size_t size, size_t capacity)
{
	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glVertex_SZ;

	if (!(vec->a = (glVertex*)CVEC_MALLOC(vec->capacity*sizeof(glVertex)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}

	return 1;
}

/* Initialize vec in place with a copy of vals[0..num-1]. Returns 0 on
 * allocation failure, 1 on success. */
int cvec_init_glVertex(cvector_glVertex* vec, glVertex* vals, size_t num)
{
	vec->capacity = num + CVEC_glVertex_SZ;
	vec->size = num;
	if (!(vec->a = (glVertex*)CVEC_MALLOC(vec->capacity*sizeof(glVertex)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}

	CVEC_MEMMOVE(vec->a, vals, sizeof(glVertex)*num);

	return 1;
}

/* Generic (void*) wrapper around cvec_copy_glVertex; dest is treated as
 * uninitialized and zeroed first. */
int cvec_copyc_glVertex(void* dest, void* src)
{
	cvector_glVertex* vec1 = (cvector_glVertex*)dest;
	cvector_glVertex* vec2 = (cvector_glVertex*)src;

	vec1->a = NULL;
	vec1->size = 0;
	vec1->capacity = 0;

	return cvec_copy_glVertex(vec1, vec2);
}

/* Deep-copy src into dest (dest->a is realloc'd to src's capacity).
 * Returns 0 on allocation failure leaving dest unchanged, 1 on success. */
int cvec_copy_glVertex(cvector_glVertex* dest, cvector_glVertex* src)
{
	glVertex* tmp = NULL;
	if (!(tmp = (glVertex*)CVEC_REALLOC(dest->a, src->capacity*sizeof(glVertex)))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	dest->a = tmp;

	CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(glVertex));
	dest->size = src->size;
	dest->capacity = src->capacity;
	return 1;
}

/* Append a to the end, growing via CVEC_glVertex_ALLOCATOR when full.
 * Returns 0 on allocation failure, 1 on success. */
int cvec_push_glVertex(cvector_glVertex* vec, glVertex a)
{
	glVertex* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		vec->a[vec->size++] = a;
	} else {
		tmp_sz = CVEC_glVertex_ALLOCATOR(vec->capacity);
		if (!(tmp = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->a[vec->size++] = a;
		vec->capacity = tmp_sz;
	}
	return 1;
}

/* Remove and return the last element (no empty check). */
glVertex cvec_pop_glVertex(cvector_glVertex* vec)
{
	return vec->a[--vec->size];
}

/* Pointer to the last element (no empty check). */
glVertex* cvec_back_glVertex(cvector_glVertex* vec)
{
	return &vec->a[vec->size-1];
}

/* Grow size by num; new slots are uninitialized. Returns 0 on allocation
 * failure, 1 on success.
 * NOTE(review): this chunk ends mid-function; the remainder of
 * cvec_extend_glVertex continues in the next part of the file. */
int cvec_extend_glVertex(cvector_glVertex* vec, size_t num)
{
	glVertex* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_glVertex_SZ;
		if (!(tmp = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a =
tmp; vec->capacity = tmp_sz; } vec->size += num; return 1; } int cvec_insert_glVertex(cvector_glVertex* vec, size_t i, glVertex a) { glVertex* tmp; size_t tmp_sz; if (vec->capacity > vec->size) { CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glVertex)); vec->a[i] = a; } else { tmp_sz = CVEC_glVertex_ALLOCATOR(vec->capacity); if (!(tmp = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*tmp_sz))) { CVEC_ASSERT(tmp != NULL); return 0; } vec->a = tmp; CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glVertex)); vec->a[i] = a; vec->capacity = tmp_sz; } vec->size++; return 1; } int cvec_insert_array_glVertex(cvector_glVertex* vec, size_t i, glVertex* a, size_t num) { glVertex* tmp; size_t tmp_sz; if (vec->capacity < vec->size + num) { tmp_sz = vec->capacity + num + CVEC_glVertex_SZ; if (!(tmp = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*tmp_sz))) { CVEC_ASSERT(tmp != NULL); return 0; } vec->a = tmp; vec->capacity = tmp_sz; } CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(glVertex)); CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(glVertex)); vec->size += num; return 1; } glVertex cvec_replace_glVertex(cvector_glVertex* vec, size_t i, glVertex a) { glVertex tmp = vec->a[i]; vec->a[i] = a; return tmp; } void cvec_erase_glVertex(cvector_glVertex* vec, size_t start, size_t end) { size_t d = end - start + 1; CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(glVertex)); vec->size -= d; } int cvec_reserve_glVertex(cvector_glVertex* vec, size_t size) { glVertex* tmp; if (vec->capacity < size) { if (!(tmp = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*(size+CVEC_glVertex_SZ)))) { CVEC_ASSERT(tmp != NULL); return 0; } vec->a = tmp; vec->capacity = size + CVEC_glVertex_SZ; } return 1; } int cvec_set_cap_glVertex(cvector_glVertex* vec, size_t size) { glVertex* tmp; if (size < vec->size) { vec->size = size; } if (!(tmp = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*size))) { CVEC_ASSERT(tmp != NULL); return 0; } vec->a 
= tmp; vec->capacity = size; return 1; } void cvec_set_val_sz_glVertex(cvector_glVertex* vec, glVertex val) { size_t i; for (i=0; i<vec->size; i++) { vec->a[i] = val; } } void cvec_set_val_cap_glVertex(cvector_glVertex* vec, glVertex val) { size_t i; for (i=0; i<vec->capacity; i++) { vec->a[i] = val; } } void cvec_clear_glVertex(cvector_glVertex* vec) { vec->size = 0; } void cvec_free_glVertex_heap(void* vec) { cvector_glVertex* tmp = (cvector_glVertex*)vec; if (!tmp) return; CVEC_FREE(tmp->a); CVEC_FREE(tmp); } void cvec_free_glVertex(void* vec) { cvector_glVertex* tmp = (cvector_glVertex*)vec; CVEC_FREE(tmp->a); tmp->size = 0; tmp->capacity = 0; } #endif typedef struct glContext { mat4 vp_mat; int x_min, y_min; size_t x_max, y_max; cvector_glVertex_Array vertex_arrays; cvector_glBuffer buffers; cvector_glTexture textures; cvector_glProgram programs; GLuint cur_vertex_array; GLuint bound_buffers[GL_NUM_BUFFER_TYPES-GL_ARRAY_BUFFER]; GLuint bound_textures[GL_NUM_TEXTURE_TYPES-GL_TEXTURE_UNBOUND-1]; GLuint cur_texture2D; GLuint cur_program; GLenum error; void* uniform; vec4 vertex_attribs_vs[GL_MAX_VERTEX_ATTRIBS]; Shader_Builtins builtins; Vertex_Shader_output vs_output; float fs_input[GL_MAX_VERTEX_OUTPUT_COMPONENTS]; GLboolean depth_test; GLboolean line_smooth; GLboolean cull_face; GLboolean fragdepth_or_discard; GLboolean depth_clamp; GLboolean depth_mask; GLboolean blend; GLboolean logic_ops; GLboolean poly_offset; GLboolean scissor_test; // stencil test requires a lot of state, especially for // something that I think will rarely be used... is it even worth having? 
GLboolean stencil_test; GLuint stencil_writemask; GLuint stencil_writemask_back; GLint stencil_ref; GLint stencil_ref_back; GLuint stencil_valuemask; GLuint stencil_valuemask_back; GLenum stencil_func; GLenum stencil_func_back; GLenum stencil_sfail; GLenum stencil_dpfail; GLenum stencil_dppass; GLenum stencil_sfail_back; GLenum stencil_dpfail_back; GLenum stencil_dppass_back; GLenum logic_func; GLenum blend_sfactor; GLenum blend_dfactor; GLenum blend_equation; GLenum cull_mode; GLenum front_face; GLenum poly_mode_front; GLenum poly_mode_back; GLenum depth_func; GLenum point_spr_origin; GLenum provoking_vert; // I really need to decide whether to use GLtypes or plain C types GLfloat poly_factor; GLfloat poly_units; GLint scissor_lx; GLint scissor_ly; GLsizei scissor_ux; GLsizei scissor_uy; GLint unpack_alignment; GLint pack_alignment; GLint clear_stencil; Color clear_color; vec4 blend_color; GLfloat point_size; GLfloat clear_depth; GLfloat depth_range_near; GLfloat depth_range_far; draw_triangle_func draw_triangle_front; draw_triangle_func draw_triangle_back; glFramebuffer zbuf; glFramebuffer back_buffer; glFramebuffer stencil_buf; int user_alloced_backbuf; int bitdepth; u32 Rmask; u32 Gmask; u32 Bmask; u32 Amask; int Rshift; int Gshift; int Bshift; int Ashift; cvector_glVertex glverts; } glContext; /************************************* * GLSL(ish) functions *************************************/ float clampf_01(float f); float clampf(float f, float min, float max); int clampi(int i, int min, int max); //shader texture functions vec4 texture1D(GLuint tex, float x); vec4 texture2D(GLuint tex, float x, float y); vec4 texture3D(GLuint tex, float x, float y, float z); vec4 texture2DArray(GLuint tex, float x, float y, int z); vec4 texture_rect(GLuint tex, float x, float y); vec4 texture_cubemap(GLuint texture, float x, float y, float z); // TODO leave these non gl* functions here? prefix with pgl? 
int init_glContext(glContext* c, u32** back_buffer, int w, int h, int bitdepth, u32 Rmask, u32 Gmask, u32 Bmask, u32 Amask); void free_glContext(glContext* context); void set_glContext(glContext* context); void* pglResizeFramebuffer(size_t w, size_t h); void glViewport(int x, int y, GLsizei width, GLsizei height); GLubyte* glGetString(GLenum name); GLenum glGetError(); void glGetBooleanv(GLenum pname, GLboolean* params); void glGetDoublev(GLenum pname, GLdouble* params); void glGetFloatv(GLenum pname, GLfloat* params); void glGetIntegerv(GLenum pname, GLint* params); void glGetInteger64v(GLenum pname, GLint64* params); GLboolean glIsEnabled(GLenum cap); void glClearColor(GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha); void glClearDepth(GLclampf depth); void glDepthFunc(GLenum func); void glDepthRange(GLclampf nearVal, GLclampf farVal); void glDepthMask(GLboolean flag); void glBlendFunc(GLenum sfactor, GLenum dfactor); void glBlendEquation(GLenum mode); void glBlendColor(GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha); void glClear(GLbitfield mask); void glProvokingVertex(GLenum provokeMode); void glEnable(GLenum cap); void glDisable(GLenum cap); void glCullFace(GLenum mode); void glFrontFace(GLenum mode); void glPolygonMode(GLenum face, GLenum mode); void glPointSize(GLfloat size); void glPointParameteri(GLenum pname, GLint param); void glLineWidth(GLfloat width); void glLogicOp(GLenum opcode); void glPolygonOffset(GLfloat factor, GLfloat units); void glScissor(GLint x, GLint y, GLsizei width, GLsizei height); void glStencilFunc(GLenum func, GLint ref, GLuint mask); void glStencilFuncSeparate(GLenum face, GLenum func, GLint ref, GLuint mask); void glStencilOp(GLenum sfail, GLenum dpfail, GLenum dppass); void glStencilOpSeparate(GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); void glClearStencil(GLint s); void glStencilMask(GLuint mask); void glStencilMaskSeparate(GLenum face, GLuint mask); //textures void glGenTextures(GLsizei 
n, GLuint* textures); void glDeleteTextures(GLsizei n, GLuint* textures); void glBindTexture(GLenum target, GLuint texture); void glActiveTexture(GLenum texture); void glTexParameteri(GLenum target, GLenum pname, GLint param); void glTexParameterfv(GLenum target, GLenum pname, const GLfloat* params); void glPixelStorei(GLenum pname, GLint param); void glTexImage1D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLint border, GLenum format, GLenum type, const GLvoid* data); void glTexImage2D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const GLvoid* data); void glTexImage3D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid* data); void glTexSubImage1D(GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const GLvoid* data); void glTexSubImage2D(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid* data); void glTexSubImage3D(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const GLvoid* data); void glGenVertexArrays(GLsizei n, GLuint* arrays); void glDeleteVertexArrays(GLsizei n, const GLuint* arrays); void glBindVertexArray(GLuint array); void glGenBuffers(GLsizei n, GLuint* buffers); void glDeleteBuffers(GLsizei n, const GLuint* buffers); void glBindBuffer(GLenum target, GLuint buffer); void glBufferData(GLenum target, GLsizei size, const GLvoid* data, GLenum usage); void glBufferSubData(GLenum target, GLsizei offset, GLsizei size, const GLvoid* data); void glVertexAttribPointer(GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, GLsizei offset); void glVertexAttribDivisor(GLuint index, GLuint divisor); void glEnableVertexAttribArray(GLuint 
index); void glDisableVertexAttribArray(GLuint index); void glDrawArrays(GLenum mode, GLint first, GLsizei count); void glDrawElements(GLenum mode, GLsizei count, GLenum type, GLsizei offset); void glDrawArraysInstanced(GLenum mode, GLint first, GLsizei count, GLsizei primcount); void glDrawArraysInstancedBaseInstance(GLenum mode, GLint first, GLsizei count, GLsizei primcount, GLuint baseinstance); void glDrawElementsInstanced(GLenum mode, GLsizei count, GLenum type, GLsizei offset, GLsizei primcount); void glDrawElementsInstancedBaseInstance(GLenum mode, GLsizei count, GLenum type, GLsizei offset, GLsizei primcount, GLuint baseinstance); //shaders GLuint pglCreateProgram(vert_func vertex_shader, frag_func fragment_shader, GLsizei n, GLenum* interpolation, GLboolean fragdepth_or_discard); void glDeleteProgram(GLuint program); void glUseProgram(GLuint program); void pglSetUniform(void* uniform); // Stubs to let real OpenGL libs compile with minimal modifications/ifdefs // add what you need void glGenerateMipmap(GLenum target); void glGetDoublev(GLenum pname, GLdouble* params); void glGetInteger64v(GLenum pname, GLint64* params); // Framebuffers/Renderbuffers void glGenFramebuffers(GLsizei n, GLuint* ids); void glBindFramebuffer(GLenum target, GLuint framebuffer); void glDeleteFramebuffers(GLsizei n, GLuint* framebuffers); void glFramebufferTexture(GLenum target, GLenum attachment, GLuint texture, GLint level); void glFramebufferTexture1D(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); void glFramebufferTexture2D(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); void glFramebufferTexture3D(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint layer); GLboolean glIsFramebuffer(GLuint framebuffer); void glGenRenderbuffers(GLsizei n, GLuint* renderbuffers); void glBindRenderbuffer(GLenum target, GLuint renderbuffer); void glDeleteRenderbuffers(GLsizei n, const GLuint* 
renderbuffers); void glRenderbufferStorage(GLenum target, GLenum internalformat, GLsizei width, GLsizei height); GLboolean glIsRenderbuffer(GLuint renderbuffer); void glFramebufferRenderbuffer(GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); GLenum glCheckFramebufferStatus(GLenum target); void glGetProgramiv(GLuint program, GLenum pname, GLint* params); void glGetProgramInfoLog(GLuint program, GLsizei maxLength, GLsizei* length, GLchar* infoLog); void glAttachShader(GLuint program, GLuint shader); void glCompileShader(GLuint shader); void glGetShaderInfoLog(GLuint shader, GLsizei maxLength, GLsizei* length, GLchar* infoLog); // use pglCreateProgram() GLuint glCreateProgram(); void glLinkProgram(GLuint program); void glShaderSource(GLuint shader, GLsizei count, const GLchar** string, const GLint* length); void glGetShaderiv(GLuint shader, GLenum pname, GLint* params); GLuint glCreateShader(GLenum shaderType); void glDeleteShader(GLuint shader); void glDetachShader(GLuint program, GLuint shader); GLint glGetUniformLocation(GLuint program, const GLchar* name); GLint glGetAttribLocation(GLuint program, const GLchar* name); void* glMapBuffer(GLenum target, GLenum access); void* glMapNamedBuffer(GLuint buffer, GLenum access); GLboolean glUnmapBuffer(GLenum target); GLboolean glUnmapNamedBuffer(GLuint buffer); void glUniform1f(GLint location, GLfloat v0); void glUniform2f(GLint location, GLfloat v0, GLfloat v1); void glUniform3f(GLint location, GLfloat v0, GLfloat v1, GLfloat v2); void glUniform4f(GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); void glUniform1i(GLint location, GLint v0); void glUniform2i(GLint location, GLint v0, GLint v1); void glUniform3i(GLint location, GLint v0, GLint v1, GLint v2); void glUniform4i(GLint location, GLint v0, GLint v1, GLint v2, GLint v3); void glUniform1ui(GLuint location, GLuint v0); void glUniform2ui(GLuint location, GLuint v0, GLuint v1); void glUniform3ui(GLuint location, GLuint 
v0, GLuint v1, GLuint v2); void glUniform4ui(GLuint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); void glUniform1fv(GLint location, GLsizei count, const GLfloat* value); void glUniform2fv(GLint location, GLsizei count, const GLfloat* value); void glUniform3fv(GLint location, GLsizei count, const GLfloat* value); void glUniform4fv(GLint location, GLsizei count, const GLfloat* value); void glUniform1iv(GLint location, GLsizei count, const GLint* value); void glUniform2iv(GLint location, GLsizei count, const GLint* value); void glUniform3iv(GLint location, GLsizei count, const GLint* value); void glUniform4iv(GLint location, GLsizei count, const GLint* value); void glUniform1uiv(GLint location, GLsizei count, const GLuint* value); void glUniform2uiv(GLint location, GLsizei count, const GLuint* value); void glUniform3uiv(GLint location, GLsizei count, const GLuint* value); void glUniform4uiv(GLint location, GLsizei count, const GLuint* value); void glUniformMatrix2fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value); void glUniformMatrix3fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value); void glUniformMatrix4fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value); void glUniformMatrix2x3fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value); void glUniformMatrix3x2fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value); void glUniformMatrix2x4fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value); void glUniformMatrix4x2fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value); void glUniformMatrix3x4fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value); void glUniformMatrix4x3fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value); void pglClearScreen(); //This isn't possible in regular OpenGL, changing the interpolation of vs output of //an existing shader. 
You'd have to switch between 2 almost identical shaders. void pglSetInterp(GLsizei n, GLenum* interpolation); //TODO //pglDrawRect(x, y, w, h) //pglDrawPoint(x, y) void pglDrawFrame(); // TODO should these be called pglMapped* since that's what they do? I don't think so, since it's too different from actual spec for mapped buffers void pglBufferData(GLenum target, GLsizei size, const GLvoid* data, GLenum usage); void pglTexImage1D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLint border, GLenum format, GLenum type, const GLvoid* data); void pglTexImage2D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const GLvoid* data); void pglTexImage3D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid* data); // I could make these return the data? void pglGetBufferData(GLuint buffer, GLvoid** data); void pglGetTextureData(GLuint texture, GLvoid** data); void put_pixel(Color color, int x, int y); //Should I have it take a glFramebuffer as paramater? 
void put_line(Color the_color, float x1, float y1, float x2, float y2); void put_triangle(Color c1, Color c2, Color c3, vec2 p1, vec2 p2, vec2 p3); #ifdef __cplusplus } #endif // end GL_H #endif #ifdef PORTABLEGL_IMPLEMENTATION extern inline float rsw_randf(); extern inline float rsw_randf_range(float min, float max); extern inline vec2 make_vec2(float x, float y); extern inline vec3 make_vec3(float x, float y, float z); extern inline vec4 make_vec4(float x, float y, float z, float w); extern inline ivec2 make_ivec2(int x, int y); extern inline ivec3 make_ivec3(int x, int y, int z); extern inline ivec4 make_ivec4(int x, int y, int z, int w); extern inline vec2 negate_vec2(vec2 v); extern inline vec3 negate_vec3(vec3 v); extern inline vec4 negate_vec4(vec4 v); extern inline void fprint_vec2(FILE* f, vec2 v, const char* append); extern inline void fprint_vec3(FILE* f, vec3 v, const char* append); extern inline void fprint_vec4(FILE* f, vec4 v, const char* append); extern inline void print_vec2(vec2 v, const char* append); extern inline void print_vec3(vec3 v, const char* append); extern inline void print_vec4(vec4 v, const char* append); extern inline int fread_vec2(FILE* f, vec2* v); extern inline int fread_vec3(FILE* f, vec3* v); extern inline int fread_vec4(FILE* f, vec4* v); extern inline void fprint_dvec2(FILE* f, dvec2 v, const char* append); extern inline void fprint_dvec3(FILE* f, dvec3 v, const char* append); extern inline void fprint_dvec4(FILE* f, dvec4 v, const char* append); extern inline int fread_dvec2(FILE* f, dvec2* v); extern inline int fread_dvec3(FILE* f, dvec3* v); extern inline int fread_dvec4(FILE* f, dvec4* v); extern inline void fprint_ivec2(FILE* f, ivec2 v, const char* append); extern inline void fprint_ivec3(FILE* f, ivec3 v, const char* append); extern inline void fprint_ivec4(FILE* f, ivec4 v, const char* append); extern inline int fread_ivec2(FILE* f, ivec2* v); extern inline int fread_ivec3(FILE* f, ivec3* v); extern inline int 
fread_ivec4(FILE* f, ivec4* v); extern inline void fprint_uvec2(FILE* f, uvec2 v, const char* append); extern inline void fprint_uvec3(FILE* f, uvec3 v, const char* append); extern inline void fprint_uvec4(FILE* f, uvec4 v, const char* append); extern inline int fread_uvec2(FILE* f, uvec2* v); extern inline int fread_uvec3(FILE* f, uvec3* v); extern inline int fread_uvec4(FILE* f, uvec4* v); extern inline float length_vec2(vec2 a); extern inline float length_vec3(vec3 a); extern inline vec2 norm_vec2(vec2 a); extern inline vec3 norm_vec3(vec3 a); extern inline void normalize_vec2(vec2* a); extern inline void normalize_vec3(vec3* a); extern inline vec2 add_vec2s(vec2 a, vec2 b); extern inline vec3 add_vec3s(vec3 a, vec3 b); extern inline vec4 add_vec4s(vec4 a, vec4 b); extern inline vec2 sub_vec2s(vec2 a, vec2 b); extern inline vec3 sub_vec3s(vec3 a, vec3 b); extern inline vec4 sub_vec4s(vec4 a, vec4 b); extern inline vec2 mult_vec2s(vec2 a, vec2 b); extern inline vec3 mult_vec3s(vec3 a, vec3 b); extern inline vec4 mult_vec4s(vec4 a, vec4 b); extern inline vec2 div_vec2s(vec2 a, vec2 b); extern inline vec3 div_vec3s(vec3 a, vec3 b); extern inline vec4 div_vec4s(vec4 a, vec4 b); extern inline float dot_vec2s(vec2 a, vec2 b); extern inline float dot_vec3s(vec3 a, vec3 b); extern inline float dot_vec4s(vec4 a, vec4 b); extern inline vec2 scale_vec2(vec2 a, float s); extern inline vec3 scale_vec3(vec3 a, float s); extern inline vec4 scale_vec4(vec4 a, float s); extern inline int equal_vec2s(vec2 a, vec2 b); extern inline int equal_vec3s(vec3 a, vec3 b); extern inline int equal_vec4s(vec4 a, vec4 b); extern inline int equal_epsilon_vec2s(vec2 a, vec2 b, float epsilon); extern inline int equal_epsilon_vec3s(vec3 a, vec3 b, float epsilon); extern inline int equal_epsilon_vec4s(vec4 a, vec4 b, float epsilon); extern inline vec2 vec4_to_vec2(vec4 a); extern inline vec3 vec4_to_vec3(vec4 a); extern inline vec2 vec4_to_vec2h(vec4 a); extern inline vec3 vec4_to_vec3h(vec4 a); 
extern inline vec3 cross_product(const vec3 u, const vec3 v); extern inline float angle_between_vec3(const vec3 u, const vec3 v); extern inline vec2 x_mat2(mat2 m); extern inline vec2 y_mat2(mat2 m); extern inline vec2 c1_mat2(mat2 m); extern inline vec2 c2_mat2(mat2 m); extern inline void setc1_mat2(mat2 m, vec2 v); extern inline void setc2_mat2(mat2 m, vec2 v); extern inline void setx_mat2(mat2 m, vec2 v); extern inline void sety_mat2(mat2 m, vec2 v); extern inline vec3 x_mat3(mat3 m); extern inline vec3 y_mat3(mat3 m); extern inline vec3 z_mat3(mat3 m); extern inline vec3 c1_mat3(mat3 m); extern inline vec3 c2_mat3(mat3 m); extern inline vec3 c3_mat3(mat3 m); extern inline void setc1_mat3(mat3 m, vec3 v); extern inline void setc2_mat3(mat3 m, vec3 v); extern inline void setc3_mat3(mat3 m, vec3 v); extern inline void setx_mat3(mat3 m, vec3 v); extern inline void sety_mat3(mat3 m, vec3 v); extern inline void setz_mat3(mat3 m, vec3 v); extern inline vec4 c1_mat4(mat4 m); extern inline vec4 c2_mat4(mat4 m); extern inline vec4 c3_mat4(mat4 m); extern inline vec4 c4_mat4(mat4 m); extern inline vec4 x_mat4(mat4 m); extern inline vec4 y_mat4(mat4 m); extern inline vec4 z_mat4(mat4 m); extern inline vec4 w_mat4(mat4 m); extern inline void setc1_mat4v3(mat4 m, vec3 v); extern inline void setc2_mat4v3(mat4 m, vec3 v); extern inline void setc3_mat4v3(mat4 m, vec3 v); extern inline void setc4_mat4v3(mat4 m, vec3 v); extern inline void setc1_mat4v4(mat4 m, vec4 v); extern inline void setc2_mat4v4(mat4 m, vec4 v); extern inline void setc3_mat4v4(mat4 m, vec4 v); extern inline void setc4_mat4v4(mat4 m, vec4 v); extern inline void setx_mat4v3(mat4 m, vec3 v); extern inline void sety_mat4v3(mat4 m, vec3 v); extern inline void setz_mat4v3(mat4 m, vec3 v); extern inline void setw_mat4v3(mat4 m, vec3 v); extern inline void setx_mat4v4(mat4 m, vec4 v); extern inline void sety_mat4v4(mat4 m, vec4 v); extern inline void setz_mat4v4(mat4 m, vec4 v); extern inline void setw_mat4v4(mat4 
m, vec4 v); extern inline void fprint_mat2(FILE* f, mat2 m, const char* append); extern inline void fprint_mat3(FILE* f, mat3 m, const char* append); extern inline void fprint_mat4(FILE* f, mat4 m, const char* append); extern inline void print_mat2(mat2 m, const char* append); extern inline void print_mat3(mat3 m, const char* append); extern inline void print_mat4(mat4 m, const char* append); extern inline vec2 mult_mat2_vec2(mat2 m, vec2 v); extern inline vec3 mult_mat3_vec3(mat3 m, vec3 v); extern inline vec4 mult_mat4_vec4(mat4 m, vec4 v); extern inline void scale_mat3(mat3 m, float x, float y, float z); extern inline void scale_mat4(mat4 m, float x, float y, float z); extern inline void translation_mat4(mat4 m, float x, float y, float z); extern inline void extract_rotation_mat4(mat3 dst, mat4 src, int normalize); extern inline Color make_Color(u8 red, u8 green, u8 blue, u8 alpha); extern inline Color vec4_to_Color(vec4 v); extern inline void print_Color(Color c, const char* append); extern inline vec4 Color_to_vec4(Color c); extern inline Line make_Line(float x1, float y1, float x2, float y2); extern inline float line_func(Line* line, float x, float y); extern inline float line_findy(Line* line, float x); extern inline float line_findx(Line* line, float y); void load_rotation_mat3(mat3 mat, vec3 v, float angle) { float s, c; float xx, yy, zz, xy, yz, zx, xs, ys, zs, one_c; s = sin(angle); c = cos(angle); // Rotation matrix is normalized normalize_vec3(&v); xx = v.x * v.x; yy = v.y * v.y; zz = v.z * v.z; xy = v.x * v.y; yz = v.y * v.z; zx = v.z * v.x; xs = v.x * s; ys = v.y * s; zs = v.z * s; one_c = 1.0f - c; #ifndef ROW_MAJOR mat[0] = (one_c * xx) + c; mat[3] = (one_c * xy) - zs; mat[6] = (one_c * zx) + ys; mat[1] = (one_c * xy) + zs; mat[4] = (one_c * yy) + c; mat[7] = (one_c * yz) - xs; mat[2] = (one_c * zx) - ys; mat[5] = (one_c * yz) + xs; mat[8] = (one_c * zz) + c; #else mat[0] = (one_c * xx) + c; mat[1] = (one_c * xy) - zs; mat[2] = (one_c * zx) + ys; 
mat[3] = (one_c * xy) + zs; mat[4] = (one_c * yy) + c; mat[5] = (one_c * yz) - xs; mat[6] = (one_c * zx) - ys; mat[7] = (one_c * yz) + xs; mat[8] = (one_c * zz) + c; #endif } /* * mat4 */ //TODO use restrict? void mult_mat4_mat4(mat4 c, mat4 a, mat4 b) { #ifndef ROW_MAJOR c[ 0] = a[0]*b[ 0] + a[4]*b[ 1] + a[8]*b[ 2] + a[12]*b[ 3]; c[ 4] = a[0]*b[ 4] + a[4]*b[ 5] + a[8]*b[ 6] + a[12]*b[ 7]; c[ 8] = a[0]*b[ 8] + a[4]*b[ 9] + a[8]*b[10] + a[12]*b[11]; c[12] = a[0]*b[12] + a[4]*b[13] + a[8]*b[14] + a[12]*b[15]; c[ 1] = a[1]*b[ 0] + a[5]*b[ 1] + a[9]*b[ 2] + a[13]*b[ 3]; c[ 5] = a[1]*b[ 4] + a[5]*b[ 5] + a[9]*b[ 6] + a[13]*b[ 7]; c[ 9] = a[1]*b[ 8] + a[5]*b[ 9] + a[9]*b[10] + a[13]*b[11]; c[13] = a[1]*b[12] + a[5]*b[13] + a[9]*b[14] + a[13]*b[15]; c[ 2] = a[2]*b[ 0] + a[6]*b[ 1] + a[10]*b[ 2] + a[14]*b[ 3]; c[ 6] = a[2]*b[ 4] + a[6]*b[ 5] + a[10]*b[ 6] + a[14]*b[ 7]; c[10] = a[2]*b[ 8] + a[6]*b[ 9] + a[10]*b[10] + a[14]*b[11]; c[14] = a[2]*b[12] + a[6]*b[13] + a[10]*b[14] + a[14]*b[15]; c[ 3] = a[3]*b[ 0] + a[7]*b[ 1] + a[11]*b[ 2] + a[15]*b[ 3]; c[ 7] = a[3]*b[ 4] + a[7]*b[ 5] + a[11]*b[ 6] + a[15]*b[ 7]; c[11] = a[3]*b[ 8] + a[7]*b[ 9] + a[11]*b[10] + a[15]*b[11]; c[15] = a[3]*b[12] + a[7]*b[13] + a[11]*b[14] + a[15]*b[15]; #else c[0] = a[0]*b[0] + a[1]*b[4] + a[2]*b[8] + a[3]*b[12]; c[1] = a[0]*b[1] + a[1]*b[5] + a[2]*b[9] + a[3]*b[13]; c[2] = a[0]*b[2] + a[1]*b[6] + a[2]*b[10] + a[3]*b[14]; c[3] = a[0]*b[3] + a[1]*b[7] + a[2]*b[11] + a[3]*b[15]; c[4] = a[4]*b[0] + a[5]*b[4] + a[6]*b[8] + a[7]*b[12]; c[5] = a[4]*b[1] + a[5]*b[5] + a[6]*b[9] + a[7]*b[13]; c[6] = a[4]*b[2] + a[5]*b[6] + a[6]*b[10] + a[7]*b[14]; c[7] = a[4]*b[3] + a[5]*b[7] + a[6]*b[11] + a[7]*b[15]; c[ 8] = a[8]*b[0] + a[9]*b[4] + a[10]*b[8] + a[11]*b[12]; c[ 9] = a[8]*b[1] + a[9]*b[5] + a[10]*b[9] + a[11]*b[13]; c[10] = a[8]*b[2] + a[9]*b[6] + a[10]*b[10] + a[11]*b[14]; c[11] = a[8]*b[3] + a[9]*b[7] + a[10]*b[11] + a[11]*b[15]; c[12] = a[12]*b[0] + a[13]*b[4] + a[14]*b[8] + a[15]*b[12]; c[13] = 
a[12]*b[1] + a[13]*b[5] + a[14]*b[9] + a[15]*b[13]; c[14] = a[12]*b[2] + a[13]*b[6] + a[14]*b[10] + a[15]*b[14]; c[15] = a[12]*b[3] + a[13]*b[7] + a[14]*b[11] + a[15]*b[15]; #endif } void load_rotation_mat4(mat4 mat, vec3 v, float angle) { float s, c; float xx, yy, zz, xy, yz, zx, xs, ys, zs, one_c; s = sin(angle); c = cos(angle); // Rotation matrix is normalized normalize_vec3(&v); xx = v.x * v.x; yy = v.y * v.y; zz = v.z * v.z; xy = v.x * v.y; yz = v.y * v.z; zx = v.z * v.x; xs = v.x * s; ys = v.y * s; zs = v.z * s; one_c = 1.0f - c; #ifndef ROW_MAJOR mat[ 0] = (one_c * xx) + c; mat[ 4] = (one_c * xy) - zs; mat[ 8] = (one_c * zx) + ys; mat[12] = 0.0f; mat[ 1] = (one_c * xy) + zs; mat[ 5] = (one_c * yy) + c; mat[ 9] = (one_c * yz) - xs; mat[13] = 0.0f; mat[ 2] = (one_c * zx) - ys; mat[ 6] = (one_c * yz) + xs; mat[10] = (one_c * zz) + c; mat[14] = 0.0f; mat[ 3] = 0.0f; mat[ 7] = 0.0f; mat[11] = 0.0f; mat[15] = 1.0f; #else mat[0] = (one_c * xx) + c; mat[1] = (one_c * xy) - zs; mat[2] = (one_c * zx) + ys; mat[3] = 0.0f; mat[4] = (one_c * xy) + zs; mat[5] = (one_c * yy) + c; mat[6] = (one_c * yz) - xs; mat[7] = 0.0f; mat[8] = (one_c * zx) - ys; mat[9] = (one_c * yz) + xs; mat[10] = (one_c * zz) + c; mat[11] = 0.0f; mat[12] = 0.0f; mat[13] = 0.0f; mat[14] = 0.0f; mat[15] = 1.0f; #endif } /* TODO static float det_ij(const mat4 m, const int i, const int j) { float ret, mat[3][3]; int x = 0, y = 0; for (int ii=0; ii<4; ++ii) { y = 0; if (ii == i) continue; for (int jj=0; jj<4; ++jj) { if (jj == j) continue; mat[x][y] = m[ii*4+jj]; y++; } x++; } ret = mat[0][0]*(mat[1][1]*mat[2][2]-mat[2][1]*mat[1][2]); ret -= mat[0][1]*(mat[1][0]*mat[2][2]-mat[2][0]*mat[1][2]); ret += mat[0][2]*(mat[1][0]*mat[2][1]-mat[2][0]*mat[1][1]); return ret; } void invert_mat4(mat4 mInverse, const mat4& m) { int i, j; float det, detij; mat4 inverse_mat; // calculate 4x4 determinant det = 0.0f; for (i = 0; i < 4; i++) { det += (i & 0x1) ? 
(-m.matrix[i] * det_ij(m, 0, i)) : (m.matrix[i] * det_ij(m, 0, i)); } det = 1.0f / det; // calculate inverse for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { detij = det_ij(m, j, i); inverse_mat[(i*4)+j] = ((i+j) & 0x1) ? (-detij * det) : (detij *det); } } } */ //////////////////////////////////////////////////////////////////////////////////////////// //assumes converting from canonical view volume [-1,1]^3 //works just like glViewport, x and y are lower left corner. opengl should be 1. void make_viewport_matrix(mat4 mat, int x, int y, unsigned int width, unsigned int height, int opengl) { float w, h, l, t, b, r; if (opengl) { //See glspec page 104, integer grid is lower left pixel corners w = width, h = height; l = x, b = y; //range is [0, w) x [0 , h) //TODO pick best epsilon? r = l + w - 0.01; //epsilon larger than float precision t = b + h - 0.01; #ifndef ROW_MAJOR mat[ 0] = (r - l) / 2; mat[ 4] = 0; mat[ 8] = 0; mat[12] = (l + r) / 2; mat[ 1] = 0; //see below mat[ 5] = (t - b) / 2; mat[ 9] = 0; mat[13] = (b + t) / 2; mat[ 2] = 0; mat[ 6] = 0; mat[10] = 1; mat[14] = 0; mat[ 3] = 0; mat[ 7] = 0; mat[11] = 0; mat[15] = 1; #else mat[0] = (r - l) / 2; mat[1] = 0; mat[2] = 0; mat[3] = (l + r) / 2; mat[4] = 0; //this used to be negative to flip y till I changed glFramebuffer and draw_pixel to accomplish the same thing mat[5] = (t - b) / 2; mat[6] = 0; mat[7] = (b + t) / 2; mat[8] = 0; mat[9] = 0; mat[10] = 1; mat[11] = 0; mat[12] = 0; mat[13] = 0; mat[14] = 0; mat[15] = 1; #endif } else { //old way with pixel centers at integer coordinates //see pages 133/4 and 144 of FoCG //necessary for fast integer only bresenham line drawing w = width, h = height; l = x - 0.5f; b = y - 0.5f; r = l + w; t = b + h; #ifndef ROW_MAJOR mat[ 0] = (r - l) / 2; mat[ 4] = 0; mat[ 8] = 0; mat[12] = (l + r) / 2; mat[ 1] = 0; //see below mat[ 5] = (t - b) / 2; mat[ 9] = 0; mat[13] = (b + t) / 2; mat[ 2] = 0; mat[ 6] = 0; mat[10] = 1; mat[14] = 0; mat[ 3] = 0; mat[ 7] = 0; mat[11] = 0; 
mat[15] = 1; #else mat[0] = (r - l) / 2; mat[1] = 0; mat[2] = 0; mat[3] = (l + r) / 2; mat[4] = 0; //make this negative to reflect y otherwise positive y maps to lower half of the screen //this is mapping the unit square [-1,1]^2 to the window size. x is fine because it increases left to right //but the screen coordinates (ie framebuffer memory) increase top to bottom opposite of the canonical square //negating this is the easiest way to fix it without any side effects. mat[5] = (t - b) / 2; mat[6] = 0; mat[7] = (b + t) / 2; mat[8] = 0; mat[9] = 0; mat[10] = 1; mat[11] = 0; mat[12] = 0; mat[13] = 0; mat[14] = 0; mat[15] = 1; #endif } } //I can't really think of any reason to ever use this matrix alone. //You'd always do ortho * pers and really if you're doing perspective projection //just use make_perspective_matrix (or less likely make perspective_proj_matrix) // //This function is really just for completeness sake based off of FoCG 3rd edition pg 152 //changed slightly. z_near and z_far are always positive and z_near < z_far // //Inconsistently, to generate an ortho matrix to multiply with that will get the equivalent //of the other 2 functions you'd use -z_near and -z_far and near > far. void make_pers_matrix(mat4 mat, float z_near, float z_far) { #ifndef ROW_MAJOR mat[ 0] = z_near; mat[ 4] = 0; mat[ 8] = 0; mat[12] = 0; mat[ 1] = 0; mat[ 5] = z_near; mat[ 9] = 0; mat[13] = 0; mat[ 2] = 0; mat[ 6] = 0; mat[10] = z_near + z_far; mat[14] = (z_far * z_near); mat[ 3] = 0; mat[ 7] = 0; mat[11] = -1; mat[15] = 0; #else mat[0] = z_near; mat[1] = 0; mat[2] = 0; mat[3] = 0; mat[4] = 0; mat[5] = z_near; mat[6] = 0; mat[7] = 0; mat[ 8] = 0; mat[ 9] = 0; mat[10] = z_near + z_far; mat[11] = (z_far * z_near); mat[12] = 0; mat[13] = 0; mat[14] = -1; mat[15] = 0; #endif } // Create a projection matrix // Similiar to the old gluPerspective... fov is in radians btw... 
// Builds a symmetric perspective projection from a vertical field of view
// (radians) and an aspect ratio, then delegates to make_perspective_proj_matrix.
void make_perspective_matrix(mat4 mat, float fov, float aspect, float n, float f)
{
	float t = n * tanf(fov * 0.5f);
	float b = -t;
	float l = b * aspect;
	float r = -l;

	make_perspective_proj_matrix(mat, l, r, b, t, n, f);
}

// General (possibly asymmetric) perspective frustum projection, glFrustum-style:
// l/r/b/t define the near-plane window, n and f the near and far distances.
void make_perspective_proj_matrix(mat4 mat, float l, float r, float b, float t, float n, float f)
{
#ifndef ROW_MAJOR
	mat[ 0] = (2.0f * n) / (r - l);
	mat[ 4] = 0.0f;
	mat[ 8] = (r + l) / (r - l);
	mat[12] = 0.0f;

	mat[ 1] = 0.0f;
	mat[ 5] = (2.0f * n) / (t - b);
	mat[ 9] = (t + b) / (t - b);
	mat[13] = 0.0f;

	mat[ 2] = 0.0f;
	mat[ 6] = 0.0f;
	mat[10] = -((f + n) / (f - n));
	mat[14] = -((2.0f * (f*n))/(f - n));

	mat[ 3] = 0.0f;
	mat[ 7] = 0.0f;
	mat[11] = -1.0f;
	mat[15] = 0.0f;
#else
	mat[0] = (2.0f * n) / (r - l);
	mat[1] = 0.0f;
	mat[2] = (r + l) / (r - l);
	mat[3] = 0.0f;

	mat[4] = 0.0f;
	mat[5] = (2.0f * n) / (t - b);
	mat[6] = (t + b) / (t - b);
	mat[7] = 0.0f;

	mat[8] = 0.0f;
	mat[9] = 0.0f;
	mat[10] = -((f + n) / (f - n));
	mat[11] = -((2.0f * (f*n))/(f - n));

	mat[12] = 0.0f;
	mat[13] = 0.0f;
	mat[14] = -1.0f;
	mat[15] = 0.0f;
#endif
}

//n and f really are near and far not min and max so if you want the standard looking down the -z axis
// then n > f otherwise n < f
void make_orthographic_matrix(mat4 mat, float l, float r, float b, float t, float n, float f)
{
#ifndef ROW_MAJOR
	mat[ 0] = 2.0f / (r - l);
	mat[ 4] = 0;
	mat[ 8] = 0;
	mat[12] = -((r + l)/(r - l));

	mat[ 1] = 0;
	mat[ 5] = 2.0f / (t - b);
	mat[ 9] = 0;
	mat[13] = -((t + b)/(t - b));

	mat[ 2] = 0;
	mat[ 6] = 0;
	mat[10] = 2.0f / (f - n); //removed - in front of 2 . . . book doesn't have it but superbible did
	mat[14] = -((n + f)/(f - n));

	mat[ 3] = 0;
	mat[ 7] = 0;
	mat[11] = 0;
	mat[15] = 1;
#else
	mat[0] = 2.0f / (r - l);
	mat[1] = 0;
	mat[2] = 0;
	mat[3] = -((r + l)/(r - l));

	mat[4] = 0;
	mat[5] = 2.0f / (t - b);
	mat[6] = 0;
	mat[7] = -((t + b)/(t - b));

	mat[8] = 0;
	mat[9] = 0;
	mat[10] = 2.0f / (f - n); //removed - in front of 2 . . . book doesn't have it but superbible did
	mat[11] = -((n + f)/(f - n));

	mat[12] = 0;
	mat[13] = 0;
	mat[14] = 0;
	mat[15] = 1;
#endif

	//now I know why the superbible had the -
	//OpenGL uses a left handed canonical view volume [-1,1]^3 when passed the identity matrix
	//ie in Normalized Device Coordinates. The math/matrix presented in Fundamentals of Computer
	//Graphics assumes a right handed version of the same volume. The negative isn't necessary
	//if you set n and f correctly as near and far not low and high
}

// Builds a view matrix from an eye position, target point, and up vector.
//per https://www.opengl.org/sdk/docs/man2/xhtml/gluLookAt.xml
//and glm.g-truc.net (glm/gtc/matrix_transform.inl)
void lookAt(mat4 mat, vec3 eye, vec3 center, vec3 up)
{
	SET_IDENTITY_MAT4(mat);

	vec3 f = norm_vec3(sub_vec3s(center, eye));
	vec3 s = norm_vec3(cross_product(f, up));
	vec3 u = cross_product(s, f);

	setx_mat4v3(mat, s);
	sety_mat4v3(mat, u);
	setz_mat4v3(mat, negate_vec3(f));
	setc4_mat4v3(mat, make_vec3(-dot_vec3s(s, eye), -dot_vec3s(u, eye), dot_vec3s(f, eye)));
}

/* Generated cvector instantiation for glVertex_Array (same template as the
 * glBuffer/glTexture/glProgram vectors below). */
#define CVECTOR_glVertex_Array_IMPLEMENTATION
#ifndef CVECTOR_glVertex_Array_H
#define CVECTOR_glVertex_Array_H

#include <stdlib.h>

#ifdef __cplusplus
extern "C" {
#endif

/** Data structure for glVertex_Array vector. */
typedef struct cvector_glVertex_Array
{
	glVertex_Array* a;   /**< Array. */
	size_t size;         /**< Current size (amount you use when manipulating array directly). */
	size_t capacity;     /**< Allocated size of array; always >= size.
 */
} cvector_glVertex_Array;

extern size_t CVEC_glVertex_Array_SZ;

int cvec_glVertex_Array(cvector_glVertex_Array* vec, size_t size, size_t capacity);
int cvec_init_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array* vals, size_t num);

cvector_glVertex_Array* cvec_glVertex_Array_heap(size_t size, size_t capacity);
cvector_glVertex_Array* cvec_init_glVertex_Array_heap(glVertex_Array* vals, size_t num);

int cvec_copyc_glVertex_Array(void* dest, void* src);
int cvec_copy_glVertex_Array(cvector_glVertex_Array* dest, cvector_glVertex_Array* src);

int cvec_push_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array a);
glVertex_Array cvec_pop_glVertex_Array(cvector_glVertex_Array* vec);

int cvec_extend_glVertex_Array(cvector_glVertex_Array* vec, size_t num);
int cvec_insert_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array a);
int cvec_insert_array_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array* a, size_t num);
glVertex_Array cvec_replace_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array a);
void cvec_erase_glVertex_Array(cvector_glVertex_Array* vec, size_t start, size_t end);
int cvec_reserve_glVertex_Array(cvector_glVertex_Array* vec, size_t size);
int cvec_set_cap_glVertex_Array(cvector_glVertex_Array* vec, size_t size);
void cvec_set_val_sz_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array val);
void cvec_set_val_cap_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array val);

glVertex_Array* cvec_back_glVertex_Array(cvector_glVertex_Array* vec);

void cvec_clear_glVertex_Array(cvector_glVertex_Array* vec);
void cvec_free_glVertex_Array_heap(void* vec);
void cvec_free_glVertex_Array(void* vec);

#ifdef __cplusplus
}
#endif

/* CVECTOR_glVertex_Array_H */
#endif


#ifdef CVECTOR_glVertex_Array_IMPLEMENTATION

/* Extra capacity added whenever a vector grows beyond its allocation. */
size_t CVEC_glVertex_Array_SZ = 50;

/* Growth policy used by push/insert when the array is full. */
#define CVEC_glVertex_Array_ALLOCATOR(x) ((x+1) * 2)

/* The allocator macros must be overridden all together or not at all. */
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif

#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif

#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif

#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif

/* Allocates the vector struct and its array on the heap; returns NULL on
 * allocation failure.  Caller frees with cvec_free_glVertex_Array_heap. */
cvector_glVertex_Array* cvec_glVertex_Array_heap(size_t size, size_t capacity)
{
	cvector_glVertex_Array* vec;
	if (!(vec = (cvector_glVertex_Array*)CVEC_MALLOC(sizeof(cvector_glVertex_Array)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}

	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glVertex_Array_SZ;

	if (!(vec->a = (glVertex_Array*)CVEC_MALLOC(vec->capacity*sizeof(glVertex_Array)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}

	return vec;
}

/* Heap-allocates a vector initialized with a copy of vals[0..num). */
cvector_glVertex_Array* cvec_init_glVertex_Array_heap(glVertex_Array* vals, size_t num)
{
	cvector_glVertex_Array* vec;

	if (!(vec = (cvector_glVertex_Array*)CVEC_MALLOC(sizeof(cvector_glVertex_Array)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}

	vec->capacity = num + CVEC_glVertex_Array_SZ;
	vec->size = num;
	if (!(vec->a = (glVertex_Array*)CVEC_MALLOC(vec->capacity*sizeof(glVertex_Array)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}

	CVEC_MEMMOVE(vec->a, vals, sizeof(glVertex_Array)*num);

	return vec;
}

/* Initializes a caller-owned vector struct; returns 0 on allocation failure. */
int cvec_glVertex_Array(cvector_glVertex_Array* vec, size_t size, size_t capacity)
{
	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glVertex_Array_SZ;

	if (!(vec->a = (glVertex_Array*)CVEC_MALLOC(vec->capacity*sizeof(glVertex_Array)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}

	return 1;
}

/* Initializes a caller-owned vector with a copy of vals[0..num). */
int cvec_init_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array* vals, size_t num)
{
	vec->capacity = num + CVEC_glVertex_Array_SZ;
	vec->size = num;
	if (!(vec->a = (glVertex_Array*)CVEC_MALLOC(vec->capacity*sizeof(glVertex_Array)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}

	CVEC_MEMMOVE(vec->a, vals, sizeof(glVertex_Array)*num);

	return 1;
}

/* Copy-constructor-style wrapper (void* signature for use as a callback):
 * zeroes dest then deep-copies src into it. */
int cvec_copyc_glVertex_Array(void* dest, void* src)
{
	cvector_glVertex_Array* vec1 = (cvector_glVertex_Array*)dest;
	cvector_glVertex_Array* vec2 = (cvector_glVertex_Array*)src;

	vec1->a = NULL;
	vec1->size = 0;
	vec1->capacity = 0;

	return cvec_copy_glVertex_Array(vec1, vec2);
}

/* Deep-copies src into dest (dest->a is realloc'd); returns 0 on failure,
 * leaving dest unchanged. */
int cvec_copy_glVertex_Array(cvector_glVertex_Array* dest, cvector_glVertex_Array* src)
{
	glVertex_Array* tmp = NULL;
	if (!(tmp = (glVertex_Array*)CVEC_REALLOC(dest->a, src->capacity*sizeof(glVertex_Array)))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	dest->a = tmp;

	CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(glVertex_Array));
	dest->size = src->size;
	dest->capacity = src->capacity;
	return 1;
}

/* Appends a, growing via CVEC_glVertex_Array_ALLOCATOR if full. */
int cvec_push_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array a)
{
	glVertex_Array* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		vec->a[vec->size++] = a;
	} else {
		tmp_sz = CVEC_glVertex_Array_ALLOCATOR(vec->capacity);
		if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->a[vec->size++] = a;
		vec->capacity = tmp_sz;
	}
	return 1;
}

/* Removes and returns the last element; no bounds check. */
glVertex_Array cvec_pop_glVertex_Array(cvector_glVertex_Array* vec)
{
	return vec->a[--vec->size];
}

/* Pointer to the last element; no bounds check. */
glVertex_Array* cvec_back_glVertex_Array(cvector_glVertex_Array* vec)
{
	return &vec->a[vec->size-1];
}

/* Grows size by num, reserving more capacity if needed; new slots are
 * uninitialized. */
int cvec_extend_glVertex_Array(cvector_glVertex_Array* vec, size_t num)
{
	glVertex_Array* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_glVertex_Array_SZ;
		if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = tmp_sz;
	}

	vec->size += num;
	return 1;
}

/* Inserts a at index i, shifting elements [i, size) up by one. */
int cvec_insert_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array a)
{
	glVertex_Array* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glVertex_Array));
		vec->a[i] = a;
	} else {
		tmp_sz = CVEC_glVertex_Array_ALLOCATOR(vec->capacity);
		if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glVertex_Array));
		vec->a[i] = a;
		vec->capacity = tmp_sz;
	}

	vec->size++;
	return 1;
}

/* Inserts a[0..num) at index i, shifting the tail up by num. */
int cvec_insert_array_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array* a, size_t num)
{
	glVertex_Array* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_glVertex_Array_SZ;
		if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = tmp_sz;
	}

	CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(glVertex_Array));
	CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(glVertex_Array));
	vec->size += num;
	return 1;
}

/* Overwrites element i with a and returns the old value. */
glVertex_Array cvec_replace_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array a)
{
	glVertex_Array tmp = vec->a[i];
	vec->a[i] = a;
	return tmp;
}

/* Removes elements in the inclusive range [start, end]. */
void cvec_erase_glVertex_Array(cvector_glVertex_Array* vec, size_t start, size_t end)
{
	size_t d = end - start + 1;
	CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(glVertex_Array));
	vec->size -= d;
}

/* Ensures capacity >= size (plus slack); never shrinks. */
int cvec_reserve_glVertex_Array(cvector_glVertex_Array* vec, size_t size)
{
	glVertex_Array* tmp;
	if (vec->capacity < size) {
		if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*(size+CVEC_glVertex_Array_SZ)))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = size + CVEC_glVertex_Array_SZ;
	}
	return 1;
}

/* Sets capacity exactly, truncating size if necessary. */
int cvec_set_cap_glVertex_Array(cvector_glVertex_Array* vec, size_t size)
{
	glVertex_Array* tmp;
	if (size < vec->size) {
		vec->size = size;
	}

	if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*size))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	vec->a = tmp;
	vec->capacity = size;
	return 1;
}

/* Fills the first size elements with val. */
void cvec_set_val_sz_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array val)
{
	size_t i;
	for (i=0; i<vec->size; i++) {
		vec->a[i] = val;
	}
}

/* Fills the entire allocated capacity with val. */
void cvec_set_val_cap_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array val)
{
	size_t i;
	for (i=0; i<vec->capacity; i++) {
		vec->a[i] = val;
	}
}

/* Logical clear only; the allocation is kept. */
void cvec_clear_glVertex_Array(cvector_glVertex_Array* vec)
{
	vec->size = 0;
}

/* Frees a heap-allocated vector (array then struct); NULL-safe. */
void cvec_free_glVertex_Array_heap(void* vec)
{
	cvector_glVertex_Array* tmp = (cvector_glVertex_Array*)vec;
	if (!tmp) return;
	CVEC_FREE(tmp->a);
	CVEC_FREE(tmp);
}

/* Frees the array of a caller-owned vector and zeroes its counts. */
void cvec_free_glVertex_Array(void* vec)
{
	cvector_glVertex_Array* tmp = (cvector_glVertex_Array*)vec;
	CVEC_FREE(tmp->a);
	tmp->size = 0;
	tmp->capacity = 0;
}

#endif


/* Generated cvector instantiation for glBuffer (same template as above). */
#define CVECTOR_glBuffer_IMPLEMENTATION
#ifndef CVECTOR_glBuffer_H
#define CVECTOR_glBuffer_H

#include <stdlib.h>

#ifdef __cplusplus
extern "C" {
#endif

/** Data structure for glBuffer vector. */
typedef struct cvector_glBuffer
{
	glBuffer* a;       /**< Array. */
	size_t size;       /**< Current size (amount you use when manipulating array directly). */
	size_t capacity;   /**< Allocated size of array; always >= size.
 */
} cvector_glBuffer;

extern size_t CVEC_glBuffer_SZ;

int cvec_glBuffer(cvector_glBuffer* vec, size_t size, size_t capacity);
int cvec_init_glBuffer(cvector_glBuffer* vec, glBuffer* vals, size_t num);

cvector_glBuffer* cvec_glBuffer_heap(size_t size, size_t capacity);
cvector_glBuffer* cvec_init_glBuffer_heap(glBuffer* vals, size_t num);

int cvec_copyc_glBuffer(void* dest, void* src);
int cvec_copy_glBuffer(cvector_glBuffer* dest, cvector_glBuffer* src);

int cvec_push_glBuffer(cvector_glBuffer* vec, glBuffer a);
glBuffer cvec_pop_glBuffer(cvector_glBuffer* vec);

int cvec_extend_glBuffer(cvector_glBuffer* vec, size_t num);
int cvec_insert_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer a);
int cvec_insert_array_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer* a, size_t num);
glBuffer cvec_replace_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer a);
void cvec_erase_glBuffer(cvector_glBuffer* vec, size_t start, size_t end);
int cvec_reserve_glBuffer(cvector_glBuffer* vec, size_t size);
int cvec_set_cap_glBuffer(cvector_glBuffer* vec, size_t size);
void cvec_set_val_sz_glBuffer(cvector_glBuffer* vec, glBuffer val);
void cvec_set_val_cap_glBuffer(cvector_glBuffer* vec, glBuffer val);

glBuffer* cvec_back_glBuffer(cvector_glBuffer* vec);

void cvec_clear_glBuffer(cvector_glBuffer* vec);
void cvec_free_glBuffer_heap(void* vec);
void cvec_free_glBuffer(void* vec);

#ifdef __cplusplus
}
#endif

/* CVECTOR_glBuffer_H */
#endif


#ifdef CVECTOR_glBuffer_IMPLEMENTATION

/* Extra capacity added whenever a vector grows beyond its allocation. */
size_t CVEC_glBuffer_SZ = 50;

/* Growth policy used by push/insert when the array is full. */
#define CVEC_glBuffer_ALLOCATOR(x) ((x+1) * 2)

/* The allocator macros must be overridden all together or not at all. */
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif

#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif

#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif

#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif

/* Allocates the vector struct and its array on the heap; returns NULL on
 * allocation failure.  Caller frees with cvec_free_glBuffer_heap. */
cvector_glBuffer* cvec_glBuffer_heap(size_t size, size_t capacity)
{
	cvector_glBuffer* vec;
	if (!(vec = (cvector_glBuffer*)CVEC_MALLOC(sizeof(cvector_glBuffer)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}

	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glBuffer_SZ;

	if (!(vec->a = (glBuffer*)CVEC_MALLOC(vec->capacity*sizeof(glBuffer)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}

	return vec;
}

/* Heap-allocates a vector initialized with a copy of vals[0..num). */
cvector_glBuffer* cvec_init_glBuffer_heap(glBuffer* vals, size_t num)
{
	cvector_glBuffer* vec;

	if (!(vec = (cvector_glBuffer*)CVEC_MALLOC(sizeof(cvector_glBuffer)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}

	vec->capacity = num + CVEC_glBuffer_SZ;
	vec->size = num;
	if (!(vec->a = (glBuffer*)CVEC_MALLOC(vec->capacity*sizeof(glBuffer)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}

	CVEC_MEMMOVE(vec->a, vals, sizeof(glBuffer)*num);

	return vec;
}

/* Initializes a caller-owned vector struct; returns 0 on allocation failure. */
int cvec_glBuffer(cvector_glBuffer* vec, size_t size, size_t capacity)
{
	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glBuffer_SZ;

	if (!(vec->a = (glBuffer*)CVEC_MALLOC(vec->capacity*sizeof(glBuffer)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}

	return 1;
}

/* Initializes a caller-owned vector with a copy of vals[0..num). */
int cvec_init_glBuffer(cvector_glBuffer* vec, glBuffer* vals, size_t num)
{
	vec->capacity = num + CVEC_glBuffer_SZ;
	vec->size = num;
	if (!(vec->a = (glBuffer*)CVEC_MALLOC(vec->capacity*sizeof(glBuffer)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}

	CVEC_MEMMOVE(vec->a, vals, sizeof(glBuffer)*num);

	return 1;
}

/* Copy-constructor-style wrapper (void* signature for use as a callback):
 * zeroes dest then deep-copies src into it. */
int cvec_copyc_glBuffer(void* dest, void* src)
{
	cvector_glBuffer* vec1 = (cvector_glBuffer*)dest;
	cvector_glBuffer* vec2 = (cvector_glBuffer*)src;

	vec1->a = NULL;
	vec1->size = 0;
	vec1->capacity = 0;

	return cvec_copy_glBuffer(vec1, vec2);
}

/* Deep-copies src into dest (dest->a is realloc'd); returns 0 on failure,
 * leaving dest unchanged. */
int cvec_copy_glBuffer(cvector_glBuffer* dest, cvector_glBuffer* src)
{
	glBuffer* tmp = NULL;
	if (!(tmp = (glBuffer*)CVEC_REALLOC(dest->a, src->capacity*sizeof(glBuffer)))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	dest->a = tmp;

	CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(glBuffer));
	dest->size = src->size;
	dest->capacity = src->capacity;
	return 1;
}

/* Appends a, growing via CVEC_glBuffer_ALLOCATOR if full. */
int cvec_push_glBuffer(cvector_glBuffer* vec, glBuffer a)
{
	glBuffer* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		vec->a[vec->size++] = a;
	} else {
		tmp_sz = CVEC_glBuffer_ALLOCATOR(vec->capacity);
		if (!(tmp = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->a[vec->size++] = a;
		vec->capacity = tmp_sz;
	}
	return 1;
}

/* Removes and returns the last element; no bounds check. */
glBuffer cvec_pop_glBuffer(cvector_glBuffer* vec)
{
	return vec->a[--vec->size];
}

/* Pointer to the last element; no bounds check. */
glBuffer* cvec_back_glBuffer(cvector_glBuffer* vec)
{
	return &vec->a[vec->size-1];
}

/* Grows size by num, reserving more capacity if needed; new slots are
 * uninitialized. */
int cvec_extend_glBuffer(cvector_glBuffer* vec, size_t num)
{
	glBuffer* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_glBuffer_SZ;
		if (!(tmp = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = tmp_sz;
	}

	vec->size += num;
	return 1;
}

/* Inserts a at index i, shifting elements [i, size) up by one. */
int cvec_insert_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer a)
{
	glBuffer* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glBuffer));
		vec->a[i] = a;
	} else {
		tmp_sz = CVEC_glBuffer_ALLOCATOR(vec->capacity);
		if (!(tmp = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glBuffer));
		vec->a[i] = a;
		vec->capacity = tmp_sz;
	}

	vec->size++;
	return 1;
}

/* Inserts a[0..num) at index i, shifting the tail up by num. */
int cvec_insert_array_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer* a, size_t num)
{
	glBuffer* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_glBuffer_SZ;
		if (!(tmp = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = tmp_sz;
	}

	CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(glBuffer));
	CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(glBuffer));
	vec->size += num;
	return 1;
}

/* Overwrites element i with a and returns the old value. */
glBuffer cvec_replace_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer a)
{
	glBuffer tmp = vec->a[i];
	vec->a[i] = a;
	return tmp;
}

/* Removes elements in the inclusive range [start, end]. */
void cvec_erase_glBuffer(cvector_glBuffer* vec, size_t start, size_t end)
{
	size_t d = end - start + 1;
	CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(glBuffer));
	vec->size -= d;
}

/* Ensures capacity >= size (plus slack); never shrinks. */
int cvec_reserve_glBuffer(cvector_glBuffer* vec, size_t size)
{
	glBuffer* tmp;
	if (vec->capacity < size) {
		if (!(tmp = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*(size+CVEC_glBuffer_SZ)))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = size + CVEC_glBuffer_SZ;
	}
	return 1;
}

/* Sets capacity exactly, truncating size if necessary. */
int cvec_set_cap_glBuffer(cvector_glBuffer* vec, size_t size)
{
	glBuffer* tmp;
	if (size < vec->size) {
		vec->size = size;
	}

	if (!(tmp = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*size))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	vec->a = tmp;
	vec->capacity = size;
	return 1;
}

/* Fills the first size elements with val. */
void cvec_set_val_sz_glBuffer(cvector_glBuffer* vec, glBuffer val)
{
	size_t i;
	for (i=0; i<vec->size; i++) {
		vec->a[i] = val;
	}
}

/* Fills the entire allocated capacity with val. */
void cvec_set_val_cap_glBuffer(cvector_glBuffer* vec, glBuffer val)
{
	size_t i;
	for (i=0; i<vec->capacity; i++) {
		vec->a[i] = val;
	}
}

/* Logical clear only; the allocation is kept. */
void cvec_clear_glBuffer(cvector_glBuffer* vec)
{
	vec->size = 0;
}

/* Frees a heap-allocated vector (array then struct); NULL-safe. */
void cvec_free_glBuffer_heap(void* vec)
{
	cvector_glBuffer* tmp = (cvector_glBuffer*)vec;
	if (!tmp) return;
	CVEC_FREE(tmp->a);
	CVEC_FREE(tmp);
}

/* Frees the array of a caller-owned vector and zeroes its counts. */
void cvec_free_glBuffer(void* vec)
{
	cvector_glBuffer* tmp = (cvector_glBuffer*)vec;
	CVEC_FREE(tmp->a);
	tmp->size = 0;
	tmp->capacity = 0;
}

#endif


/* Generated cvector instantiation for glTexture (same template as above). */
#define CVECTOR_glTexture_IMPLEMENTATION
#ifndef CVECTOR_glTexture_H
#define CVECTOR_glTexture_H

#include <stdlib.h>

#ifdef __cplusplus
extern "C" {
#endif

/** Data structure for glTexture vector. */
typedef struct cvector_glTexture
{
	glTexture* a;      /**< Array. */
	size_t size;       /**< Current size (amount you use when manipulating array directly). */
	size_t capacity;   /**< Allocated size of array; always >= size.
 */
} cvector_glTexture;

extern size_t CVEC_glTexture_SZ;

int cvec_glTexture(cvector_glTexture* vec, size_t size, size_t capacity);
int cvec_init_glTexture(cvector_glTexture* vec, glTexture* vals, size_t num);

cvector_glTexture* cvec_glTexture_heap(size_t size, size_t capacity);
cvector_glTexture* cvec_init_glTexture_heap(glTexture* vals, size_t num);

int cvec_copyc_glTexture(void* dest, void* src);
int cvec_copy_glTexture(cvector_glTexture* dest, cvector_glTexture* src);

int cvec_push_glTexture(cvector_glTexture* vec, glTexture a);
glTexture cvec_pop_glTexture(cvector_glTexture* vec);

int cvec_extend_glTexture(cvector_glTexture* vec, size_t num);
int cvec_insert_glTexture(cvector_glTexture* vec, size_t i, glTexture a);
int cvec_insert_array_glTexture(cvector_glTexture* vec, size_t i, glTexture* a, size_t num);
glTexture cvec_replace_glTexture(cvector_glTexture* vec, size_t i, glTexture a);
void cvec_erase_glTexture(cvector_glTexture* vec, size_t start, size_t end);
int cvec_reserve_glTexture(cvector_glTexture* vec, size_t size);
int cvec_set_cap_glTexture(cvector_glTexture* vec, size_t size);
void cvec_set_val_sz_glTexture(cvector_glTexture* vec, glTexture val);
void cvec_set_val_cap_glTexture(cvector_glTexture* vec, glTexture val);

glTexture* cvec_back_glTexture(cvector_glTexture* vec);

void cvec_clear_glTexture(cvector_glTexture* vec);
void cvec_free_glTexture_heap(void* vec);
void cvec_free_glTexture(void* vec);

#ifdef __cplusplus
}
#endif

/* CVECTOR_glTexture_H */
#endif


#ifdef CVECTOR_glTexture_IMPLEMENTATION

/* Extra capacity added whenever a vector grows beyond its allocation. */
size_t CVEC_glTexture_SZ = 50;

/* Growth policy used by push/insert when the array is full. */
#define CVEC_glTexture_ALLOCATOR(x) ((x+1) * 2)

/* The allocator macros must be overridden all together or not at all. */
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif

#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif

#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif

#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif

/* Allocates the vector struct and its array on the heap; returns NULL on
 * allocation failure.  Caller frees with cvec_free_glTexture_heap. */
cvector_glTexture* cvec_glTexture_heap(size_t size, size_t capacity)
{
	cvector_glTexture* vec;
	if (!(vec = (cvector_glTexture*)CVEC_MALLOC(sizeof(cvector_glTexture)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}

	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glTexture_SZ;

	if (!(vec->a = (glTexture*)CVEC_MALLOC(vec->capacity*sizeof(glTexture)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}

	return vec;
}

/* Heap-allocates a vector initialized with a copy of vals[0..num). */
cvector_glTexture* cvec_init_glTexture_heap(glTexture* vals, size_t num)
{
	cvector_glTexture* vec;

	if (!(vec = (cvector_glTexture*)CVEC_MALLOC(sizeof(cvector_glTexture)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}

	vec->capacity = num + CVEC_glTexture_SZ;
	vec->size = num;
	if (!(vec->a = (glTexture*)CVEC_MALLOC(vec->capacity*sizeof(glTexture)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}

	CVEC_MEMMOVE(vec->a, vals, sizeof(glTexture)*num);

	return vec;
}

/* Initializes a caller-owned vector struct; returns 0 on allocation failure. */
int cvec_glTexture(cvector_glTexture* vec, size_t size, size_t capacity)
{
	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glTexture_SZ;

	if (!(vec->a = (glTexture*)CVEC_MALLOC(vec->capacity*sizeof(glTexture)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}

	return 1;
}

/* Initializes a caller-owned vector with a copy of vals[0..num). */
int cvec_init_glTexture(cvector_glTexture* vec, glTexture* vals, size_t num)
{
	vec->capacity = num + CVEC_glTexture_SZ;
	vec->size = num;
	if (!(vec->a = (glTexture*)CVEC_MALLOC(vec->capacity*sizeof(glTexture)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}

	CVEC_MEMMOVE(vec->a, vals, sizeof(glTexture)*num);

	return 1;
}

/* Copy-constructor-style wrapper (void* signature for use as a callback):
 * zeroes dest then deep-copies src into it. */
int cvec_copyc_glTexture(void* dest, void* src)
{
	cvector_glTexture* vec1 = (cvector_glTexture*)dest;
	cvector_glTexture* vec2 = (cvector_glTexture*)src;

	vec1->a = NULL;
	vec1->size = 0;
	vec1->capacity = 0;

	return cvec_copy_glTexture(vec1, vec2);
}

/* Deep-copies src into dest (dest->a is realloc'd); returns 0 on failure,
 * leaving dest unchanged. */
int cvec_copy_glTexture(cvector_glTexture* dest, cvector_glTexture* src)
{
	glTexture* tmp = NULL;
	if (!(tmp = (glTexture*)CVEC_REALLOC(dest->a, src->capacity*sizeof(glTexture)))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	dest->a = tmp;

	CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(glTexture));
	dest->size = src->size;
	dest->capacity = src->capacity;
	return 1;
}

/* Appends a, growing via CVEC_glTexture_ALLOCATOR if full. */
int cvec_push_glTexture(cvector_glTexture* vec, glTexture a)
{
	glTexture* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		vec->a[vec->size++] = a;
	} else {
		tmp_sz = CVEC_glTexture_ALLOCATOR(vec->capacity);
		if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->a[vec->size++] = a;
		vec->capacity = tmp_sz;
	}
	return 1;
}

/* Removes and returns the last element; no bounds check. */
glTexture cvec_pop_glTexture(cvector_glTexture* vec)
{
	return vec->a[--vec->size];
}

/* Pointer to the last element; no bounds check. */
glTexture* cvec_back_glTexture(cvector_glTexture* vec)
{
	return &vec->a[vec->size-1];
}

/* Grows size by num, reserving more capacity if needed; new slots are
 * uninitialized. */
int cvec_extend_glTexture(cvector_glTexture* vec, size_t num)
{
	glTexture* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_glTexture_SZ;
		if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = tmp_sz;
	}

	vec->size += num;
	return 1;
}

/* Inserts a at index i, shifting elements [i, size) up by one. */
int cvec_insert_glTexture(cvector_glTexture* vec, size_t i, glTexture a)
{
	glTexture* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glTexture));
		vec->a[i] = a;
	} else {
		tmp_sz = CVEC_glTexture_ALLOCATOR(vec->capacity);
		if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glTexture));
		vec->a[i] = a;
		vec->capacity = tmp_sz;
	}

	vec->size++;
	return 1;
}

/* Inserts a[0..num) at index i, shifting the tail up by num. */
int cvec_insert_array_glTexture(cvector_glTexture* vec, size_t i, glTexture* a, size_t num)
{
	glTexture* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_glTexture_SZ;
		if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = tmp_sz;
	}

	CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(glTexture));
	CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(glTexture));
	vec->size += num;
	return 1;
}

/* Overwrites element i with a and returns the old value. */
glTexture cvec_replace_glTexture(cvector_glTexture* vec, size_t i, glTexture a)
{
	glTexture tmp = vec->a[i];
	vec->a[i] = a;
	return tmp;
}

/* Removes elements in the inclusive range [start, end]. */
void cvec_erase_glTexture(cvector_glTexture* vec, size_t start, size_t end)
{
	size_t d = end - start + 1;
	CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(glTexture));
	vec->size -= d;
}

/* Ensures capacity >= size (plus slack); never shrinks. */
int cvec_reserve_glTexture(cvector_glTexture* vec, size_t size)
{
	glTexture* tmp;
	if (vec->capacity < size) {
		if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*(size+CVEC_glTexture_SZ)))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = size + CVEC_glTexture_SZ;
	}
	return 1;
}

/* Sets capacity exactly, truncating size if necessary. */
int cvec_set_cap_glTexture(cvector_glTexture* vec, size_t size)
{
	glTexture* tmp;
	if (size < vec->size) {
		vec->size = size;
	}

	if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*size))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	vec->a = tmp;
	vec->capacity = size;
	return 1;
}

/* Fills the first size elements with val. */
void cvec_set_val_sz_glTexture(cvector_glTexture* vec, glTexture val)
{
	size_t i;
	for (i=0; i<vec->size; i++) {
		vec->a[i] = val;
	}
}

/* Fills the entire allocated capacity with val. */
void cvec_set_val_cap_glTexture(cvector_glTexture* vec, glTexture val)
{
	size_t i;
	for (i=0; i<vec->capacity; i++) {
		vec->a[i] = val;
	}
}

/* Logical clear only; the allocation is kept. */
void cvec_clear_glTexture(cvector_glTexture* vec)
{
	vec->size = 0;
}

/* Frees a heap-allocated vector (array then struct); NULL-safe. */
void cvec_free_glTexture_heap(void* vec)
{
	cvector_glTexture* tmp = (cvector_glTexture*)vec;
	if (!tmp) return;
	CVEC_FREE(tmp->a);
	CVEC_FREE(tmp);
}

/* Frees the array of a caller-owned vector and zeroes its counts. */
void cvec_free_glTexture(void* vec)
{
	cvector_glTexture* tmp = (cvector_glTexture*)vec;
	CVEC_FREE(tmp->a);
	tmp->size = 0;
	tmp->capacity = 0;
}

#endif


/* Generated cvector instantiation for glProgram (same template as above). */
#define CVECTOR_glProgram_IMPLEMENTATION
#ifndef CVECTOR_glProgram_H
#define CVECTOR_glProgram_H

#include <stdlib.h>

#ifdef __cplusplus
extern "C" {
#endif

/** Data structure for glProgram vector. */
typedef struct cvector_glProgram
{
	glProgram* a;      /**< Array. */
	size_t size;       /**< Current size (amount you use when manipulating array directly). */
	size_t capacity;   /**< Allocated size of array; always >= size.
*/
} cvector_glProgram;

extern size_t CVEC_glProgram_SZ;

int cvec_glProgram(cvector_glProgram* vec, size_t size, size_t capacity);
int cvec_init_glProgram(cvector_glProgram* vec, glProgram* vals, size_t num);

cvector_glProgram* cvec_glProgram_heap(size_t size, size_t capacity);
cvector_glProgram* cvec_init_glProgram_heap(glProgram* vals, size_t num);

int cvec_copyc_glProgram(void* dest, void* src);
int cvec_copy_glProgram(cvector_glProgram* dest, cvector_glProgram* src);

int cvec_push_glProgram(cvector_glProgram* vec, glProgram a);
glProgram cvec_pop_glProgram(cvector_glProgram* vec);

int cvec_extend_glProgram(cvector_glProgram* vec, size_t num);
int cvec_insert_glProgram(cvector_glProgram* vec, size_t i, glProgram a);
int cvec_insert_array_glProgram(cvector_glProgram* vec, size_t i, glProgram* a, size_t num);
glProgram cvec_replace_glProgram(cvector_glProgram* vec, size_t i, glProgram a);
void cvec_erase_glProgram(cvector_glProgram* vec, size_t start, size_t end);
int cvec_reserve_glProgram(cvector_glProgram* vec, size_t size);
int cvec_set_cap_glProgram(cvector_glProgram* vec, size_t size);
void cvec_set_val_sz_glProgram(cvector_glProgram* vec, glProgram val);
void cvec_set_val_cap_glProgram(cvector_glProgram* vec, glProgram val);

glProgram* cvec_back_glProgram(cvector_glProgram* vec);

void cvec_clear_glProgram(cvector_glProgram* vec);
void cvec_free_glProgram_heap(void* vec);
void cvec_free_glProgram(void* vec);

#ifdef __cplusplus
}
#endif

/* CVECTOR_glProgram_H */
#endif


#ifdef CVECTOR_glProgram_IMPLEMENTATION

/* Extra slots added beyond the requested size when (re)allocating. */
size_t CVEC_glProgram_SZ = 50;

/* Growth policy used when push/insert exceeds capacity. */
#define CVEC_glProgram_ALLOCATOR(x) ((x+1) * 2)

/* The three allocator hooks must be overridden together or not at all. */
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif

#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif

#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif

#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif

/** Allocate both the vector struct and its array on the heap.
 *  capacity is honored if it exceeds size (or equals a nonzero size);
 *  otherwise size + CVEC_glProgram_SZ is used. Returns NULL on OOM. */
cvector_glProgram* cvec_glProgram_heap(size_t size, size_t capacity)
{
    cvector_glProgram* vec;
    if (!(vec = (cvector_glProgram*)CVEC_MALLOC(sizeof(cvector_glProgram)))) {
        CVEC_ASSERT(vec != NULL);
        return NULL;
    }

    vec->size = size;
    vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glProgram_SZ;

    if (!(vec->a = (glProgram*)CVEC_MALLOC(vec->capacity*sizeof(glProgram)))) {
        CVEC_ASSERT(vec->a != NULL);
        CVEC_FREE(vec);
        return NULL;
    }

    return vec;
}

/** Heap-allocate a vector and copy num elements from vals into it. */
cvector_glProgram* cvec_init_glProgram_heap(glProgram* vals, size_t num)
{
    cvector_glProgram* vec;

    if (!(vec = (cvector_glProgram*)CVEC_MALLOC(sizeof(cvector_glProgram)))) {
        CVEC_ASSERT(vec != NULL);
        return NULL;
    }

    vec->capacity = num + CVEC_glProgram_SZ;
    vec->size = num;
    if (!(vec->a = (glProgram*)CVEC_MALLOC(vec->capacity*sizeof(glProgram)))) {
        CVEC_ASSERT(vec->a != NULL);
        CVEC_FREE(vec);
        return NULL;
    }

    CVEC_MEMMOVE(vec->a, vals, sizeof(glProgram)*num);

    return vec;
}

/** Initialize a caller-owned vector struct; allocates only the array.
 *  Returns 0 on allocation failure (size/capacity are zeroed), 1 on success. */
int cvec_glProgram(cvector_glProgram* vec, size_t size, size_t capacity)
{
    vec->size = size;
    vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glProgram_SZ;

    if (!(vec->a = (glProgram*)CVEC_MALLOC(vec->capacity*sizeof(glProgram)))) {
        CVEC_ASSERT(vec->a != NULL);
        vec->size = vec->capacity = 0;
        return 0;
    }

    return 1;
}

/** Initialize a caller-owned vector struct with a copy of vals[0..num). */
int cvec_init_glProgram(cvector_glProgram* vec, glProgram* vals, size_t num)
{
    vec->capacity = num + CVEC_glProgram_SZ;
    vec->size = num;
    if (!(vec->a = (glProgram*)CVEC_MALLOC(vec->capacity*sizeof(glProgram)))) {
        CVEC_ASSERT(vec->a != NULL);
        vec->size = vec->capacity = 0;
        return 0;
    }

    CVEC_MEMMOVE(vec->a, vals, sizeof(glProgram)*num);

    return 1;
}

/** void* wrapper around cvec_copy_glProgram that first zeroes dest
 *  (so the realloc inside copy starts from a NULL array). */
int cvec_copyc_glProgram(void* dest, void* src)
{
    cvector_glProgram* vec1 = (cvector_glProgram*)dest;
    cvector_glProgram* vec2 = (cvector_glProgram*)src;

    vec1->a = NULL;
    vec1->size = 0;
    vec1->capacity = 0;

    return cvec_copy_glProgram(vec1, vec2);
}

/** Deep-copy src into dest, growing dest's array to src->capacity.
 *  dest must be initialized (or zeroed via cvec_copyc_glProgram). */
int cvec_copy_glProgram(cvector_glProgram* dest, cvector_glProgram* src)
{
    glProgram* tmp = NULL;
    if (!(tmp = (glProgram*)CVEC_REALLOC(dest->a, src->capacity*sizeof(glProgram)))) {
        CVEC_ASSERT(tmp != NULL);
        return 0;
    }
    dest->a = tmp;

    CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(glProgram));
    dest->size = src->size;
    dest->capacity = src->capacity;
    return 1;
}

/** Append a, growing via CVEC_glProgram_ALLOCATOR when full. 0 on OOM. */
int cvec_push_glProgram(cvector_glProgram* vec, glProgram a)
{
    glProgram* tmp;
    size_t tmp_sz;
    if (vec->capacity > vec->size) {
        vec->a[vec->size++] = a;
    } else {
        tmp_sz = CVEC_glProgram_ALLOCATOR(vec->capacity);
        if (!(tmp = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*tmp_sz))) {
            CVEC_ASSERT(tmp != NULL);
            return 0;
        }
        vec->a = tmp;
        vec->a[vec->size++] = a;
        vec->capacity = tmp_sz;
    }
    return 1;
}

/** Remove and return the last element.
 *  NOTE(review): no empty check — popping a size-0 vector underflows size. */
glProgram cvec_pop_glProgram(cvector_glProgram* vec)
{
    return vec->a[--vec->size];
}

/** Pointer to the last element (undefined for an empty vector). */
glProgram* cvec_back_glProgram(cvector_glProgram* vec)
{
    return &vec->a[vec->size-1];
}

/** Grow size by num, reallocating if needed; new slots are uninitialized. */
int cvec_extend_glProgram(cvector_glProgram* vec, size_t num)
{
    glProgram* tmp;
    size_t tmp_sz;
    if (vec->capacity < vec->size + num) {
        tmp_sz = vec->capacity + num + CVEC_glProgram_SZ;
        if (!(tmp = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*tmp_sz))) {
            CVEC_ASSERT(tmp != NULL);
            return 0;
        }
        vec->a = tmp;
        vec->capacity = tmp_sz;
    }

    vec->size += num;
    return 1;
}

/** Insert a at index i, shifting the tail right by one. 0 on OOM. */
int cvec_insert_glProgram(cvector_glProgram* vec, size_t i, glProgram a)
{
    glProgram* tmp;
    size_t tmp_sz;
    if (vec->capacity > vec->size) {
        CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glProgram));
        vec->a[i] = a;
    } else {
        tmp_sz = CVEC_glProgram_ALLOCATOR(vec->capacity);
        if (!(tmp = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*tmp_sz))) {
            CVEC_ASSERT(tmp != NULL);
            return 0;
        }
        vec->a = tmp;
        CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glProgram));
        vec->a[i] = a;
        vec->capacity = tmp_sz;
    }

    vec->size++;
    return 1;
}

/** Insert num elements from a at index i, shifting the tail right by num. */
int cvec_insert_array_glProgram(cvector_glProgram* vec, size_t i, glProgram* a, size_t num)
{
    glProgram* tmp;
    size_t tmp_sz;
    if (vec->capacity < vec->size + num) {
        tmp_sz = vec->capacity + num + CVEC_glProgram_SZ;
        if (!(tmp = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*tmp_sz))) {
            CVEC_ASSERT(tmp != NULL);
            return 0;
        }
        vec->a = tmp;
        vec->capacity = tmp_sz;
    }

    CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(glProgram));
    CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(glProgram));
    vec->size += num;
    return 1;
}

/** Overwrite element i with a and return the old value. */
glProgram cvec_replace_glProgram(cvector_glProgram* vec, size_t i, glProgram a)
{
    glProgram tmp = vec->a[i];
    vec->a[i] = a;
    return tmp;
}

/** Remove the inclusive range [start, end], shifting the tail left.
 *  NOTE(review): assumes start <= end < size — no bounds checking. */
void cvec_erase_glProgram(cvector_glProgram* vec, size_t start, size_t end)
{
    size_t d = end - start + 1;
    CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(glProgram));
    vec->size -= d;
}

/** Ensure capacity for at least size elements (plus slack); never shrinks. */
int cvec_reserve_glProgram(cvector_glProgram* vec, size_t size)
{
    glProgram* tmp;
    if (vec->capacity < size) {
        if (!(tmp = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*(size+CVEC_glProgram_SZ)))) {
            CVEC_ASSERT(tmp != NULL);
            return 0;
        }
        vec->a = tmp;
        vec->capacity = size + CVEC_glProgram_SZ;
    }
    return 1;
}

/** Set capacity exactly, truncating size if necessary. 0 on OOM. */
int cvec_set_cap_glProgram(cvector_glProgram* vec, size_t size)
{
    glProgram* tmp;
    if (size < vec->size) {
        vec->size = size;
    }

    if (!(tmp = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*size))) {
        CVEC_ASSERT(tmp != NULL);
        return 0;
    }
    vec->a = tmp;
    vec->capacity = size;
    return 1;
}

/** Set the first vec->size elements to val. */
void cvec_set_val_sz_glProgram(cvector_glProgram* vec, glProgram val)
{
    size_t i;
    for (i=0; i<vec->size; i++) {
        vec->a[i] = val;
    }
}

/** Set all allocated elements (the full capacity, even past size) to val. */
void cvec_set_val_cap_glProgram(cvector_glProgram* vec, glProgram val)
{
    size_t i;
    for (i=0; i<vec->capacity; i++) {
        vec->a[i] = val;
    }
}

/** Logically empty the vector; the allocation is kept for reuse. */
void cvec_clear_glProgram(cvector_glProgram* vec)
{
    vec->size = 0;
}

/** Free a heap-allocated vector: the array, then the struct itself. NULL-safe. */
void cvec_free_glProgram_heap(void* vec)
{
    cvector_glProgram* tmp = (cvector_glProgram*)vec;
    if (!tmp) return;
    CVEC_FREE(tmp->a);
    CVEC_FREE(tmp);
}

/** Free the internal array of a non-heap vector and zero the counters.
 *  NOTE(review): tmp->a is not reset to NULL — dangling until re-init. */
void cvec_free_glProgram(void* vec)
{
    cvector_glProgram* tmp = (cvector_glProgram*)vec;
    CVEC_FREE(tmp->a);
    tmp->size = 0;
    tmp->capacity = 0;
}

#endif


/* Generated cvector instantiation for glVertex (header + implementation). */
#define CVECTOR_glVertex_IMPLEMENTATION

#ifndef CVECTOR_glVertex_H
#define CVECTOR_glVertex_H

#include <stdlib.h>

#ifdef __cplusplus
extern "C" {
#endif

/** Data structure for glVertex vector. */
typedef struct cvector_glVertex
{
    glVertex* a;     /**< Array. */
    size_t size;     /**< Current size (amount you use when manipulating array directly). */
    size_t capacity; /**< Allocated size of array; always >= size.
*/
} cvector_glVertex;

extern size_t CVEC_glVertex_SZ;

int cvec_glVertex(cvector_glVertex* vec, size_t size, size_t capacity);
int cvec_init_glVertex(cvector_glVertex* vec, glVertex* vals, size_t num);

cvector_glVertex* cvec_glVertex_heap(size_t size, size_t capacity);
cvector_glVertex* cvec_init_glVertex_heap(glVertex* vals, size_t num);

int cvec_copyc_glVertex(void* dest, void* src);
int cvec_copy_glVertex(cvector_glVertex* dest, cvector_glVertex* src);

int cvec_push_glVertex(cvector_glVertex* vec, glVertex a);
glVertex cvec_pop_glVertex(cvector_glVertex* vec);

int cvec_extend_glVertex(cvector_glVertex* vec, size_t num);
int cvec_insert_glVertex(cvector_glVertex* vec, size_t i, glVertex a);
int cvec_insert_array_glVertex(cvector_glVertex* vec, size_t i, glVertex* a, size_t num);
glVertex cvec_replace_glVertex(cvector_glVertex* vec, size_t i, glVertex a);
void cvec_erase_glVertex(cvector_glVertex* vec, size_t start, size_t end);
int cvec_reserve_glVertex(cvector_glVertex* vec, size_t size);
int cvec_set_cap_glVertex(cvector_glVertex* vec, size_t size);
void cvec_set_val_sz_glVertex(cvector_glVertex* vec, glVertex val);
void cvec_set_val_cap_glVertex(cvector_glVertex* vec, glVertex val);

glVertex* cvec_back_glVertex(cvector_glVertex* vec);

void cvec_clear_glVertex(cvector_glVertex* vec);
void cvec_free_glVertex_heap(void* vec);
void cvec_free_glVertex(void* vec);

#ifdef __cplusplus
}
#endif

/* CVECTOR_glVertex_H */
#endif


#ifdef CVECTOR_glVertex_IMPLEMENTATION

/* Extra slots added beyond the requested size when (re)allocating. */
size_t CVEC_glVertex_SZ = 50;

/* Growth policy used when push/insert exceeds capacity. */
#define CVEC_glVertex_ALLOCATOR(x) ((x+1) * 2)

/* The three allocator hooks must be overridden together or not at all. */
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif

#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif

#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif

#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif

/** Allocate both the vector struct and its array on the heap.
 *  capacity is honored if it exceeds size (or equals a nonzero size);
 *  otherwise size + CVEC_glVertex_SZ is used. Returns NULL on OOM. */
cvector_glVertex* cvec_glVertex_heap(size_t size, size_t capacity)
{
    cvector_glVertex* vec;
    if (!(vec = (cvector_glVertex*)CVEC_MALLOC(sizeof(cvector_glVertex)))) {
        CVEC_ASSERT(vec != NULL);
        return NULL;
    }

    vec->size = size;
    vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glVertex_SZ;

    if (!(vec->a = (glVertex*)CVEC_MALLOC(vec->capacity*sizeof(glVertex)))) {
        CVEC_ASSERT(vec->a != NULL);
        CVEC_FREE(vec);
        return NULL;
    }

    return vec;
}

/** Heap-allocate a vector and copy num elements from vals into it. */
cvector_glVertex* cvec_init_glVertex_heap(glVertex* vals, size_t num)
{
    cvector_glVertex* vec;

    if (!(vec = (cvector_glVertex*)CVEC_MALLOC(sizeof(cvector_glVertex)))) {
        CVEC_ASSERT(vec != NULL);
        return NULL;
    }

    vec->capacity = num + CVEC_glVertex_SZ;
    vec->size = num;
    if (!(vec->a = (glVertex*)CVEC_MALLOC(vec->capacity*sizeof(glVertex)))) {
        CVEC_ASSERT(vec->a != NULL);
        CVEC_FREE(vec);
        return NULL;
    }

    CVEC_MEMMOVE(vec->a, vals, sizeof(glVertex)*num);

    return vec;
}

/** Initialize a caller-owned vector struct; allocates only the array.
 *  Returns 0 on allocation failure (size/capacity are zeroed), 1 on success. */
int cvec_glVertex(cvector_glVertex* vec, size_t size, size_t capacity)
{
    vec->size = size;
    vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glVertex_SZ;

    if (!(vec->a = (glVertex*)CVEC_MALLOC(vec->capacity*sizeof(glVertex)))) {
        CVEC_ASSERT(vec->a != NULL);
        vec->size = vec->capacity = 0;
        return 0;
    }

    return 1;
}

/** Initialize a caller-owned vector struct with a copy of vals[0..num). */
int cvec_init_glVertex(cvector_glVertex* vec, glVertex* vals, size_t num)
{
    vec->capacity = num + CVEC_glVertex_SZ;
    vec->size = num;
    if (!(vec->a = (glVertex*)CVEC_MALLOC(vec->capacity*sizeof(glVertex)))) {
        CVEC_ASSERT(vec->a != NULL);
        vec->size = vec->capacity = 0;
        return 0;
    }

    CVEC_MEMMOVE(vec->a, vals, sizeof(glVertex)*num);

    return 1;
}

/** void* wrapper around cvec_copy_glVertex that first zeroes dest
 *  (so the realloc inside copy starts from a NULL array). */
int cvec_copyc_glVertex(void* dest, void* src)
{
    cvector_glVertex* vec1 = (cvector_glVertex*)dest;
    cvector_glVertex* vec2 = (cvector_glVertex*)src;

    vec1->a = NULL;
    vec1->size = 0;
    vec1->capacity = 0;

    return cvec_copy_glVertex(vec1, vec2);
}

/** Deep-copy src into dest, growing dest's array to src->capacity.
 *  dest must be initialized (or zeroed via cvec_copyc_glVertex). */
int cvec_copy_glVertex(cvector_glVertex* dest, cvector_glVertex* src)
{
    glVertex* tmp = NULL;
    if (!(tmp = (glVertex*)CVEC_REALLOC(dest->a, src->capacity*sizeof(glVertex)))) {
        CVEC_ASSERT(tmp != NULL);
        return 0;
    }
    dest->a = tmp;

    CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(glVertex));
    dest->size = src->size;
    dest->capacity = src->capacity;
    return 1;
}

/** Append a, growing via CVEC_glVertex_ALLOCATOR when full. 0 on OOM. */
int cvec_push_glVertex(cvector_glVertex* vec, glVertex a)
{
    glVertex* tmp;
    size_t tmp_sz;
    if (vec->capacity > vec->size) {
        vec->a[vec->size++] = a;
    } else {
        tmp_sz = CVEC_glVertex_ALLOCATOR(vec->capacity);
        if (!(tmp = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*tmp_sz))) {
            CVEC_ASSERT(tmp != NULL);
            return 0;
        }
        vec->a = tmp;
        vec->a[vec->size++] = a;
        vec->capacity = tmp_sz;
    }
    return 1;
}

/** Remove and return the last element.
 *  NOTE(review): no empty check — popping a size-0 vector underflows size. */
glVertex cvec_pop_glVertex(cvector_glVertex* vec)
{
    return vec->a[--vec->size];
}

/** Pointer to the last element (undefined for an empty vector). */
glVertex* cvec_back_glVertex(cvector_glVertex* vec)
{
    return &vec->a[vec->size-1];
}

/** Grow size by num, reallocating if needed; new slots are uninitialized. */
int cvec_extend_glVertex(cvector_glVertex* vec, size_t num)
{
    glVertex* tmp;
    size_t tmp_sz;
    if (vec->capacity < vec->size + num) {
        tmp_sz = vec->capacity + num + CVEC_glVertex_SZ;
        if (!(tmp = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*tmp_sz))) {
            CVEC_ASSERT(tmp != NULL);
            return 0;
        }
        vec->a = tmp;
        vec->capacity = tmp_sz;
    }

    vec->size += num;
    return 1;
}

/** Insert a at index i, shifting the tail right by one. 0 on OOM. */
int cvec_insert_glVertex(cvector_glVertex* vec, size_t i, glVertex a)
{
    glVertex* tmp;
    size_t tmp_sz;
    if (vec->capacity > vec->size) {
        CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glVertex));
        vec->a[i] = a;
    } else {
        tmp_sz = CVEC_glVertex_ALLOCATOR(vec->capacity);
        if (!(tmp = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*tmp_sz))) {
            CVEC_ASSERT(tmp != NULL);
            return 0;
        }
        vec->a = tmp;
        CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glVertex));
        vec->a[i] = a;
        vec->capacity = tmp_sz;
    }

    vec->size++;
    return 1;
}

/** Insert num elements from a at index i, shifting the tail right by num. */
int cvec_insert_array_glVertex(cvector_glVertex* vec, size_t i, glVertex* a, size_t num)
{
    glVertex* tmp;
    size_t tmp_sz;
    if (vec->capacity < vec->size + num) {
        tmp_sz = vec->capacity + num + CVEC_glVertex_SZ;
        if (!(tmp = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*tmp_sz))) {
            CVEC_ASSERT(tmp != NULL);
            return 0;
        }
        vec->a = tmp;
        vec->capacity = tmp_sz;
    }

    CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(glVertex));
    CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(glVertex));
    vec->size += num;
    return 1;
}

/** Overwrite element i with a and return the old value. */
glVertex cvec_replace_glVertex(cvector_glVertex* vec, size_t i, glVertex a)
{
    glVertex tmp = vec->a[i];
    vec->a[i] = a;
    return tmp;
}

/** Remove the inclusive range [start, end], shifting the tail left.
 *  NOTE(review): assumes start <= end < size — no bounds checking. */
void cvec_erase_glVertex(cvector_glVertex* vec, size_t start, size_t end)
{
    size_t d = end - start + 1;
    CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(glVertex));
    vec->size -= d;
}

/** Ensure capacity for at least size elements (plus slack); never shrinks. */
int cvec_reserve_glVertex(cvector_glVertex* vec, size_t size)
{
    glVertex* tmp;
    if (vec->capacity < size) {
        if (!(tmp = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*(size+CVEC_glVertex_SZ)))) {
            CVEC_ASSERT(tmp != NULL);
            return 0;
        }
        vec->a = tmp;
        vec->capacity = size + CVEC_glVertex_SZ;
    }
    return 1;
}

/** Set capacity exactly, truncating size if necessary. 0 on OOM. */
int cvec_set_cap_glVertex(cvector_glVertex* vec, size_t size)
{
    glVertex* tmp;
    if (size < vec->size) {
        vec->size = size;
    }

    if (!(tmp = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*size))) {
        CVEC_ASSERT(tmp != NULL);
        return 0;
    }
    vec->a
*/
} cvector_float;

extern size_t CVEC_float_SZ;

int cvec_float(cvector_float* vec, size_t size, size_t capacity);
int cvec_init_float(cvector_float* vec, float* vals, size_t num);

cvector_float* cvec_float_heap(size_t size, size_t capacity);
cvector_float* cvec_init_float_heap(float* vals, size_t num);

int cvec_copyc_float(void* dest, void* src);
int cvec_copy_float(cvector_float* dest, cvector_float* src);

int cvec_push_float(cvector_float* vec, float a);
float cvec_pop_float(cvector_float* vec);

int cvec_extend_float(cvector_float* vec, size_t num);
int cvec_insert_float(cvector_float* vec, size_t i, float a);
int cvec_insert_array_float(cvector_float* vec, size_t i, float* a, size_t num);
float cvec_replace_float(cvector_float* vec, size_t i, float a);
void cvec_erase_float(cvector_float* vec, size_t start, size_t end);
int cvec_reserve_float(cvector_float* vec, size_t size);
int cvec_set_cap_float(cvector_float* vec, size_t size);
void cvec_set_val_sz_float(cvector_float* vec, float val);
void cvec_set_val_cap_float(cvector_float* vec, float val);

float* cvec_back_float(cvector_float* vec);

void cvec_clear_float(cvector_float* vec);
void cvec_free_float_heap(void* vec);
void cvec_free_float(void* vec);

#ifdef __cplusplus
}
#endif

/* CVECTOR_float_H */
#endif


#ifdef CVECTOR_float_IMPLEMENTATION

/* Extra slots added beyond the requested size when (re)allocating. */
size_t CVEC_float_SZ = 50;

/* Growth policy used when push/insert exceeds capacity. */
#define CVEC_float_ALLOCATOR(x) ((x+1) * 2)

/* The three allocator hooks must be overridden together or not at all. */
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif

#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif

#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif

#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif

/** Allocate both the vector struct and its array on the heap.
 *  capacity is honored if it exceeds size (or equals a nonzero size);
 *  otherwise size + CVEC_float_SZ is used. Returns NULL on OOM. */
cvector_float* cvec_float_heap(size_t size, size_t capacity)
{
    cvector_float* vec;
    if (!(vec = (cvector_float*)CVEC_MALLOC(sizeof(cvector_float)))) {
        CVEC_ASSERT(vec != NULL);
        return NULL;
    }

    vec->size = size;
    vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_float_SZ;

    if (!(vec->a = (float*)CVEC_MALLOC(vec->capacity*sizeof(float)))) {
        CVEC_ASSERT(vec->a != NULL);
        CVEC_FREE(vec);
        return NULL;
    }

    return vec;
}

/** Heap-allocate a vector and copy num elements from vals into it. */
cvector_float* cvec_init_float_heap(float* vals, size_t num)
{
    cvector_float* vec;

    if (!(vec = (cvector_float*)CVEC_MALLOC(sizeof(cvector_float)))) {
        CVEC_ASSERT(vec != NULL);
        return NULL;
    }

    vec->capacity = num + CVEC_float_SZ;
    vec->size = num;
    if (!(vec->a = (float*)CVEC_MALLOC(vec->capacity*sizeof(float)))) {
        CVEC_ASSERT(vec->a != NULL);
        CVEC_FREE(vec);
        return NULL;
    }

    CVEC_MEMMOVE(vec->a, vals, sizeof(float)*num);

    return vec;
}

/** Initialize a caller-owned vector struct; allocates only the array.
 *  Returns 0 on allocation failure (size/capacity are zeroed), 1 on success. */
int cvec_float(cvector_float* vec, size_t size, size_t capacity)
{
    vec->size = size;
    vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_float_SZ;

    if (!(vec->a = (float*)CVEC_MALLOC(vec->capacity*sizeof(float)))) {
        CVEC_ASSERT(vec->a != NULL);
        vec->size = vec->capacity = 0;
        return 0;
    }

    return 1;
}

/** Initialize a caller-owned vector struct with a copy of vals[0..num). */
int cvec_init_float(cvector_float* vec, float* vals, size_t num)
{
    vec->capacity = num + CVEC_float_SZ;
    vec->size = num;
    if (!(vec->a = (float*)CVEC_MALLOC(vec->capacity*sizeof(float)))) {
        CVEC_ASSERT(vec->a != NULL);
        vec->size = vec->capacity = 0;
        return 0;
    }

    CVEC_MEMMOVE(vec->a, vals, sizeof(float)*num);

    return 1;
}

/** void* wrapper around cvec_copy_float that first zeroes dest
 *  (so the realloc inside copy starts from a NULL array). */
int cvec_copyc_float(void* dest, void* src)
{
    cvector_float* vec1 = (cvector_float*)dest;
    cvector_float* vec2 = (cvector_float*)src;

    vec1->a = NULL;
    vec1->size = 0;
    vec1->capacity = 0;

    return cvec_copy_float(vec1, vec2);
}

/** Deep-copy src into dest, growing dest's array to src->capacity.
 *  dest must be initialized (or zeroed via cvec_copyc_float). */
int cvec_copy_float(cvector_float* dest, cvector_float* src)
{
    float* tmp = NULL;
    if (!(tmp = (float*)CVEC_REALLOC(dest->a, src->capacity*sizeof(float)))) {
        CVEC_ASSERT(tmp != NULL);
        return 0;
    }
    dest->a = tmp;

    CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(float));
    dest->size = src->size;
    dest->capacity = src->capacity;
    return 1;
}

/** Append a, growing via CVEC_float_ALLOCATOR when full. 0 on OOM. */
int cvec_push_float(cvector_float* vec, float a)
{
    float* tmp;
    size_t tmp_sz;
    if (vec->capacity > vec->size) {
        vec->a[vec->size++] = a;
    } else {
        tmp_sz = CVEC_float_ALLOCATOR(vec->capacity);
        if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*tmp_sz))) {
            CVEC_ASSERT(tmp != NULL);
            return 0;
        }
        vec->a = tmp;
        vec->a[vec->size++] = a;
        vec->capacity = tmp_sz;
    }
    return 1;
}

/** Remove and return the last element.
 *  NOTE(review): no empty check — popping a size-0 vector underflows size. */
float cvec_pop_float(cvector_float* vec)
{
    return vec->a[--vec->size];
}

/** Pointer to the last element (undefined for an empty vector). */
float* cvec_back_float(cvector_float* vec)
{
    return &vec->a[vec->size-1];
}

/** Grow size by num, reallocating if needed; new slots are uninitialized. */
int cvec_extend_float(cvector_float* vec, size_t num)
{
    float* tmp;
    size_t tmp_sz;
    if (vec->capacity < vec->size + num) {
        tmp_sz = vec->capacity + num + CVEC_float_SZ;
        if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*tmp_sz))) {
            CVEC_ASSERT(tmp != NULL);
            return 0;
        }
        vec->a = tmp;
        vec->capacity = tmp_sz;
    }

    vec->size += num;
    return 1;
}

/** Insert a at index i, shifting the tail right by one. 0 on OOM. */
int cvec_insert_float(cvector_float* vec, size_t i, float a)
{
    float* tmp;
    size_t tmp_sz;
    if (vec->capacity > vec->size) {
        CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(float));
        vec->a[i] = a;
    } else {
        tmp_sz = CVEC_float_ALLOCATOR(vec->capacity);
        if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*tmp_sz))) {
            CVEC_ASSERT(tmp != NULL);
            return 0;
        }
        vec->a = tmp;
        CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(float));
        vec->a[i] = a;
        vec->capacity = tmp_sz;
    }

    vec->size++;
    return 1;
}

/** Insert num elements from a at index i, shifting the tail right by num. */
int cvec_insert_array_float(cvector_float* vec, size_t i, float* a, size_t num)
{
    float* tmp;
    size_t tmp_sz;
    if (vec->capacity < vec->size + num) {
        tmp_sz = vec->capacity + num + CVEC_float_SZ;
        if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*tmp_sz))) {
            CVEC_ASSERT(tmp != NULL);
            return 0;
        }
        vec->a = tmp;
        vec->capacity = tmp_sz;
    }

    CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(float));
    CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(float));
    vec->size += num;
    return 1;
}

/** Overwrite element i with a and return the old value. */
float cvec_replace_float(cvector_float* vec, size_t i, float a)
{
    float tmp = vec->a[i];
    vec->a[i] = a;
    return tmp;
}

/** Remove the inclusive range [start, end], shifting the tail left.
 *  NOTE(review): assumes start <= end < size — no bounds checking. */
void cvec_erase_float(cvector_float* vec, size_t start, size_t end)
{
    size_t d = end - start + 1;
    CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(float));
    vec->size -= d;
}

/** Ensure capacity for at least size elements (plus slack); never shrinks. */
int cvec_reserve_float(cvector_float* vec, size_t size)
{
    float* tmp;
    if (vec->capacity < size) {
        if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*(size+CVEC_float_SZ)))) {
            CVEC_ASSERT(tmp != NULL);
            return 0;
        }
        vec->a = tmp;
        vec->capacity = size + CVEC_float_SZ;
    }
    return 1;
}

/** Set capacity exactly, truncating size if necessary. 0 on OOM. */
int cvec_set_cap_float(cvector_float* vec, size_t size)
{
    float* tmp;
    if (size < vec->size) {
        vec->size = size;
    }

    if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*size))) {
        CVEC_ASSERT(tmp != NULL);
        return 0;
    }
    vec->a = tmp;
    vec->capacity = size;
    return 1;
}

/** Set the first vec->size elements to val. */
void cvec_set_val_sz_float(cvector_float* vec, float val)
{
    size_t i;
    for (i=0; i<vec->size; i++) {
        vec->a[i] = val;
    }
}

/** Set all allocated elements (the full capacity, even past size) to val. */
void cvec_set_val_cap_float(cvector_float* vec, float val)
{
    size_t i;
    for (i=0; i<vec->capacity; i++) {
        vec->a[i] = val;
    }
}

/** Logically empty the vector; the allocation is kept for reuse. */
void cvec_clear_float(cvector_float* vec)
{
    vec->size = 0;
}

/** Free a heap-allocated vector: the array, then the struct itself. NULL-safe. */
void cvec_free_float_heap(void* vec)
{
    cvector_float* tmp = (cvector_float*)vec;
    if (!tmp) return;
    CVEC_FREE(tmp->a);
    CVEC_FREE(tmp);
}

/** Free the internal array of a non-heap vector and zero the counters.
 *  NOTE(review): tmp->a is not reset to NULL — dangling until re-init. */
void cvec_free_float(void* vec)
{
    cvector_float* tmp = (cvector_float*)vec;
    CVEC_FREE(tmp->a);
    tmp->size = 0;
    tmp->capacity = 0;
}

#endif

/* The single global GL context this software rasterizer operates on. */
static glContext* c;

/* Forward declarations for the rasterization pipeline below. */
static Color blend_pixel(vec4 src, vec4 dst);
static void draw_pixel_vec2(vec4 cf, vec2 pos, float z);
static void draw_pixel(vec4 cf, int x, int y, float z);
static void run_pipeline(GLenum mode, GLint first, GLsizei count, GLsizei instance, GLuint base_instance, GLboolean use_elements);

static void draw_triangle_clip(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke, int clip_bit);
static void draw_triangle_point(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke);
static void draw_triangle_line(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke);
static void draw_triangle_fill(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke);
static void draw_triangle_final(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke);
static void draw_triangle(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke);

static void draw_line_clip(glVertex* v1, glVertex* v2);
static void draw_line_shader(vec4 v1, vec4 v2, float* v1_out, float* v2_out, unsigned int provoke);
static void draw_line_smooth_shader(vec4 v1, vec4 v2, float* v1_out, float* v2_out, unsigned int provoke);

/* this clip epsilon is needed to avoid some rounding errors after several clipping stages */
#define CLIP_EPSILON (1E-5)

/** Compute a 6-bit outcode for clip-space point pt: bits 0/1 = z outside
 *  -w/+w (masked off entirely when depth clamping is enabled), bits 2/3 =
 *  x outside, bits 4/5 = y outside. 0 means fully inside the frustum. */
static inline int gl_clipcode(vec4 pt)
{
    float w;

    w = pt.w * (1.0 + CLIP_EPSILON);
    return
        (((pt.z < -w) |
         ((pt.z > w) << 1)) &
         (!c->depth_clamp |
          !c->depth_clamp << 1)) |

        ((pt.x < -w) << 2) |
        ((pt.x > w) << 3) |

        ((pt.y < -w) << 4) |
        ((pt.y > w) << 5);
}

/** Return 1 if the triangle v0,v1,v2 is front-facing per c->front_face,
 *  0 otherwise (used for face culling). */
static int is_front_facing(glVertex* v0, glVertex* v1, glVertex* v2)
{
	//according to docs culling is done based on window coordinates
	//See page 3.6.1 page 116 of glspec33.core for more on rasterization, culling etc.
	//
	//TODO bug where it doesn't correctly cull if part of the triangle goes behind eye
	vec3 normal, tmpvec3 = { 0, 0, 1 };

	vec3 p0 = vec4_to_vec3h(v0->screen_space);
	vec3 p1 = vec4_to_vec3h(v1->screen_space);
	vec3 p2 = vec4_to_vec3h(v2->screen_space);

	//float a;

	//method from spec (signed area in window space)
	//a = p0.x*p1.y - p1.x*p0.y + p1.x*p2.y - p2.x*p1.y + p2.x*p0.y - p0.x*p2.y;
	//a /= 2;

	// equivalent: check the z sign of the screen-space face normal instead
	normal = cross_product(sub_vec3s(p1, p0), sub_vec3s(p2, p0));

	if (c->front_face == GL_CW) {
		//a = -a;
		normal = negate_vec3(normal);
	}

	//if (a <= 0) {
	if (dot_vec3s(normal, tmpvec3) <= 0) {
		return 0;
	}

	return 1;
}

/** Fetch vertex attribute i from the bound buffers, run the vertex shader,
 *  and store its outputs/clip position/clip code into c->glverts.a[vert].
 *  enabled[0..num_enabled) lists the indices of enabled, non-instanced
 *  attributes (built once by vertex_stage). */
static void do_vertex(glVertex_Attrib* v, int* enabled, unsigned int num_enabled, unsigned int i, unsigned int vert)
{
	GLuint buf;
	u8* buf_pos;
	vec4 tmpvec4;

	// copy/prep vertex attributes from buffers into appropriate positions for vertex shader to access
	for (int j=0; j<num_enabled; ++j) {
		buf = v[enabled[j]].buf;

		buf_pos = (u8*)c->buffers.a[buf].data + v[enabled[j]].offset + v[enabled[j]].stride*i;

		// default attribute is (0,0,0,1); only the first `size` floats are read
		SET_VEC4(tmpvec4, 0.0f, 0.0f, 0.0f, 1.0f);
		memcpy(&tmpvec4, buf_pos, sizeof(float)*v[enabled[j]].size);
		c->vertex_attribs_vs[enabled[j]] = tmpvec4;
	}

	float* vs_out = &c->vs_output.output_buf.a[vert*c->vs_output.size];
	c->programs.a[c->cur_program].vertex_shader(vs_out, c->vertex_attribs_vs, &c->builtins, c->programs.a[c->cur_program].uniform);

	c->glverts.a[vert].vs_out = vs_out;
	c->glverts.a[vert].clip_space = c->builtins.gl_Position;
	c->glverts.a[vert].edge_flag = 1;

	c->glverts.a[vert].clip_code = gl_clipcode(c->builtins.gl_Position);
}

/** Run the vertex-processing stage for [first, first+count) vertices of the
 *  current instance: resolves enabled/instanced attributes, then shades each
 *  vertex (directly or through the bound element buffer when use_elements). */
static void vertex_stage(GLint first, GLsizei count, GLsizei instance_id, GLuint base_instance, GLboolean use_elements)
{
	unsigned int i, j, vert, num_enabled;
	u8* buf_pos;

	//save checking if enabled on every loop if we build this first
	//also initialize the vertex_attrib space
	float vec4_init[] = { 0.0f, 0.0f, 0.0f, 1.0f };
	int enabled[GL_MAX_VERTEX_ATTRIBS] = { 0 };
	glVertex_Attrib* v = c->vertex_arrays.a[c->cur_vertex_array].vertex_attribs;
	GLuint elem_buffer = c->vertex_arrays.a[c->cur_vertex_array].element_buffer;

	for (i=0, j=0; i<GL_MAX_VERTEX_ATTRIBS; ++i) {
		if (v[i].enabled) {
			if (v[i].divisor == 0) {
				// no need to set to default vector here because it's handled in do_vertex()
				enabled[j++] = i;
			} else if (!(instance_id % v[i].divisor)) {
				//set instanced attributes if necessary
				// only reset to default vector right before updating, because
				// it has to stay the same across multiple instances for divisors > 1
				memcpy(&c->vertex_attribs_vs[i], vec4_init, sizeof(vec4));
				int n = instance_id/v[i].divisor + base_instance;
				buf_pos = (u8*)c->buffers.a[v[i].buf].data + v[i].offset + v[i].stride*n;
				memcpy(&c->vertex_attribs_vs[i], buf_pos, sizeof(float)*v[i].size);
			}
		}
	}
	num_enabled = j;

	cvec_reserve_glVertex(&c->glverts, count);
	c->builtins.gl_InstanceID = instance_id;

	if (!use_elements) {
		for (vert=0, i=first; i<first+count; ++i, ++vert) {
			do_vertex(v, enabled, num_enabled, i, vert);
		}
	} else {
		// index type is a property of the bound element buffer
		GLuint* uint_array = (GLuint*) c->buffers.a[elem_buffer].data;
		GLushort* ushort_array = (GLushort*) c->buffers.a[elem_buffer].data;
		GLubyte* ubyte_array = (GLubyte*) c->buffers.a[elem_buffer].data;
		if (c->buffers.a[elem_buffer].type == GL_UNSIGNED_BYTE) {
			for (vert=0, i=first; i<first+count; ++i, ++vert) {
				do_vertex(v, enabled, num_enabled, ubyte_array[i], vert);
			}
		} else if (c->buffers.a[elem_buffer].type == GL_UNSIGNED_SHORT) {
			for (vert=0, i=first; i<first+count; ++i, ++vert) {
				do_vertex(v, enabled, num_enabled, ushort_array[i], vert);
			}
		} else {
			for (vert=0, i=first; i<first+count; ++i, ++vert) {
				do_vertex(v, enabled, num_enabled, uint_array[i], vert);
			}
		}
	}
}

//TODO make fs_input static? or a member of glContext?
// Rasterize a single point sprite: runs the fragment shader for every pixel
// in the p_size x p_size square around the (already screen-space) vertex.
static void draw_point(glVertex* vert)
{
	float fs_input[GL_MAX_VERTEX_OUTPUT_COMPONENTS];

	vec3 point = vec4_to_vec3h(vert->screen_space);
	// NDC z -> window z per glDepthRange
	point.z = MAP(point.z, -1.0f, 1.0f, c->depth_range_near, c->depth_range_far);

	//TODO not sure if I'm supposed to do this ... doesn't say to in spec but it is called depth clamping
	//but I don't do it for lines or triangles (at least in fill or line mode)
	if (c->depth_clamp)
		point.z = clampf_01(point.z);

	//TODO why not just pass vs_output directly? hmmm...
	memcpy(fs_input, vert->vs_out, c->vs_output.size*sizeof(float));

	//accounting for pixel centers at 0.5, using truncation
	float x = point.x + 0.5f;
	float y = point.y + 0.5f;

	float p_size = c->point_size;
	// point-sprite coordinate origin flips the y direction for GL_UPPER_LEFT
	float origin = (c->point_spr_origin == GL_UPPER_LEFT) ? -1.0f : 1.0f;

	// Can easily clip whole point when point size <= 1 ...
	if (p_size <= 1) {
		if (x < 0 || y < 0 || x >= c->back_buffer.w || y >= c->back_buffer.h)
			return;
	}

	// iterate the square footprint; float loop counters step by whole pixels
	for (float i = y-p_size/2; i<y+p_size/2; ++i) {
		if (i < 0 || i >= c->back_buffer.h)
			continue;

		for (float j = x-p_size/2; j<x+p_size/2; ++j) {
			if (j < 0 || j >= c->back_buffer.w)
				continue;

			// per page 110 of 3.3 spec
			c->builtins.gl_PointCoord.x = 0.5f + ((int)j + 0.5f - point.x)/p_size;
			c->builtins.gl_PointCoord.y = 0.5f + origin * ((int)i + 0.5f - point.y)/p_size;

			SET_VEC4(c->builtins.gl_FragCoord, j, i, point.z, 1/vert->screen_space.w);
			c->builtins.discard = GL_FALSE;
			c->builtins.gl_FragDepth = point.z;
			c->programs.a[c->cur_program].fragment_shader(fs_input, &c->builtins, c->programs.a[c->cur_program].uniform);
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, j, i, c->builtins.gl_FragDepth);
		}
	}
}

// Top-level draw entry: runs the vertex stage, then assembles and rasterizes
// primitives according to 'mode'. 'provoke' offsets map each primitive to its
// provoking vertex index (for FLAT interpolation), following
// GL_FIRST/LAST_VERTEX_CONVENTION.
static void run_pipeline(GLenum mode, GLint first, GLsizei count, GLsizei instance, GLuint base_instance, GLboolean use_elements)
{
	unsigned int i, vert;
	int provoke;

	assert(count <= MAX_VERTICES);

	vertex_stage(first, count, instance, base_instance, use_elements);

	//fragment portion
	if (mode == GL_POINTS) {
		for (vert=0, i=first; i<first+count; ++i, ++vert) {
			// points wholly outside the frustum are dropped (no clipping)
			if (c->glverts.a[vert].clip_code)
				continue;

			c->glverts.a[vert].screen_space = mult_mat4_vec4(c->vp_mat, c->glverts.a[vert].clip_space);

			draw_point(&c->glverts.a[vert]);
		}
	} else if (mode == GL_LINES) {
		for (vert=0, i=first; i<first+count-1; i+=2, vert+=2) {
			draw_line_clip(&c->glverts.a[vert], &c->glverts.a[vert+1]);
		}
	} else if (mode == GL_LINE_STRIP) {
		for (vert=0, i=first; i<first+count-1; i++, vert++) {
			draw_line_clip(&c->glverts.a[vert], &c->glverts.a[vert+1]);
		}
	} else if (mode == GL_LINE_LOOP) {
		for (vert=0, i=first; i<first+count-1; i++, vert++) {
			draw_line_clip(&c->glverts.a[vert], &c->glverts.a[vert+1]);
		}
		//draw ending line from last to first point
		draw_line_clip(&c->glverts.a[count-1], &c->glverts.a[0]);
	} else if (mode == GL_TRIANGLES) {
		provoke = (c->provoking_vert == GL_LAST_VERTEX_CONVENTION) ? 2 : 0;

		for (vert=0, i=first; i<first+count-2; i+=3, vert+=3) {
			draw_triangle(&c->glverts.a[vert], &c->glverts.a[vert+1], &c->glverts.a[vert+2], vert+provoke);
		}
	} else if (mode == GL_TRIANGLE_STRIP) {
		// a and b track the two previous vertices; toggle alternates which
		// one is replaced so the strip winding stays consistent
		unsigned int a=0, b=1, toggle = 0;
		provoke = (c->provoking_vert == GL_LAST_VERTEX_CONVENTION) ? 0 : -2;

		for (vert=2; vert<count; ++vert) {
			draw_triangle(&c->glverts.a[a], &c->glverts.a[b], &c->glverts.a[vert], vert+provoke);

			if (!toggle)
				a = vert;
			else
				b = vert;

			toggle = !toggle;
		}
	} else if (mode == GL_TRIANGLE_FAN) {
		provoke = (c->provoking_vert == GL_LAST_VERTEX_CONVENTION) ? 0 : -1;

		for (vert=2; vert<count; ++vert) {
			draw_triangle(&c->glverts.a[0], &c->glverts.a[vert-1], &c->glverts.a[vert], vert+provoke);
		}
	}
}

// Depth comparison of an incoming fragment z against the stored depth value,
// using the current glDepthFunc. Returns nonzero if the fragment passes.
static int depthtest(float zval, float zbufval)
{
	// TODO not sure if I should do this since it's supposed to prevent writing to the buffer
	// but not afaik, change the result of the test
	if (!c->depth_mask) return 0;

	switch (c->depth_func) {
	case GL_LESS:     return zval < zbufval;
	case GL_LEQUAL:   return zval <= zbufval;
	case GL_GREATER:  return zval > zbufval;
	case GL_GEQUAL:   return zval >= zbufval;
	case GL_EQUAL:    return zval == zbufval;
	case GL_NOTEQUAL: return zval != zbufval;
	case GL_ALWAYS:   return 1;
	case GL_NEVER:    return 0;
	}
	return 0; //get rid of compile warning
}

// Interpolate the vertex-shader outputs at parameter t along a line segment
// into c->fs_input, honoring per-output interpolation qualifiers:
// SMOOTH = perspective-correct (weights by 1/w), NOPERSPECTIVE = linear,
// FLAT = value taken from the provoking vertex. Also resets the discard flag.
static void setup_fs_input(float t, float* v1_out, float* v2_out, float wa, float wb, unsigned int provoke)
{
	float* vs_output = &c->vs_output.output_buf.a[0];

	float inv_wa = 1.0/wa;
	float inv_wb = 1.0/wb;

	for (int i=0; i<c->vs_output.size; ++i) {
		if (c->vs_output.interpolation[i] == SMOOTH) {
			c->fs_input[i] = (v1_out[i]*inv_wa + t*(v2_out[i]*inv_wb - v1_out[i]*inv_wa)) / (inv_wa + t*(inv_wb - inv_wa));
		} else if (c->vs_output.interpolation[i] == NOPERSPECTIVE) {
			c->fs_input[i] = v1_out[i] + t*(v2_out[i] - v1_out[i]);
		} else {
			// FLAT: copy from the provoking vertex's outputs
			c->fs_input[i] = vs_output[provoke*c->vs_output.size + i];
		}
	}

	c->builtins.discard = GL_FALSE;
}

/* Line Clipping algorithm from 'Computer Graphics', Principles and Practice */
// Clip against one plane: num/denom is the parameter where the line crosses
// the plane; tmin/tmax is the surviving [t] interval. Returns 0 when the
// segment is entirely rejected by this plane.
static inline int clip_line(float denom, float num, float* tmin, float* tmax)
{
	float t;

	if (denom > 0) {
		t = num / denom;
		if (t > *tmax) return 0;
		if (t > *tmin) {
			*tmin = t;
			//printf("t > *tmin %f\n", t);
		}
	} else if (denom < 0) {
		t = num / denom;
		if (t < *tmin) return 0;
		if (t < *tmax) {
			*tmax = t;
			//printf("t < *tmax %f\n", t);
		}
	} else if (num > 0) return 0;

	return 1;
}

// Linearly interpolate the vertex-shader outputs of a clipped line's two new
// endpoints (at parameters tmin and tmax) into v1_out / v2_out.
static void interpolate_clipped_line(glVertex* v1, glVertex* v2, float* v1_out, float* v2_out, float tmin, float tmax)
{
	for (int i=0; i<c->vs_output.size; ++i) {
		v1_out[i] = v1->vs_out[i] + (v2->vs_out[i] - v1->vs_out[i])*tmin;
		v2_out[i] = v1->vs_out[i] + (v2->vs_out[i] - v1->vs_out[i])*tmax;

		//v2_out[i] = (1 - tmax)*v1->vs_out[i] + tmax*v2->vs_out[i];
	}
}

// Clip a line segment against the view frustum in clip space, then hand the
// (possibly shortened) segment to the line rasterizer in screen space.
static void draw_line_clip(glVertex* v1, glVertex* v2)
{
	int cc1, cc2;
	vec4 d, p1, p2, t1, t2;
	float tmin, tmax;

	cc1 = v1->clip_code;
	cc2 = v2->clip_code;

	p1 = v1->clip_space;
	p2 = v2->clip_space;

	float v1_out[GL_MAX_VERTEX_OUTPUT_COMPONENTS];
	float v2_out[GL_MAX_VERTEX_OUTPUT_COMPONENTS];

	//TODO ponder this
	unsigned int provoke;
	// NOTE(review): (v2 - c->glverts.a) is already an element index because
	// pointer subtraction is scaled by the element type; dividing again by
	// sizeof(glVertex) makes provoke ~0 for all but huge indices. Looks like
	// a bug (FLAT interpolation would read the wrong provoking vertex) —
	// confirm against the project's current upstream.
	if (c->provoking_vert == GL_LAST_VERTEX_CONVENTION)
		provoke = (v2 - c->glverts.a)/sizeof(glVertex);
	else
		provoke = (v1 - c->glverts.a)/sizeof(glVertex);

	if (cc1 & cc2) {
		// both endpoints outside the same plane: trivially rejected
		return;
	} else if ((cc1 | cc2) == 0) {
		// fully inside: just transform to screen space and draw
		t1 = mult_mat4_vec4(c->vp_mat, p1);
		t2 = mult_mat4_vec4(c->vp_mat, p2);

		//no need
		//memcpy(v1_out, v1->vs_out, c->vs_output.size*sizeof(float));
		//memcpy(v2_out, v2->vs_out, c->vs_output.size*sizeof(float));

		if (!c->line_smooth)
			draw_line_shader(t1, t2, v1->vs_out, v2->vs_out, provoke);
		else
			draw_line_smooth_shader(t1, t2, v1->vs_out, v2->vs_out, provoke);
	} else {
		// partially outside: clip parametrically against all 6 planes
		d = sub_vec4s(p2, p1);

		tmin = 0;
		tmax = 1;
		if (clip_line( d.x+d.w, -p1.x-p1.w, &tmin, &tmax) &&
		    clip_line(-d.x+d.w,  p1.x-p1.w, &tmin, &tmax) &&
		    clip_line( d.y+d.w, -p1.y-p1.w, &tmin, &tmax) &&
		    clip_line(-d.y+d.w,  p1.y-p1.w, &tmin, &tmax) &&
		    clip_line( d.z+d.w, -p1.z-p1.w, &tmin, &tmax) &&
		    clip_line(-d.z+d.w,  p1.z-p1.w, &tmin, &tmax)) {

			//printf("%f %f\n", tmin, tmax);

			t1 = add_vec4s(p1, scale_vec4(d, tmin));
			t2 = add_vec4s(p1, scale_vec4(d, tmax));

			t1 = mult_mat4_vec4(c->vp_mat, t1);
			t2 = mult_mat4_vec4(c->vp_mat, t2);
			//print_vec4(t1, "\n");
			//print_vec4(t2, "\n");

			interpolate_clipped_line(v1, v2, v1_out, v2_out, tmin, tmax);

			if (!c->line_smooth)
				draw_line_shader(t1, t2, v1_out, v2_out, provoke);
			else
				draw_line_smooth_shader(t1, t2, v1_out, v2_out, provoke);
		}
	}
}

// Rasterize a line with the fragment shader, aliased (no smoothing).
// Walks pixel centers with an implicit-line-function midpoint stepper, split
// into 4 slope cases; at each pixel, t is the projection of the pixel onto
// the segment, used to interpolate z, w and the vertex outputs.
static void draw_line_shader(vec4 v1, vec4 v2, float* v1_out, float* v2_out, unsigned int provoke)
{
	float tmp;
	float* tmp_ptr;

	vec3 hp1 = vec4_to_vec3h(v1);
	vec3 hp2 = vec4_to_vec3h(v2);

	//print_vec3(hp1, "\n");
	//print_vec3(hp2, "\n");

	float w1 = v1.w;
	float w2 = v2.w;

	float x1 = hp1.x, x2 = hp2.x, y1 = hp1.y, y2 = hp2.y;
	float z1 = hp1.z, z2 = hp2.z;

	//always draw from left to right
	if (x2 < x1) {
		tmp = x1;
		x1 = x2;
		x2 = tmp;
		tmp = y1;
		y1 = y2;
		y2 = tmp;
		tmp = z1;
		z1 = z2;
		z2 = tmp;
		tmp = w1;
		w1 = w2;
		w2 = tmp;

		tmp_ptr = v1_out;
		v1_out = v2_out;
		v2_out = tmp_ptr;
	}

	//calculate slope and implicit line parameters once
	//could just use my Line type/constructor as in draw_triangle
	float m = (y2-y1)/(x2-x1);
	Line line = make_Line(x1, y1, x2, y2);

	float t, x, y, z, w;

	vec2 p1 = { x1, y1 }, p2 = { x2, y2 };
	vec2 pr, sub_p2p1 = sub_vec2s(p2, p1);
	float line_length_squared = length_vec2(sub_p2p1);
	line_length_squared *= line_length_squared;

	frag_func fragment_shader = c->programs.a[c->cur_program].fragment_shader;
	void* uniform = c->programs.a[c->cur_program].uniform;
	// NOTE(review): fragdepth_or_discard is set here but never read in this
	// function (it IS used in draw_line_smooth_shader) — dead local?
	int fragdepth_or_discard = c->programs.a[c->cur_program].fragdepth_or_discard;

	// snap endpoints to pixel centers
	float i_x1, i_y1, i_x2, i_y2;
	i_x1 = floor(p1.x) + 0.5;
	i_y1 = floor(p1.y) + 0.5;
	i_x2 = floor(p2.x) + 0.5;
	i_y2 = floor(p2.y) + 0.5;

	float x_min, x_max, y_min, y_max;
	x_min = i_x1;
	x_max = i_x2; //always left to right;
	if (m <= 0) {
		y_min = i_y2;
		y_max = i_y1;
	} else {
		y_min = i_y1;
		y_max = i_y2;
	}

	//printf("%f %f %f %f   =\n", i_x1, i_y1, i_x2, i_y2);
	//printf("%f %f %f %f   x_min etc\n", x_min, x_max, y_min, y_max);

	// NDC z -> window z per glDepthRange
	z1 = MAP(z1, -1.0f, 1.0f, c->depth_range_near, c->depth_range_far);
	z2 = MAP(z2, -1.0f, 1.0f, c->depth_range_near, c->depth_range_far);

	//4 cases based on slope
	// (labels line_1..line_4 below appear unused — leftovers from an earlier
	// goto-based version, presumably; harmless)
	if (m <= -1) {     //(-infinite, -1]
		//printf("slope <= -1\n");
		for (x = x_min, y = y_max; y>=y_min && x<=x_max; --y) {
			pr.x = x;
			pr.y = y;
			// t = normalized projection of pixel center onto the segment
			t = dot_vec2s(sub_vec2s(pr, p1), sub_p2p1) / line_length_squared;

			z = (1 - t) * z1 + t * z2;
			w = (1 - t) * w1 + t * w2;

			SET_VEC4(c->builtins.gl_FragCoord, x, y, z, 1/w);
			c->builtins.discard = GL_FALSE;
			c->builtins.gl_FragDepth = z;
			setup_fs_input(t, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, x, y, c->builtins.gl_FragDepth);

line_1:
			// midpoint test: step x when the line passes right of the midpoint
			if (line_func(&line, x+0.5f, y-1) < 0) //A*(x+0.5f) + B*(y-1) + C < 0)
				++x;
		}
	} else if (m <= 0) {     //(-1, 0]
		//printf("slope = (-1, 0]\n");
		for (x = x_min, y = y_max; x<=x_max && y>=y_min; ++x) {
			pr.x = x;
			pr.y = y;
			t = dot_vec2s(sub_vec2s(pr, p1), sub_p2p1) / line_length_squared;

			z = (1 - t) * z1 + t * z2;
			w = (1 - t) * w1 + t * w2;

			SET_VEC4(c->builtins.gl_FragCoord, x, y, z, 1/w);
			c->builtins.discard = GL_FALSE;
			c->builtins.gl_FragDepth = z;
			setup_fs_input(t, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, x, y, c->builtins.gl_FragDepth);

line_2:
			if (line_func(&line, x+1, y-0.5f) > 0) //A*(x+1) + B*(y-0.5f) + C > 0)
				--y;
		}
	} else if (m <= 1) {     //(0, 1]
		//printf("slope = (0, 1]\n");
		for (x = x_min, y = y_min; x <= x_max && y <= y_max; ++x) {
			pr.x = x;
			pr.y = y;
			t = dot_vec2s(sub_vec2s(pr, p1), sub_p2p1) / line_length_squared;

			z = (1 - t) * z1 + t * z2;
			w = (1 - t) * w1 + t * w2;

			SET_VEC4(c->builtins.gl_FragCoord, x, y, z, 1/w);
			c->builtins.discard = GL_FALSE;
			c->builtins.gl_FragDepth = z;
			setup_fs_input(t, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, x, y, c->builtins.gl_FragDepth);

line_3:
			if (line_func(&line, x+1, y+0.5f) < 0) //A*(x+1) + B*(y+0.5f) + C < 0)
				++y;
		}
	} else {    //(1, +infinite)
		//printf("slope > 1\n");
		for (x = x_min, y = y_min; y<=y_max && x <= x_max; ++y) {
			pr.x = x;
			pr.y = y;
			t = dot_vec2s(sub_vec2s(pr, p1), sub_p2p1) / line_length_squared;

			z = (1 - t) * z1 + t * z2;
			w = (1 - t) * w1 + t * w2;

			SET_VEC4(c->builtins.gl_FragCoord, x, y, z, 1/w);
			c->builtins.discard = GL_FALSE;
			c->builtins.gl_FragDepth = z;
			setup_fs_input(t, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, x, y, c->builtins.gl_FragDepth);

line_4:
			if (line_func(&line, x+0.5f, y+1) > 0) //A*(x+0.5f) + B*(y+1) + C > 0)
				++x;
		}
	}
}

// WARNING: this function is subject to serious change or removal and is currently unused (GL_LINE_SMOOTH unsupported)
// TODO do it right, handle depth test correctly since we moved it into draw_pixel
// Xiaolin-Wu-style antialiased line: draws the two endpoint pixel pairs, then
// walks the main span writing two vertically (or horizontally, if steep)
// adjacent pixels per column. Coverage/alpha application is commented out.
static void draw_line_smooth_shader(vec4 v1, vec4 v2, float* v1_out, float* v2_out, unsigned int provoke)
{
	float tmp;
	float* tmp_ptr;

	frag_func fragment_shader = c->programs.a[c->cur_program].fragment_shader;
	void* uniform = c->programs.a[c->cur_program].uniform;
	int fragdepth_or_discard = c->programs.a[c->cur_program].fragdepth_or_discard;

	vec3 hp1 = vec4_to_vec3h(v1);
	vec3 hp2 = vec4_to_vec3h(v2);
	float x1 = hp1.x, x2 = hp2.x, y1 = hp1.y, y2 = hp2.y;
	float z1 = hp1.z, z2 = hp2.z;

	float w1 = v1.w;
	float w2 = v2.w;

	int x, j;

	// steep lines are traversed with x/y swapped
	int steep = fabsf(y2 - y1) > fabsf(x2 - x1);

	if (steep) {
		tmp = x1;
		x1 = y1;
		y1 = tmp;
		tmp = x2;
		x2 = y2;
		y2 = tmp;
	}
	if (x1 > x2) {
		tmp = x1;
		x1 = x2;
		x2 = tmp;
		tmp = y1;
		y1 = y2;
		y2 = tmp;
		tmp = z1;
		z1 = z2;
		z2 = tmp;
		tmp = w1;
		w1 = w2;
		w2 = tmp;

		tmp_ptr = v1_out;
		v1_out = v2_out;
		v2_out = tmp_ptr;
	}

	float dx = x2 - x1;
	float dy = y2 - y1;
	float gradient = dy / dx;

	// first endpoint
	float xend = x1 + 0.5f;
	float yend = y1 + gradient * (xend - x1);

	// xgap would be the endpoint coverage weight; alpha use is commented out below
	float xgap = 1.0 - modff(x1 + 0.5, &tmp);
	float xpxl1 = xend;
	float ypxl1;
	modff(yend, &ypxl1);

	//choose to compare against just one pixel for depth test instead of both
	z1 = MAP(z1, -1.0f, 1.0f, c->depth_range_near, c->depth_range_far);

	if (steep) {
		if (!c->depth_test || (!fragdepth_or_discard &&
			depthtest(z1, ((float*)c->zbuf.lastrow)[-(int)xpxl1*c->zbuf.w + (int)ypxl1]))) {

			if (!c->fragdepth_or_discard && c->depth_test) { //hate this double check but depth buf is only update if enabled
				((float*)c->zbuf.lastrow)[-(int)xpxl1*c->zbuf.w + (int)ypxl1] = z1;
				((float*)c->zbuf.lastrow)[-(int)xpxl1*c->zbuf.w + (int)(ypxl1+1)] = z1;
			}

			SET_VEC4(c->builtins.gl_FragCoord, ypxl1, xpxl1, z1, 1/w1);
			setup_fs_input(0, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = (1.0 - modff(yend, &tmp)) * xgap;
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, ypxl1, xpxl1, c->builtins.gl_FragDepth);

			SET_VEC4(c->builtins.gl_FragCoord, ypxl1+1, xpxl1, z1, 1/w1);
			setup_fs_input(0, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = modff(yend, &tmp) * xgap;
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, ypxl1+1, xpxl1, c->builtins.gl_FragDepth);
		}
	} else {
		if (!c->depth_test || (!fragdepth_or_discard &&
			depthtest(z1, ((float*)c->zbuf.lastrow)[-(int)ypxl1*c->zbuf.w + (int)xpxl1]))) {

			if (!c->fragdepth_or_discard && c->depth_test) { //hate this double check but depth buf is only update if enabled
				((float*)c->zbuf.lastrow)[-(int)ypxl1*c->zbuf.w + (int)xpxl1] = z1;
				((float*)c->zbuf.lastrow)[-(int)(ypxl1+1)*c->zbuf.w + (int)xpxl1] = z1;
			}

			SET_VEC4(c->builtins.gl_FragCoord, xpxl1, ypxl1, z1, 1/w1);
			setup_fs_input(0, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = (1.0 - modff(yend, &tmp)) * xgap;
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, xpxl1, ypxl1, c->builtins.gl_FragDepth);

			SET_VEC4(c->builtins.gl_FragCoord, xpxl1, ypxl1+1, z1, 1/w1);
			setup_fs_input(0, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = modff(yend, &tmp) * xgap;
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, xpxl1, ypxl1+1, c->builtins.gl_FragDepth);
		}
	}

	float intery = yend + gradient; //first y-intersection for main loop

	// second endpoint
	xend = x2 + 0.5f;
	yend = y2 + gradient * (xend - x2);
	xgap = modff(x2 + 0.5, &tmp);
	float xpxl2 = xend;
	float ypxl2;
	modff(yend, &ypxl2);

	z2 = MAP(z2, -1.0f, 1.0f, c->depth_range_near, c->depth_range_far);

	if (steep) {
		if (!c->depth_test || (!fragdepth_or_discard &&
			depthtest(z2, ((float*)c->zbuf.lastrow)[-(int)xpxl2*c->zbuf.w + (int)ypxl2]))) {

			if (!c->fragdepth_or_discard && c->depth_test) {
				((float*)c->zbuf.lastrow)[-(int)xpxl2*c->zbuf.w + (int)ypxl2] = z2;
				((float*)c->zbuf.lastrow)[-(int)xpxl2*c->zbuf.w + (int)(ypxl2+1)] = z2;
			}

			SET_VEC4(c->builtins.gl_FragCoord, ypxl2, xpxl2, z2, 1/w2);
			setup_fs_input(1, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = (1.0 - modff(yend, &tmp)) * xgap;
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, ypxl2, xpxl2, c->builtins.gl_FragDepth);

			SET_VEC4(c->builtins.gl_FragCoord, ypxl2+1, xpxl2, z2, 1/w2);
			setup_fs_input(1, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = modff(yend, &tmp) * xgap;
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, ypxl2+1, xpxl2, c->builtins.gl_FragDepth);
		}
	} else {
		if (!c->depth_test || (!fragdepth_or_discard &&
			depthtest(z2, ((float*)c->zbuf.lastrow)[-(int)ypxl2*c->zbuf.w + (int)xpxl2]))) {

			if (!c->fragdepth_or_discard && c->depth_test) {
				((float*)c->zbuf.lastrow)[-(int)ypxl2*c->zbuf.w + (int)xpxl2] = z2;
				((float*)c->zbuf.lastrow)[-(int)(ypxl2+1)*c->zbuf.w + (int)xpxl2] = z2;
			}

			SET_VEC4(c->builtins.gl_FragCoord, xpxl2, ypxl2, z2, 1/w2);
			setup_fs_input(1, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = (1.0 - modff(yend, &tmp)) * xgap;
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, xpxl2, ypxl2, c->builtins.gl_FragDepth);

			SET_VEC4(c->builtins.gl_FragCoord, xpxl2, ypxl2+1, z2, 1/w2);
			setup_fs_input(1, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = modff(yend, &tmp) * xgap;
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, xpxl2, ypxl2+1, c->builtins.gl_FragDepth);
		}
	}

	//use the fast, inaccurate calculation of t since this algorithm is already
	//slower than the normal line drawing, pg 111 glspec if I ever want to fix it
	float range = ceil(x2-x1);
	float t, z, w;

	for (j=1, x = xpxl1 + 1; x < xpxl2; ++x, ++j, intery += gradient) {
		t = j/range;

		z = (1 - t) * z1 + t * z2;
		w = (1 - t) * w1 + t * w2;

		if (steep) {
			if (!c->fragdepth_or_discard && c->depth_test) {
				if (!depthtest(z, ((float*)c->zbuf.lastrow)[-(int)x*c->zbuf.w + (int)intery])) {
					continue;
				} else {
					((float*)c->zbuf.lastrow)[-(int)x*c->zbuf.w + (int)intery] = z;
					((float*)c->zbuf.lastrow)[-(int)x*c->zbuf.w + (int)(intery+1)] = z;
				}
			}

			SET_VEC4(c->builtins.gl_FragCoord, intery, x, z, 1/w);
			setup_fs_input(t, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = 1.0 - modff(intery, &tmp);
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, intery, x, c->builtins.gl_FragDepth);

			SET_VEC4(c->builtins.gl_FragCoord, intery+1, x, z, 1/w);
			setup_fs_input(t, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = modff(intery, &tmp);
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, intery+1, x, c->builtins.gl_FragDepth);
		} else {
			if (!c->fragdepth_or_discard && c->depth_test) {
				if (!depthtest(z, ((float*)c->zbuf.lastrow)[-(int)intery*c->zbuf.w + (int)x])) {
					continue;
				} else {
					((float*)c->zbuf.lastrow)[-(int)intery*c->zbuf.w + (int)x] = z;
					((float*)c->zbuf.lastrow)[-(int)(intery+1)*c->zbuf.w + (int)x] = z;
				}
			}

			SET_VEC4(c->builtins.gl_FragCoord, x, intery, z, 1/w);
			setup_fs_input(t, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = 1.0 - modff(intery, &tmp);
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, x, intery, c->builtins.gl_FragDepth);

			SET_VEC4(c->builtins.gl_FragCoord, x, intery+1, z, 1/w);
			setup_fs_input(t, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = modff(intery, &tmp);
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, x, intery+1, c->builtins.gl_FragDepth);
		}
	}
}

// Triangle entry point: trivially reject when all vertices share an outside
// plane, rasterize directly when fully inside, otherwise clip recursively.
static void draw_triangle(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke)
{
	int c_or, c_and;
	c_and = v0->clip_code & v1->clip_code & v2->clip_code;
	if (c_and != 0) {
		//printf("triangle outside\n");
		return;
	}

	c_or = v0->clip_code | v1->clip_code | v2->clip_code;
	if (c_or == 0) {
		draw_triangle_final(v0, v1, v2, provoke);
	} else {
		draw_triangle_clip(v0, v1, v2, provoke, 0);
	}
}

// Fully-clipped triangle: transform to screen space, apply face culling, set
// gl_FrontFacing, then dispatch to the current polygon-mode rasterizer
// (fill/line/point) for the front or back face.
static void draw_triangle_final(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke)
{
	int front_facing;
	v0->screen_space = mult_mat4_vec4(c->vp_mat, v0->clip_space);
	v1->screen_space = mult_mat4_vec4(c->vp_mat, v1->clip_space);
	v2->screen_space = mult_mat4_vec4(c->vp_mat, v2->clip_space);

	front_facing = is_front_facing(v0, v1, v2);
	if (c->cull_face) {
		if (c->cull_mode == GL_FRONT_AND_BACK)
			return;
		if (c->cull_mode == GL_BACK && !front_facing) {
			//printf("culling back face\n");
			return;
		}
		if (c->cull_mode == GL_FRONT && front_facing)
			return;
	}

	c->builtins.gl_FrontFacing = front_facing;
	if (front_facing) {
		c->draw_triangle_front(v0, v1, v2, provoke);
	} else {
		c->draw_triangle_back(v0, v1, v2, provoke);
	}
}

/* We clip the segment [a,b] against the 6 planes of the normal volume.
 * We compute the point 'c' of intersection and the value of the parameter 't'
 * of the intersection if x=a+t(b-a).
 */
// Generates clip_{x,y,z}{min,max}(): intersect segment [a,b] with one frustum
// plane, write the intersection into *c, return the parameter t.
#define clip_func(name, sign, dir, dir1, dir2) \
static float name(vec4 *c, vec4 *a, vec4 *b) \
{\
	float t, dx, dy, dz, dw, den;\
	dx = (b->x - a->x);\
	dy = (b->y - a->y);\
	dz = (b->z - a->z);\
	dw = (b->w - a->w);\
	den = -(sign d ## dir) + dw;\
	if (den == 0) t=0;\
	else t = ( sign a->dir - a->w) / den;\
	c->dir1 = a->dir1 + t * d ## dir1;\
	c->dir2 = a->dir2 + t * d ## dir2;\
	c->w = a->w + t * dw;\
	c->dir = sign c->w;\
	return t;\
}

clip_func(clip_xmin, -, x, y, z)

clip_func(clip_xmax, +, x, y, z)

clip_func(clip_ymin, -, y, x, z)

clip_func(clip_ymax, +, y, x, z)

clip_func(clip_zmin, -, z, x, y)

clip_func(clip_zmax, +, z, x, y)

// plane-clip dispatch table, indexed by clip_bit: z planes first, then x, y
static float (*clip_proc[6])(vec4 *, vec4 *, vec4 *) = {
	clip_zmin, clip_zmax,
	clip_xmin, clip_xmax,
	clip_ymin, clip_ymax
};

// Fill in a newly-created clip vertex q at parameter t between v0 and v1:
// interpolate the vertex-shader outputs and recompute its clip code.
static inline void update_clip_pt(glVertex *q, glVertex *v0, glVertex *v1, float t)
{
	for (int i=0; i<c->vs_output.size; ++i) {
		//why is this correct for both SMOOTH and NOPERSPECTIVE?
		q->vs_out[i] = v0->vs_out[i] + (v1->vs_out[i] - v0->vs_out[i]) * t;

		//FLAT should be handled indirectly by the provoke index
		//nothing to do here unless I change that
	}
	
	q->clip_code = gl_clipcode(q->clip_space);
	/*
	 * this is done in draw_triangle currently ...
	q->screen_space = mult_mat4_vec4(c->vp_mat, q->clip_space);
	if (q->clip_code == 0)
		q->screen_space = mult_mat4_vec4(c->vp_mat, q->clip_space);
	*/
}

// Recursive Sutherland-Hodgman-style triangle clipping against the frustum
// plane selected by clip_bit; splits into 1 or 2 sub-triangles per plane and
// recurses with the next plane. Edge flags are managed so interior (clip)
// edges aren't drawn in line polygon mode.
static void draw_triangle_clip(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke, int clip_bit)
{
	int c_or, c_and, c_ex_or, cc[3], edge_flag_tmp, clip_mask;
	glVertex tmp1, tmp2, *q[3];
	float tt;

	//quite a bit of stack if there's a lot of clipping ...
	float tmp1_out[GL_MAX_VERTEX_OUTPUT_COMPONENTS];
	float tmp2_out[GL_MAX_VERTEX_OUTPUT_COMPONENTS];

	tmp1.vs_out = tmp1_out;
	tmp2.vs_out = tmp2_out;

	cc[0] = v0->clip_code;
	cc[1] = v1->clip_code;
	cc[2] = v2->clip_code;
	/*
	printf("in draw_triangle_clip\n");
	print_vec4(v0->clip_space, "\n");
	print_vec4(v1->clip_space, "\n");
	print_vec4(v2->clip_space, "\n");

	printf("tmp_out tmp2_out = %p %p\n\n", tmp1_out, tmp2_out);
	*/

	c_or = cc[0] | cc[1] | cc[2];
	if (c_or == 0) {
		draw_triangle_final(v0, v1, v2, provoke);
	} else {
		c_and = cc[0] & cc[1] & cc[2];
		/* the triangle is completely outside */
		if (c_and != 0) {
			//printf("triangle outside\n");
			return;
		}

		/* find the next direction to clip */
		while (clip_bit < 6 && (c_or & (1 << clip_bit)) == 0)  {
			++clip_bit;
		}

		/* this test can be true only in case of rounding errors */
		if (clip_bit == 6) {
#if 1
			printf("Clipping error:\n");
			print_vec4(v0->clip_space, "\n");
			print_vec4(v1->clip_space, "\n");
			print_vec4(v2->clip_space, "\n");
#endif
			return;
		}

		clip_mask = 1 << clip_bit;
		c_ex_or = (cc[0] ^ cc[1] ^ cc[2]) & clip_mask;

		if (c_ex_or)  {
			/* one point outside */

			// rotate q[] so q[0] is the outside vertex
			if (cc[0] & clip_mask) { q[0]=v0; q[1]=v1; q[2]=v2; }
			else if (cc[1] & clip_mask) { q[0]=v1; q[1]=v2; q[2]=v0; }
			else { q[0]=v2; q[1]=v0; q[2]=v1; }

			tt = clip_proc[clip_bit](&tmp1.clip_space, &q[0]->clip_space, &q[1]->clip_space);
			update_clip_pt(&tmp1, q[0], q[1], tt);

			tt = clip_proc[clip_bit](&tmp2.clip_space, &q[0]->clip_space, &q[2]->clip_space);
			update_clip_pt(&tmp2, q[0], q[2], tt);

			tmp1.edge_flag = q[0]->edge_flag;
			edge_flag_tmp = q[2]->edge_flag;
			q[2]->edge_flag = 0;
			draw_triangle_clip(&tmp1, q[1], q[2], provoke, clip_bit+1);

			tmp2.edge_flag = 1;
			tmp1.edge_flag = 0;
			q[2]->edge_flag = edge_flag_tmp;
			draw_triangle_clip(&tmp2, &tmp1, q[2], provoke, clip_bit+1);
		} else {
			/* two points outside */

			// rotate q[] so q[0] is the inside vertex
			if ((cc[0] & clip_mask) == 0) { q[0]=v0; q[1]=v1; q[2]=v2; }
			else if ((cc[1] & clip_mask) == 0) { q[0]=v1; q[1]=v2; q[2]=v0; }
			else { q[0]=v2; q[1]=v0; q[2]=v1; }

			tt = clip_proc[clip_bit](&tmp1.clip_space, &q[0]->clip_space, &q[1]->clip_space);
			update_clip_pt(&tmp1, q[0], q[1], tt);

			tt = clip_proc[clip_bit](&tmp2.clip_space, &q[0]->clip_space, &q[2]->clip_space);
			update_clip_pt(&tmp2, q[0], q[2], tt);

			tmp1.edge_flag = 1;
			tmp2.edge_flag = q[2]->edge_flag;
			draw_triangle_clip(q[0], &tmp1, &tmp2, provoke, clip_bit+1);
		}
	}
}

// GL_POINT polygon mode: draw each vertex with an edge flag as a single
// fragment (no point-size expansion here, unlike draw_point).
static void draw_triangle_point(glVertex* v0, glVertex* v1,  glVertex* v2, unsigned int provoke)
{
	//TODO provoke?
	float fs_input[GL_MAX_VERTEX_OUTPUT_COMPONENTS];
	vec3 point;
	glVertex* vert[3] = { v0, v1, v2 };

	for (int i=0; i<3; ++i) {
		if (!vert[i]->edge_flag) //TODO doesn't work
			continue;

		point = vec4_to_vec3h(vert[i]->screen_space);
		point.z = MAP(point.z, -1.0f, 1.0f, c->depth_range_near, c->depth_range_far);

		//TODO not sure if I'm supposed to do this ... doesn't say to in spec but it is called depth clamping
		if (c->depth_clamp)
			point.z = clampf_01(point.z);

		for (int j=0; j<c->vs_output.size; ++j) {
			if (c->vs_output.interpolation[j] != FLAT) {
				fs_input[j] = vert[i]->vs_out[j]; //would be correct from clipping
			} else {
				fs_input[j] = c->vs_output.output_buf.a[provoke*c->vs_output.size + j];
			}
		}

		c->builtins.discard = GL_FALSE;
		c->builtins.gl_FragDepth = point.z;
		c->programs.a[c->cur_program].fragment_shader(fs_input, &c->builtins, c->programs.a[c->cur_program].uniform);
		if (!c->builtins.discard)
			draw_pixel(c->builtins.gl_FragColor, point.x, point.y, c->builtins.gl_FragDepth);
	}
}

// GL_LINE polygon mode: draw the triangle's boundary edges whose edge flag
// is set (clip-introduced interior edges are skipped).
static void draw_triangle_line(glVertex* v0, glVertex* v1,  glVertex* v2, unsigned int provoke)
{
	if (v0->edge_flag)
		draw_line_shader(v0->screen_space, v1->screen_space, v0->vs_out, v1->vs_out, provoke);
	if (v1->edge_flag)
		draw_line_shader(v1->screen_space, v2->screen_space, v1->vs_out, v2->vs_out, provoke);
	if (v2->edge_flag)
		draw_line_shader(v2->screen_space, v0->screen_space, v2->vs_out, v0->vs_out, provoke);
}

// GL_FILL polygon mode: barycentric scanline fill over the triangle's
// bounding box, with perspective-correct interpolation, polygon offset, and
// an OpenMP-parallel row loop.
static void draw_triangle_fill(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke)
{
	vec4 p0 = v0->screen_space;
	vec4 p1 = v1->screen_space;
	vec4 p2 = v2->screen_space;

	vec3 hp0 = vec4_to_vec3h(p0);
	vec3 hp1 = vec4_to_vec3h(p1);
	vec3 hp2 = vec4_to_vec3h(p2);

	// TODO even worth calculating or just some constant?
	float max_depth_slope = 0;
	float poly_offset = 0;

	if (c->poly_offset) {
		// approximate max dz/dx, dz/dy over the three edges for glPolygonOffset
		float dzxy[6];
		dzxy[0] = fabsf((hp1.z - hp0.z)/(hp1.x - hp0.x));
		dzxy[1] = fabsf((hp1.z - hp0.z)/(hp1.y - hp0.y));
		dzxy[2] = fabsf((hp2.z - hp1.z)/(hp2.x - hp1.x));
		dzxy[3] = fabsf((hp2.z - hp1.z)/(hp2.y - hp1.y));
		dzxy[4] = fabsf((hp0.z - hp2.z)/(hp0.x - hp2.x));
		dzxy[5] = fabsf((hp0.z - hp2.z)/(hp0.y - hp2.y));

		max_depth_slope = dzxy[0];
		for (int i=1; i<6; ++i) {
			if (dzxy[i] > max_depth_slope)
				max_depth_slope = dzxy[i];
		}

// NOTE(review): the trailing semicolon is baked into this macro's expansion;
// it only works because the macro is used at the end of a statement. Also the
// macro is never #undef'd so it leaks past this function.
#define SMALLEST_INCR 0.000001;
		poly_offset = max_depth_slope * c->poly_factor + c->poly_units * SMALLEST_INCR;
	}

	/*
	print_vec4(hp0, "\n");
	print_vec4(hp1, "\n");
	print_vec4(hp2, "\n");

	printf("%f %f %f\n", p0.w, p1.w, p2.w);
	print_vec3(hp0, "\n");
	print_vec3(hp1, "\n");
	print_vec3(hp2, "\n\n");
	*/

	//can't think of a better/cleaner way to do this than these 8 lines
	float x_min = MIN(hp0.x, hp1.x);
	float x_max = MAX(hp0.x, hp1.x);
	float y_min = MIN(hp0.y, hp1.y);
	float y_max = MAX(hp0.y, hp1.y);

	x_min = MIN(hp2.x, x_min);
	x_max = MAX(hp2.x, x_max);
	y_min = MIN(hp2.y, y_min);
	y_max = MAX(hp2.y, y_max);

	int ix_max = roundf(x_max);
	int iy_max = roundf(y_max);

	/*
	 * testing without this
	x_min = MAX(0, x_min);
	x_max = MIN(c->back_buffer.w-1, x_max);
	y_min = MAX(0, y_min);
	y_max = MIN(c->back_buffer.h-1, y_max);

	x_min = MAX(c->x_min, x_min);
	x_max = MIN(c->x_max, x_max);
	y_min = MAX(c->y_min, y_min);
	y_max = MIN(c->y_max, y_max);
	*/

	//form implicit lines
	Line l01 = make_Line(hp0.x, hp0.y, hp1.x, hp1.y);
	Line l12 = make_Line(hp1.x, hp1.y, hp2.x, hp2.y);
	Line l20 = make_Line(hp2.x, hp2.y, hp0.x, hp0.y);

	float alpha, beta, gamma, tmp, tmp2, z;
	float fs_input[GL_MAX_VERTEX_OUTPUT_COMPONENTS];
	float perspective[GL_MAX_VERTEX_OUTPUT_COMPONENTS*3];
	float* vs_output = &c->vs_output.output_buf.a[0];

	// pre-divide each vertex's outputs by w for perspective-correct interpolation
	for (int i=0; i<c->vs_output.size; ++i) {
		perspective[i] = v0->vs_out[i]/p0.w;
		perspective[GL_MAX_VERTEX_OUTPUT_COMPONENTS + i] = v1->vs_out[i]/p1.w;
		perspective[2*GL_MAX_VERTEX_OUTPUT_COMPONENTS + i] = v2->vs_out[i]/p2.w;
	}
	float inv_w0 = 1/p0.w;  //is this worth it?  faster than just dividing by w down below?
	float inv_w1 = 1/p1.w;
	float inv_w2 = 1/p2.w;

	float x, y;

	Shader_Builtins builtins;
	#pragma omp parallel for private(x, y, alpha, beta, gamma, z, tmp, tmp2, builtins, fs_input)
	for (int iy = y_min; iy<iy_max; ++iy) {
		y = iy + 0.5f;

		for (int ix = x_min; ix<ix_max; ++ix) {
			x = ix + 0.5f; //center of min pixel

			// barycentric coordinates via the implicit edge functions
			// see page 117 of glspec for alternative method
			gamma = line_func(&l01, x, y)/line_func(&l01, hp2.x, hp2.y);
			beta = line_func(&l20, x, y)/line_func(&l20, hp1.x, hp1.y);
			alpha = 1 - beta - gamma;

			if (alpha >= 0 && beta >= 0 && gamma >= 0) {
				//if it's on the edge (==0), draw if the opposite vertex is on the same side as arbitrary point -1, -2.5
				//this is a deterministic way of choosing which triangle gets a pixel for triangles that share
				//edges (see commit message for e87e324)
				if ((alpha > 0 || line_func(&l12, hp0.x, hp0.y) * line_func(&l12, -1, -2.5) > 0) &&
				    (beta > 0 || line_func(&l20, hp1.x, hp1.y) * line_func(&l20, -1, -2.5) > 0) &&
				    (gamma > 0 || line_func(&l01, hp2.x, hp2.y) * line_func(&l01, -1, -2.5) > 0)) {
					//calculate interoplation here
					tmp2 = alpha*inv_w0 + beta*inv_w1 + gamma*inv_w2;

					z = alpha * hp0.z + beta * hp1.z + gamma * hp2.z;

					z = MAP(z, -1.0f, 1.0f, c->depth_range_near, c->depth_range_far); //TODO move out (ie can I map hp1.z etc.)?
					z += poly_offset;

					// TODO have a macro that turns on pre-fragment shader depthtest/scissor test?

					for (int i=0; i<c->vs_output.size; ++i) {
						if (c->vs_output.interpolation[i] == SMOOTH) {
							tmp = alpha*perspective[i] + beta*perspective[GL_MAX_VERTEX_OUTPUT_COMPONENTS + i] + gamma*perspective[2*GL_MAX_VERTEX_OUTPUT_COMPONENTS + i];

							fs_input[i] = tmp/tmp2;

						} else if (c->vs_output.interpolation[i] == NOPERSPECTIVE) {
							fs_input[i] = alpha * v0->vs_out[i] + beta * v1->vs_out[i] + gamma * v2->vs_out[i];
						} else { // == FLAT
							fs_input[i] = vs_output[provoke*c->vs_output.size + i];
						}
					}

					// tmp2 is 1/w interpolated... I now do that everywhere (draw_line, draw_point)
					SET_VEC4(builtins.gl_FragCoord, x, y, z, tmp2);
					builtins.discard = GL_FALSE;
					builtins.gl_FragDepth = z;

					// have to do this here instead of outside the loop because somehow openmp messes it up
					// TODO probably some way to prevent that but it's just copying an int so no big deal
					builtins.gl_InstanceID = c->builtins.gl_InstanceID;

					c->programs.a[c->cur_program].fragment_shader(fs_input, &builtins, c->programs.a[c->cur_program].uniform);
					if (!builtins.discard) {

						draw_pixel(builtins.gl_FragColor, x, y, builtins.gl_FragDepth);
					}
				}
			}
		}
	}
}
// Apply the current blend state to a fragment.
// src = incoming fragment color, dst = color already in the framebuffer,
// both as float vec4s (nominally in [0,1]).  Computes the source and
// destination blend factors (Cs, Cd) from c->blend_sfactor/blend_dfactor,
// combines them with c->blend_equation, and returns the packed result.
// NOTE: the result is not clamped here; see TODO in draw_pixel's caller path.
static Color blend_pixel(vec4 src, vec4 dst)
{
	vec4* cnst = &c->blend_color; // GL_CONSTANT_* factors read the blend-color state
	float i = MIN(src.w, 1-dst.w); // f = min(As, 1-Ad), used by GL_SRC_ALPHA_SATURATE

	vec4 Cs, Cd; // computed source and destination blend factors

	// Source factor selection
	switch (c->blend_sfactor) {
	case GL_ZERO:                     SET_VEC4(Cs, 0,0,0,0);                                  break;
	case GL_ONE:                      SET_VEC4(Cs, 1,1,1,1);                                  break;
	case GL_SRC_COLOR:                Cs = src;                                               break;
	case GL_ONE_MINUS_SRC_COLOR:      SET_VEC4(Cs, 1-src.x,1-src.y,1-src.z,1-src.w);          break;
	case GL_DST_COLOR:                Cs = dst;                                               break;
	case GL_ONE_MINUS_DST_COLOR:      SET_VEC4(Cs, 1-dst.x,1-dst.y,1-dst.z,1-dst.w);          break;
	case GL_SRC_ALPHA:                SET_VEC4(Cs, src.w, src.w, src.w, src.w);               break;
	case GL_ONE_MINUS_SRC_ALPHA:      SET_VEC4(Cs, 1-src.w,1-src.w,1-src.w,1-src.w);          break;
	case GL_DST_ALPHA:                SET_VEC4(Cs, dst.w, dst.w, dst.w, dst.w);               break;
	case GL_ONE_MINUS_DST_ALPHA:      SET_VEC4(Cs, 1-dst.w,1-dst.w,1-dst.w,1-dst.w);          break;
	case GL_CONSTANT_COLOR:           Cs = *cnst;                                             break;
	case GL_ONE_MINUS_CONSTANT_COLOR: SET_VEC4(Cs, 1-cnst->x,1-cnst->y,1-cnst->z,1-cnst->w);  break;
	case GL_CONSTANT_ALPHA:           SET_VEC4(Cs, cnst->w, cnst->w, cnst->w, cnst->w);       break;
	case GL_ONE_MINUS_CONSTANT_ALPHA: SET_VEC4(Cs, 1-cnst->w,1-cnst->w,1-cnst->w,1-cnst->w);  break;
	case GL_SRC_ALPHA_SATURATE:       SET_VEC4(Cs, i, i, i, 1);                               break;
	/* not implemented yet
	 * won't be until I implement dual source blending/dual output from frag shader
	 * https://www.opengl.org/wiki/Blending#Dual_Source_Blending
	case GL_SRC1_COLOR: Cs =  break;
	case GL_ONE_MINUS_SRC1_COLOR: Cs =  break;
	case GL_SRC1_ALPHA: Cs =  break;
	case GL_ONE_MINUS_SRC1_ALPHA: Cs =  break;
	*/
	default:
		//should never get here
		printf("error unrecognized blend_sfactor!\n");
		break;
	}

	// Destination factor selection (same factor set as above)
	switch (c->blend_dfactor) {
	case GL_ZERO:                     SET_VEC4(Cd, 0,0,0,0);                                  break;
	case GL_ONE:                      SET_VEC4(Cd, 1,1,1,1);                                  break;
	case GL_SRC_COLOR:                Cd = src;                                               break;
	case GL_ONE_MINUS_SRC_COLOR:      SET_VEC4(Cd, 1-src.x,1-src.y,1-src.z,1-src.w);          break;
	case GL_DST_COLOR:                Cd = dst;                                               break;
	case GL_ONE_MINUS_DST_COLOR:      SET_VEC4(Cd, 1-dst.x,1-dst.y,1-dst.z,1-dst.w);          break;
	case GL_SRC_ALPHA:                SET_VEC4(Cd, src.w, src.w, src.w, src.w);               break;
	case GL_ONE_MINUS_SRC_ALPHA:      SET_VEC4(Cd, 1-src.w,1-src.w,1-src.w,1-src.w);          break;
	case GL_DST_ALPHA:                SET_VEC4(Cd, dst.w, dst.w, dst.w, dst.w);               break;
	case GL_ONE_MINUS_DST_ALPHA:      SET_VEC4(Cd, 1-dst.w,1-dst.w,1-dst.w,1-dst.w);          break;
	case GL_CONSTANT_COLOR:           Cd = *cnst;                                             break;
	case GL_ONE_MINUS_CONSTANT_COLOR: SET_VEC4(Cd, 1-cnst->x,1-cnst->y,1-cnst->z,1-cnst->w);  break;
	case GL_CONSTANT_ALPHA:           SET_VEC4(Cd, cnst->w, cnst->w, cnst->w, cnst->w);       break;
	case GL_ONE_MINUS_CONSTANT_ALPHA: SET_VEC4(Cd, 1-cnst->w,1-cnst->w,1-cnst->w,1-cnst->w);  break;
	case GL_SRC_ALPHA_SATURATE:       SET_VEC4(Cd, i, i, i, 1);                               break;
	/* not implemented yet
	case GL_SRC_ALPHA_SATURATE: Cd =  break;
	case GL_SRC1_COLOR: Cd =  break;
	case GL_ONE_MINUS_SRC1_COLOR: Cd =  break;
	case GL_SRC1_ALPHA: Cd =  break;
	case GL_ONE_MINUS_SRC1_ALPHA: Cd =  break;
	*/
	default:
		//should never get here
		printf("error unrecognized blend_dfactor!\n");
		break;
	}

	// Combine: factors are applied componentwise for ADD/SUBTRACT/REVERSE_SUBTRACT;
	// GL_MIN/GL_MAX ignore the factors entirely (matches the GL spec).
	vec4 result;
	switch (c->blend_equation) {
	case GL_FUNC_ADD:
		result = add_vec4s(mult_vec4s(Cs, src), mult_vec4s(Cd, dst));
		break;
	case GL_FUNC_SUBTRACT:
		result = sub_vec4s(mult_vec4s(Cs, src), mult_vec4s(Cd, dst));
		break;
	case GL_FUNC_REVERSE_SUBTRACT:
		result = sub_vec4s(mult_vec4s(Cd, dst), mult_vec4s(Cs, src));
		break;
	case GL_MIN:
		SET_VEC4(result, MIN(src.x, dst.x), MIN(src.y, dst.y), MIN(src.z, dst.z), MIN(src.w, dst.w));
		break;
	case GL_MAX:
		SET_VEC4(result, MAX(src.x, dst.x), MAX(src.y, dst.y), MAX(src.z, dst.z), MAX(src.w, dst.w));
		break;
	default:
		//should never get here
		printf("error unrecognized blend_equation!\n");
		break;
	}

	return vec4_to_Color(result);
}

// Apply the current logic op (c->logic_func) to the source and destination
// colors, operating bitwise on the packed 8-bit channels.
// s = source (fragment) color, d = destination (framebuffer) color.
static Color logic_ops_pixel(Color s, Color d)
{
	switch (c->logic_func) {
	case GL_CLEAR:
		return make_Color(0,0,0,0);
	case GL_SET:
		return make_Color(255,255,255,255);
	case GL_COPY:
		return s;
	case GL_COPY_INVERTED:
		return make_Color(~s.r, ~s.g, ~s.b, ~s.a);
	case GL_NOOP:
		return d;
	case GL_INVERT:
		return make_Color(~d.r, ~d.g, ~d.b, ~d.a);
	case GL_AND:
		return make_Color(s.r & d.r, s.g & d.g, s.b & d.b, s.a & d.a);
	case GL_NAND:
		return make_Color(~(s.r & d.r), ~(s.g & d.g), ~(s.b & d.b), ~(s.a & d.a));
	case GL_OR:
		return make_Color(s.r | d.r, s.g | d.g, s.b | d.b, s.a | d.a);
	case GL_NOR:
		return make_Color(~(s.r | d.r), ~(s.g | d.g), ~(s.b | d.b), ~(s.a | d.a));
	case GL_XOR:
		return make_Color(s.r ^ d.r, s.g ^ d.g, s.b ^ d.b, s.a ^ d.a);
	case GL_EQUIV:
		return make_Color(~(s.r ^ d.r), ~(s.g ^ d.g), ~(s.b ^ d.b), ~(s.a ^ d.a));
	case GL_AND_REVERSE:
		return make_Color(s.r & ~d.r, s.g & ~d.g, s.b & ~d.b, s.a & ~d.a);
	case GL_AND_INVERTED:
		return make_Color(~s.r & d.r, ~s.g & d.g, ~s.b & d.b, ~s.a & d.a);
	case GL_OR_REVERSE:
		return make_Color(s.r | ~d.r, s.g | ~d.g, s.b | ~d.b, s.a | ~d.a);
	case GL_OR_INVERTED:
		return make_Color(~s.r | d.r, ~s.g | d.g, ~s.b | d.b, ~s.a | d.a);
	default:
		puts("Unrecognized logic op!, defaulting to GL_COPY");
		return s;
	}
}

// Evaluate the stencil test against the stored stencil value for this pixel.
// Returns nonzero if the test passes.  Front/back state is selected by
// c->builtins.gl_FrontFacing; comparisons mask both ref and the stored value
// with the value mask, per glStencilFunc semantics.
static int stencil_test(u8 stencil)
{
	int func, ref, mask;
	// TODO what about non-triangles, should use front values, so need to make sure that's set?
	if (c->builtins.gl_FrontFacing) {
		func = c->stencil_func;
		ref = c->stencil_ref;
		mask = c->stencil_valuemask;
	} else {
		func = c->stencil_func_back;
		ref = c->stencil_ref_back;
		mask = c->stencil_valuemask_back;
	}
	switch (func) {
	case GL_NEVER:    return 0;
	case GL_LESS:     return (ref & mask) < (stencil & mask);
	case GL_LEQUAL:   return (ref & mask) <= (stencil & mask);
	case GL_GREATER:  return (ref & mask) > (stencil & mask);
	case GL_GEQUAL:   return (ref & mask) >= (stencil & mask);
	case GL_EQUAL:    return (ref & mask) == (stencil & mask);
	case GL_NOTEQUAL: return (ref & mask) != (stencil & mask);
	case GL_ALWAYS:   return 1;
	default:
		puts("Error: unrecognized stencil function!");
		return 0;
	}
}

// Update the stencil buffer entry at *dest after the stencil/depth tests.
// stencil/depth are pass flags: op is sfail when the stencil test failed,
// dpfail when stencil passed but depth failed, dppass when both passed
// (glStencilOp semantics).
static void stencil_op(int stencil, int depth, u8* dest)
{
	int op, ref, mask;
	// make them proper arrays in gl_context?
	GLenum* ops;
	// ops[0]=sfail, ops[1]=dpfail, ops[2]=dppass -- relies on sfail/dpfail/dppass
	// being laid out contiguously in glContext
	// TODO what about non-triangles, should use front values, so need to make sure that's set?
	if (c->builtins.gl_FrontFacing) {
		ops = &c->stencil_sfail;
		ref = c->stencil_ref;
		mask = c->stencil_writemask;
	} else {
		ops = &c->stencil_sfail_back;
		ref = c->stencil_ref_back;
		mask = c->stencil_writemask_back;
	}
	op = (!stencil) ? ops[0] : ((!depth) ? ops[1] : ops[2]);

	u8 val = *dest;
	switch (op) {
	case GL_KEEP: return;
	case GL_ZERO: val = 0; break;
	case GL_REPLACE: val = ref; break;
	case GL_INCR: if (val < 255) val++; break; // saturating increment
	case GL_INCR_WRAP: val++; break;           // u8 wraps naturally
	case GL_DECR: if (val > 0) val--; break;   // saturating decrement
	case GL_DECR_WRAP: val--; break;
	case GL_INVERT: val = ~val;
	}
	// NOTE(review): GL's writemask selects which BITS get written, i.e.
	// (val & mask) | (*dest & ~mask); this plain AND zeroes unwritable bits
	// instead of preserving them -- confirm intended
	*dest = val & mask;
}

// Point-rasterization entry: forwards a fragment at a float position to
// draw_pixel (the int truncation picks the containing pixel).
static void draw_pixel_vec2(vec4 cf, vec2 pos, float z)
{
	/*
	 * spec pg 110:
	 * Point rasterization produces a fragment for each framebuffer pixel whose center
	 * lies inside a square centered at the point's (x w , y w ), with side length equal
	 * to the current point size.
	 *
	 * For a 1 pixel size point there are only 3 edge cases where more than 1 pixel
	 * center (0.5, 0.5) would fall on the very edge of a 1 pixel square.  I think just
	 * drawing the upper corner pixel in these cases is fine and makes sense since
	 * width and height are actually 0.01 less than full, see make_viewport_matrix
	 * TODO point size > 1
	 */
	draw_pixel(cf, pos.x, pos.y, z);
}

// Per-fragment back end: runs scissor, stencil, and depth tests, then
// blending and logic ops, and finally writes the packed color into the
// back buffer at (x, y).  z is the fragment depth (post gl_FragDepth).
// The buffers are addressed via lastrow with a negative row stride because
// the framebuffer origin is bottom-left while memory rows run top-down.
static void draw_pixel(vec4 cf, int x, int y, float z)
{
	// Scissor test (inclusive lower bounds, exclusive upper)
	if (c->scissor_test) {
		if (x < c->scissor_lx || y < c->scissor_ly || x >= c->scissor_ux || y >= c->scissor_uy) {
			return;
		}
	}

	//MSAA

	//Stencil Test TODO have to handle when there is no stencil or depth buffer
	//(change gl_init to make stencil and depth buffers optional)
	u8* stencil_dest = &c->stencil_buf.lastrow[-y*c->stencil_buf.w + x];
	if (c->stencil_test) {
		if (!stencil_test(*stencil_dest)) {
			stencil_op(0, 1, stencil_dest); // sfail path
			return;
		}
	}

	//Depth test if necessary
	if (c->depth_test) {
		// I made gl_FragDepth read/write, ie same == to gl_FragCoord.z going into the shader
		// so I can just always use gl_FragDepth here
		float dest_depth = ((float*)c->zbuf.lastrow)[-y*c->zbuf.w + x];
		float src_depth = z;  //c->builtins.gl_FragDepth;  // pass as parameter?

		int depth_result = depthtest(src_depth, dest_depth);

		if (c->stencil_test) {
			stencil_op(1, depth_result, stencil_dest); // dpfail or dppass
		}
		if (!depth_result) {
			return;
		}
		// NOTE(review): c->depth_mask (glDepthMask) is not consulted before this
		// write -- confirm whether depth writes should be maskable here
		((float*)c->zbuf.lastrow)[-y*c->zbuf.w + x] = src_depth;
	} else if (c->stencil_test) {
		stencil_op(1, 1, stencil_dest); // no depth test => treated as depth pass
	}

	//Blending
	Color dest_color, src_color;
	u32* dest = &((u32*)c->back_buffer.lastrow)[-y*c->back_buffer.w + x];
	// Unpack the existing framebuffer pixel using the context's channel masks/shifts
	dest_color = make_Color((*dest & c->Rmask) >> c->Rshift, (*dest & c->Gmask) >> c->Gshift, (*dest & c->Bmask) >> c->Bshift, (*dest & c->Amask) >> c->Ashift);

	if (c->blend) {
		//TODO clamp in blend_pixel?  return the vec4 and clamp?
		src_color = blend_pixel(cf, Color_to_vec4(dest_color));
	} else {
		cf.x = clampf_01(cf.x);
		cf.y = clampf_01(cf.y);
		cf.z = clampf_01(cf.z);
		cf.w = clampf_01(cf.w);
		src_color = vec4_to_Color(cf);
	}
	//this line neded the negation in the viewport matrix
	//((u32*)c->back_buffer.buf)[y*buf.w+x] = c.a << 24 | c.c << 16 | c.g << 8 | c.b;

	//Logic Ops
	if (c->logic_ops) {
		src_color = logic_ops_pixel(src_color, dest_color);
	}

	//Dithering

	//((u32*)c->back_buffer.buf)[(buf.h-1-y)*buf.w + x] = c.a << 24 | c.c << 16 | c.g << 8 | c.b;
	//((u32*)c->back_buffer.lastrow)[-y*c->back_buffer.w + x] = c.a << 24 | c.c << 16 | c.g << 8 | c.b;
	// Repack using the context's shifts so any RGBA channel ordering works
	*dest = (u32)src_color.a << c->Ashift | (u32)src_color.r << c->Rshift | (u32)src_color.g << c->Gshift | (u32)src_color.b << c->Bshift;
}

#include <stdarg.h>

/******************************************
 * PORTABLEGL_IMPLEMENTATION
 ******************************************/

#include <stdio.h>
#include <assert.h>
#include <float.h>

// for CHAR_BIT
#include <limits.h>

// In debug builds IS_VALID checks an enum against an accepted list (setting
// c->error on mismatch); in release builds it compiles to 1 (always valid).
#ifdef DEBUG
#define IS_VALID(target, error, ...) is_valid(target, error, __VA_ARGS__)
#else
#define IS_VALID(target, error, ...) 1
#endif

int is_valid(GLenum target, GLenum error, int n, ...)
{ va_list argptr; va_start(argptr, n); for (int i=0; i<n; ++i) { if (target == va_arg(argptr, GLenum)) { return 1; } } va_end(argptr); if (!c->error) { c->error = error; } return 0; } // default pass through shaders for index 0 void default_vs(float* vs_output, void* vertex_attribs, Shader_Builtins* builtins, void* uniforms) { builtins->gl_Position = ((vec4*)vertex_attribs)[0]; } void default_fs(float* fs_input, Shader_Builtins* builtins, void* uniforms) { vec4* fragcolor = &builtins->gl_FragColor; //wish I could use a compound literal, stupid C++ compatibility fragcolor->x = 1.0f; fragcolor->y = 0.0f; fragcolor->z = 0.0f; fragcolor->w = 1.0f; } void init_glVertex_Array(glVertex_Array* v) { v->deleted = GL_FALSE; for (int i=0; i<GL_MAX_VERTEX_ATTRIBS; ++i) init_glVertex_Attrib(&v->vertex_attribs[i]); } void init_glVertex_Attrib(glVertex_Attrib* v) { /* GLint size; // number of components 1-4 GLenum type; // GL_FLOAT, default GLsizei stride; // GLsizei offset; // GLboolean normalized; unsigned int buf; GLboolean enabled; GLuint divisor; */ v->buf = 0; v->enabled = 0; v->divisor = 0; } #define GET_SHIFT(mask, shift) \ do {\ shift = 0;\ while ((mask & 1) == 0) {\ mask >>= 1;\ ++shift;\ }\ } while (0) int init_glContext(glContext* context, u32** back, int w, int h, int bitdepth, u32 Rmask, u32 Gmask, u32 Bmask, u32 Amask) { if (bitdepth > 32 || !back) return 0; context->user_alloced_backbuf = *back != NULL; if (!*back) { int bytes_per_pixel = (bitdepth + CHAR_BIT-1) / CHAR_BIT; *back = (u32*) malloc(w * h * bytes_per_pixel); if (!*back) return 0; } context->zbuf.buf = (u8*) malloc(w*h * sizeof(float)); if (!context->zbuf.buf) { if (!context->user_alloced_backbuf) { free(*back); *back = NULL; } return 0; } context->stencil_buf.buf = (u8*) malloc(w*h); if (!context->stencil_buf.buf) { if (!context->user_alloced_backbuf) { free(*back); *back = NULL; } free(context->zbuf.buf); return 0; } context->x_min = 0; context->y_min = 0; context->x_max = w; context->y_max = h; 
context->zbuf.w = w; context->zbuf.h = h; context->zbuf.lastrow = context->zbuf.buf + (h-1)*w*sizeof(float); context->stencil_buf.w = w; context->stencil_buf.h = h; context->stencil_buf.lastrow = context->stencil_buf.buf + (h-1)*w; context->back_buffer.w = w; context->back_buffer.h = h; context->back_buffer.buf = (u8*) *back; context->back_buffer.lastrow = context->back_buffer.buf + (h-1)*w*sizeof(u32); context->bitdepth = bitdepth; //not used yet context->Rmask = Rmask; context->Gmask = Gmask; context->Bmask = Bmask; context->Amask = Amask; GET_SHIFT(Rmask, context->Rshift); GET_SHIFT(Gmask, context->Gshift); GET_SHIFT(Bmask, context->Bshift); GET_SHIFT(Amask, context->Ashift); //initialize all vectors cvec_glVertex_Array(&context->vertex_arrays, 0, 3); cvec_glBuffer(&context->buffers, 0, 3); cvec_glProgram(&context->programs, 0, 3); cvec_glTexture(&context->textures, 0, 1); cvec_glVertex(&context->glverts, 0, 10); //TODO might as well just set it to MAX_VERTICES * MAX_OUTPUT_COMPONENTS cvec_float(&context->vs_output.output_buf, 0, 0); context->clear_stencil = 0; context->clear_color = make_Color(0, 0, 0, 0); SET_VEC4(context->blend_color, 0, 0, 0, 0); context->point_size = 1.0f; context->clear_depth = 1.0f; context->depth_range_near = 0.0f; context->depth_range_far = 1.0f; make_viewport_matrix(context->vp_mat, 0, 0, w, h, 1); //set flags //TODO match order in structure definition context->provoking_vert = GL_LAST_VERTEX_CONVENTION; context->cull_mode = GL_BACK; context->cull_face = GL_FALSE; context->front_face = GL_CCW; context->depth_test = GL_FALSE; context->fragdepth_or_discard = GL_FALSE; context->depth_clamp = GL_FALSE; context->depth_mask = GL_TRUE; context->blend = GL_FALSE; context->logic_ops = GL_FALSE; context->poly_offset = GL_FALSE; context->scissor_test = GL_FALSE; context->stencil_test = GL_FALSE; context->stencil_writemask = -1; // all 1s for the masks context->stencil_writemask_back = -1; context->stencil_ref = 0; context->stencil_ref_back = 0; 
context->stencil_valuemask = -1; context->stencil_valuemask_back = -1; context->stencil_func = GL_ALWAYS; context->stencil_func_back = GL_ALWAYS; context->stencil_sfail = GL_KEEP; context->stencil_dpfail = GL_KEEP; context->stencil_dppass = GL_KEEP; context->stencil_sfail_back = GL_KEEP; context->stencil_dpfail_back = GL_KEEP; context->stencil_dppass_back = GL_KEEP; context->logic_func = GL_COPY; context->blend_sfactor = GL_ONE; context->blend_dfactor = GL_ZERO; context->blend_equation = GL_FUNC_ADD; context->depth_func = GL_LESS; context->line_smooth = GL_FALSE; context->poly_mode_front = GL_FILL; context->poly_mode_back = GL_FILL; context->point_spr_origin = GL_UPPER_LEFT; context->poly_factor = 0.0f; context->poly_units = 0.0f; context->scissor_lx = 0; context->scissor_ly = 0; context->scissor_ux = w; context->scissor_uy = h; // According to refpages https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glPixelStore.xhtml context->unpack_alignment = 4; context->pack_alignment = 4; context->draw_triangle_front = draw_triangle_fill; context->draw_triangle_back = draw_triangle_fill; context->error = GL_NO_ERROR; //program 0 is supposed to be undefined but not invalid so I'll //just make it default glProgram tmp_prog = { default_vs, default_fs, NULL, GL_FALSE }; cvec_push_glProgram(&context->programs, tmp_prog); context->cur_program = 0; //setup default vertex_array (vao) at position 0 //we're like a compatibility profile for this but come on //no reason not to have this imo //https://www.opengl.org/wiki/Vertex_Specification#Vertex_Array_Object glVertex_Array tmp_va; init_glVertex_Array(&tmp_va); cvec_push_glVertex_Array(&context->vertex_arrays, tmp_va); context->cur_vertex_array = 0; //setup buffers and textures //need to push back once since 0 is invalid //valid buffers have to start at position 1 glBuffer tmp_buf; tmp_buf.user_owned = GL_TRUE; tmp_buf.deleted = GL_FALSE; glTexture tmp_tex; tmp_tex.user_owned = GL_TRUE; tmp_tex.deleted = GL_FALSE; 
tmp_tex.format = GL_RGBA; tmp_tex.type = GL_TEXTURE_UNBOUND; tmp_tex.data = NULL; tmp_tex.w = 0; tmp_tex.h = 0; tmp_tex.d = 0; cvec_push_glBuffer(&context->buffers, tmp_buf); cvec_push_glTexture(&context->textures, tmp_tex); return 1; } void free_glContext(glContext* context) { int i; free(context->zbuf.buf); free(context->stencil_buf.buf); if (!context->user_alloced_backbuf) { free(context->back_buffer.buf); } for (i=0; i<context->buffers.size; ++i) { if (!context->buffers.a[i].user_owned) { printf("freeing buffer %d\n", i); free(context->buffers.a[i].data); } } for (i=0; i<context->textures.size; ++i) { if (!context->textures.a[i].user_owned) { printf("freeing texture %d\n", i); free(context->textures.a[i].data); } } //free vectors cvec_free_glVertex_Array(&context->vertex_arrays); cvec_free_glBuffer(&context->buffers); cvec_free_glProgram(&context->programs); cvec_free_glTexture(&context->textures); cvec_free_glVertex(&context->glverts); cvec_free_float(&context->vs_output.output_buf); } void set_glContext(glContext* context) { c = context; } void* pglResizeFramebuffer(size_t w, size_t h) { u8* tmp; tmp = (u8*) realloc(c->zbuf.buf, w*h * sizeof(float)); if (!tmp) { if (c->error == GL_NO_ERROR) c->error = GL_OUT_OF_MEMORY; return NULL; } c->zbuf.buf = tmp; c->zbuf.w = w; c->zbuf.h = h; c->zbuf.lastrow = c->zbuf.buf + (h-1)*w*sizeof(float); tmp = (u8*) realloc(c->back_buffer.buf, w*h * sizeof(u32)); if (!tmp) { if (c->error == GL_NO_ERROR) c->error = GL_OUT_OF_MEMORY; return NULL; } c->back_buffer.buf = tmp; c->back_buffer.w = w; c->back_buffer.h = h; c->back_buffer.lastrow = c->back_buffer.buf + (h-1)*w*sizeof(u32); return tmp; } GLubyte* glGetString(GLenum name) { static GLubyte vendor[] = "Robert Winkler"; static GLubyte renderer[] = "PortableGL"; static GLubyte version[] = "OpenGL 3.x-ish PortableGL 0.95"; static GLubyte shading_language[] = "C/C++"; switch (name) { case GL_VENDOR: return vendor; case GL_RENDERER: return renderer; case GL_VERSION: return 
version; case GL_SHADING_LANGUAGE_VERSION: return shading_language; default: if (!c->error) c->error = GL_INVALID_ENUM; return 0; } } GLenum glGetError() { GLenum err = c->error; c->error = GL_NO_ERROR; return err; } void glGenVertexArrays(GLsizei n, GLuint* arrays) { glVertex_Array tmp; init_glVertex_Array(&tmp); tmp.deleted = GL_FALSE; //fill up empty slots first --n; for (int i=1; i<c->vertex_arrays.size && n>=0; ++i) { if (c->vertex_arrays.a[i].deleted) { c->vertex_arrays.a[i] = tmp; arrays[n--] = i; } } for (; n>=0; --n) { cvec_push_glVertex_Array(&c->vertex_arrays, tmp); arrays[n] = c->vertex_arrays.size-1; } } void glDeleteVertexArrays(GLsizei n, const GLuint* arrays) { for (int i=0; i<n; ++i) { if (!arrays[i] || arrays[i] >= c->vertex_arrays.size) continue; if (arrays[i] == c->cur_vertex_array) { //TODO check if memcpy isn't enough memcpy(&c->vertex_arrays.a[0], &c->vertex_arrays.a[arrays[i]], sizeof(glVertex_Array)); c->cur_vertex_array = 0; } c->vertex_arrays.a[arrays[i]].deleted = GL_TRUE; } } void glGenBuffers(GLsizei n, GLuint* buffers) { glBuffer tmp; tmp.user_owned = GL_TRUE; // NOTE: Doesn't really matter at this point tmp.data = NULL; tmp.deleted = GL_FALSE; //fill up empty slots first --n; for (int i=1; i<c->buffers.size && n>=0; ++i) { if (c->buffers.a[i].deleted) { c->buffers.a[i] = tmp; buffers[n--] = i; } } for (; n>=0; --n) { cvec_push_glBuffer(&c->buffers, tmp); buffers[n] = c->buffers.size-1; } } void glDeleteBuffers(GLsizei n, const GLuint* buffers) { GLenum type; for (int i=0; i<n; ++i) { if (!buffers[i] || buffers[i] >= c->buffers.size) continue; // NOTE(rswinkle): type is stored as correct index not the raw enum value so no need to // subtract here see glBindBuffer type = c->buffers.a[buffers[i]].type; if (buffers[i] == c->bound_buffers[type]) c->bound_buffers[type] = 0; if (!c->buffers.a[buffers[i]].user_owned) { free(c->buffers.a[buffers[i]].data); c->buffers.a[buffers[i]].data = NULL; } c->buffers.a[buffers[i]].deleted = GL_TRUE; } } 
void glGenTextures(GLsizei n, GLuint* textures) { glTexture tmp; //SET_VEC4(tmp.border_color, 0, 0, 0, 0); tmp.mag_filter = GL_LINEAR; tmp.min_filter = GL_LINEAR; //NOTE: spec says should be mipmap_linear tmp.wrap_s = GL_REPEAT; tmp.wrap_t = GL_REPEAT; tmp.data = NULL; tmp.deleted = GL_FALSE; tmp.user_owned = GL_TRUE; // NOTE: could be either before data tmp.format = GL_RGBA; tmp.type = GL_TEXTURE_UNBOUND; tmp.w = 0; tmp.h = 0; tmp.d = 0; --n; for (int i=0; i<c->textures.size && n>=0; ++i) { if (c->textures.a[i].deleted) { c->textures.a[i] = tmp; textures[n--] = i; } } for (; n>=0; --n) { cvec_push_glTexture(&c->textures, tmp); textures[n] = c->textures.size-1; } } void glDeleteTextures(GLsizei n, GLuint* textures) { GLenum type; for (int i=0; i<n; ++i) { if (!textures[i] || textures[i] >= c->textures.size) continue; // NOTE(rswinkle): type is stored as correct index not the raw enum value so no need to // subtract here see glBindTexture type = c->textures.a[textures[i]].type; if (textures[i] == c->bound_textures[type]) c->bound_textures[type] = 0; if (!c->textures.a[textures[i]].user_owned) { free(c->textures.a[textures[i]].data); c->textures.a[textures[i]].data = NULL; } c->textures.a[textures[i]].deleted = GL_TRUE; } } void glBindVertexArray(GLuint array) { if (array < c->vertex_arrays.size && c->vertex_arrays.a[array].deleted == GL_FALSE) { c->cur_vertex_array = array; } else if (!c->error) { c->error = GL_INVALID_OPERATION; } } void glBindBuffer(GLenum target, GLuint buffer) { //GL_ARRAY_BUFFER, GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, GL_ELEMENT_ARRAY_BUFFER, //GL_PIXEL_PACK_BUFFER, GL_PIXEL_UNPACK_BUFFER, GL_TEXTURE_BUFFER, GL_TRANSFORM_FEEDBACK_BUFFER, or GL_UNIFORM_BUFFER. 
if (target != GL_ARRAY_BUFFER && target != GL_ELEMENT_ARRAY_BUFFER) { if (!c->error) c->error = GL_INVALID_ENUM; return; } target -= GL_ARRAY_BUFFER; if (buffer < c->buffers.size && c->buffers.a[buffer].deleted == GL_FALSE) { c->bound_buffers[target] = buffer; // Note type isn't set till binding and we're not storing the raw // enum but the enum - GL_ARRAY_BUFFER so it's an index into c->bound_buffers // TODO need to see what's supposed to happen if you try to bind // a buffer to multiple targets c->buffers.a[buffer].type = target; } else if (!c->error) { c->error = GL_INVALID_OPERATION; } } void glBufferData(GLenum target, GLsizei size, const GLvoid* data, GLenum usage) { if (target != GL_ARRAY_BUFFER && target != GL_ELEMENT_ARRAY_BUFFER) { if (!c->error) c->error = GL_INVALID_ENUM; return; } //check for usage later target -= GL_ARRAY_BUFFER; if (c->bound_buffers[target] == 0) { if (!c->error) c->error = GL_INVALID_OPERATION; return; } //always NULL or valid free(c->buffers.a[c->bound_buffers[target]].data); if (!(c->buffers.a[c->bound_buffers[target]].data = (u8*) malloc(size))) { if (!c->error) c->error = GL_OUT_OF_MEMORY; // GL state is undefined from here on return; } if (data) { memcpy(c->buffers.a[c->bound_buffers[target]].data, data, size); } c->buffers.a[c->bound_buffers[target]].user_owned = GL_FALSE; c->buffers.a[c->bound_buffers[target]].size = size; if (target == GL_ELEMENT_ARRAY_BUFFER - GL_ARRAY_BUFFER) { c->vertex_arrays.a[c->cur_vertex_array].element_buffer = c->bound_buffers[target]; } } void glBufferSubData(GLenum target, GLsizei offset, GLsizei size, const GLvoid* data) { if (target != GL_ARRAY_BUFFER && target != GL_ELEMENT_ARRAY_BUFFER) { if (!c->error) c->error = GL_INVALID_ENUM; return; } target -= GL_ARRAY_BUFFER; if (c->bound_buffers[target] == 0) { if (!c->error) c->error = GL_INVALID_OPERATION; return; } if (offset + size > c->buffers.a[c->bound_buffers[target]].size) { if (!c->error) c->error = GL_INVALID_VALUE; return; } 
memcpy(&c->buffers.a[c->bound_buffers[target]].data[offset], data, size); } void glBindTexture(GLenum target, GLuint texture) { if (target < GL_TEXTURE_1D || target >= GL_NUM_TEXTURE_TYPES) { if (!c->error) c->error = GL_INVALID_ENUM; return; } target -= GL_TEXTURE_UNBOUND + 1; if (texture < c->textures.size && !c->textures.a[texture].deleted) { if (c->textures.a[texture].type == GL_TEXTURE_UNBOUND) { c->bound_textures[target] = texture; c->textures.a[texture].type = target; } else if (c->textures.a[texture].type == target) { c->bound_textures[target] = texture; } else if (!c->error) { c->error = GL_INVALID_OPERATION; } } else if (!c->error) { c->error = GL_INVALID_VALUE; } } void glTexParameteri(GLenum target, GLenum pname, GLint param) { //GL_TEXTURE_1D, GL_TEXTURE_2D, GL_TEXTURE_3D, GL_TEXTURE_1D_ARRAY, GL_TEXTURE_2D_ARRAY, GL_TEXTURE_RECTANGLE, or GL_TEXTURE_CUBE_MAP. //will add others as they're implemented if (target != GL_TEXTURE_1D && target != GL_TEXTURE_2D && target != GL_TEXTURE_3D && target != GL_TEXTURE_2D_ARRAY && target != GL_TEXTURE_RECTANGLE && target != GL_TEXTURE_CUBE_MAP) { if (!c->error) c->error = GL_INVALID_ENUM; return; } //shift to range 0 - NUM_TEXTURES-1 to access bound_textures array target -= GL_TEXTURE_UNBOUND + 1; //GL_TEXTURE_BASE_LEVEL, GL_TEXTURE_COMPARE_FUNC, GL_TEXTURE_COMPARE_MODE, GL_TEXTURE_LOD_BIAS, GL_TEXTURE_MIN_FILTER, //GL_TEXTURE_MAG_FILTER, GL_TEXTURE_MIN_LOD, GL_TEXTURE_MAX_LOD, GL_TEXTURE_MAX_LEVEL, GL_TEXTURE_SWIZZLE_R, //GL_TEXTURE_SWIZZLE_G, GL_TEXTURE_SWIZZLE_B, GL_TEXTURE_SWIZZLE_A, GL_TEXTURE_WRAP_S, GL_TEXTURE_WRAP_T, or GL_TEXTURE_WRAP_R. 
if (pname != GL_TEXTURE_MIN_FILTER && pname != GL_TEXTURE_MAG_FILTER && pname != GL_TEXTURE_WRAP_S && pname != GL_TEXTURE_WRAP_T && pname != GL_TEXTURE_WRAP_R) { if (!c->error) c->error = GL_INVALID_ENUM; return; } if (pname == GL_TEXTURE_MIN_FILTER) { if(param != GL_NEAREST && param != GL_LINEAR && param != GL_NEAREST_MIPMAP_NEAREST && param != GL_NEAREST_MIPMAP_LINEAR && param != GL_LINEAR_MIPMAP_NEAREST && param != GL_LINEAR_MIPMAP_LINEAR) { if (!c->error) c->error = GL_INVALID_ENUM; return; } //TODO mipmapping isn't actually supported, not sure it's worth trouble/perf hit //just adding the enums to make porting easier if (param == GL_NEAREST_MIPMAP_NEAREST || param == GL_NEAREST_MIPMAP_LINEAR) param = GL_NEAREST; if (param == GL_LINEAR_MIPMAP_NEAREST || param == GL_LINEAR_MIPMAP_LINEAR) param = GL_LINEAR; c->textures.a[c->bound_textures[target]].min_filter = param; } else if (pname == GL_TEXTURE_MAG_FILTER) { if(param != GL_NEAREST && param != GL_LINEAR) { if (!c->error) c->error = GL_INVALID_ENUM; return; } c->textures.a[c->bound_textures[target]].mag_filter = param; } else if (pname == GL_TEXTURE_WRAP_S) { if(param != GL_REPEAT && param != GL_CLAMP_TO_EDGE && param != GL_CLAMP_TO_BORDER && param != GL_MIRRORED_REPEAT) { if (!c->error) c->error = GL_INVALID_ENUM; return; } c->textures.a[c->bound_textures[target]].wrap_s = param; } else if (pname == GL_TEXTURE_WRAP_T) { if(param != GL_REPEAT && param != GL_CLAMP_TO_EDGE && param != GL_CLAMP_TO_BORDER && param != GL_MIRRORED_REPEAT) { if (!c->error) c->error = GL_INVALID_ENUM; return; } c->textures.a[c->bound_textures[target]].wrap_t = param; } else if (pname == GL_TEXTURE_WRAP_R) { if(param != GL_REPEAT && param != GL_CLAMP_TO_EDGE && param != GL_CLAMP_TO_BORDER && param != GL_MIRRORED_REPEAT) { if (!c->error) c->error = GL_INVALID_ENUM; return; } c->textures.a[c->bound_textures[target]].wrap_r = param; } } void glPixelStorei(GLenum pname, GLint param) { if (pname != GL_UNPACK_ALIGNMENT && pname != 
GL_PACK_ALIGNMENT) { if (!c->error) c->error = GL_INVALID_ENUM; return; } if (param != 1 && param != 2 && param != 4 && param != 8) { if (!c->error) c->error = GL_INVALID_VALUE; return; } if (pname == GL_UNPACK_ALIGNMENT) { c->unpack_alignment = param; } else if (pname == GL_PACK_ALIGNMENT) { c->pack_alignment = param; } } void glTexImage1D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLint border, GLenum format, GLenum type, const GLvoid* data) { if (target != GL_TEXTURE_1D) { if (!c->error) c->error = GL_INVALID_ENUM; return; } if (border) { if (!c->error) c->error = GL_INVALID_VALUE; return; } //ignore level for now int cur_tex = c->bound_textures[target-GL_TEXTURE_UNBOUND-1]; c->textures.a[cur_tex].w = width; if (type != GL_UNSIGNED_BYTE) { return; } int components; if (format == GL_RED) components = 1; else if (format == GL_RG) components = 2; else if (format == GL_RGB || format == GL_BGR) components = 3; else if (format == GL_RGBA || format == GL_BGRA) components = 4; else { if (!c->error) c->error = GL_INVALID_ENUM; return; } // NULL or valid free(c->textures.a[cur_tex].data); //TODO support other internal formats? components should be of internalformat not format if (!(c->textures.a[cur_tex].data = (u8*) malloc(width * components))) { if (!c->error) c->error = GL_OUT_OF_MEMORY; //undefined state now return; } u32* texdata = (u32*) c->textures.a[cur_tex].data; if (data) memcpy(&texdata[0], data, width*sizeof(u32)); c->textures.a[cur_tex].user_owned = GL_FALSE; //TODO //assume for now always RGBA coming in and that's what I'm storing it as } void glTexImage2D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const GLvoid* data) { //GL_TEXTURE_1D, GL_TEXTURE_2D, GL_TEXTURE_3D, GL_TEXTURE_1D_ARRAY, GL_TEXTURE_2D_ARRAY, GL_TEXTURE_RECTANGLE, or GL_TEXTURE_CUBE_MAP. 
//will add others as they're implemented if (target != GL_TEXTURE_2D && target != GL_TEXTURE_RECTANGLE && target != GL_TEXTURE_CUBE_MAP_POSITIVE_X && target != GL_TEXTURE_CUBE_MAP_NEGATIVE_X && target != GL_TEXTURE_CUBE_MAP_POSITIVE_Y && target != GL_TEXTURE_CUBE_MAP_NEGATIVE_Y && target != GL_TEXTURE_CUBE_MAP_POSITIVE_Z && target != GL_TEXTURE_CUBE_MAP_NEGATIVE_Z) { if (!c->error) c->error = GL_INVALID_ENUM; return; } if (border) { if (!c->error) c->error = GL_INVALID_VALUE; return; } //ignore level for now //TODO support other types? if (type != GL_UNSIGNED_BYTE) { if (!c->error) c->error = GL_INVALID_ENUM; return; } // TODO I don't actually support anything other than GL_RGBA for input or // internal format ... so I should probably make the others errors and // I'm not even checking internalFormat currently.. int components; if (format == GL_RED) components = 1; else if (format == GL_RG) components = 2; else if (format == GL_RGB || format == GL_BGR) components = 3; else if (format == GL_RGBA || format == GL_BGRA) components = 4; else { if (!c->error) c->error = GL_INVALID_ENUM; return; } int cur_tex; // TODO If I ever support type other than GL_UNSIGNED_BYTE (also using for both internalformat and format) int byte_width = width * components; int padding_needed = byte_width % c->unpack_alignment; int padded_row_len = (!padding_needed) ? byte_width : byte_width + c->unpack_alignment - padding_needed; if (target == GL_TEXTURE_2D || target == GL_TEXTURE_RECTANGLE) { cur_tex = c->bound_textures[target-GL_TEXTURE_UNBOUND-1]; c->textures.a[cur_tex].w = width; c->textures.a[cur_tex].h = height; // either NULL or valid free(c->textures.a[cur_tex].data); //TODO support other internal formats? 
components should be of internalformat not format if (!(c->textures.a[cur_tex].data = (u8*) malloc(height * byte_width))) { if (!c->error) c->error = GL_OUT_OF_MEMORY; //undefined state now return; } if (data) { if (!padding_needed) { memcpy(c->textures.a[cur_tex].data, data, height*byte_width); } else { for (int i=0; i<height; ++i) { memcpy(&c->textures.a[cur_tex].data[i*byte_width], &((u8*)data)[i*padded_row_len], byte_width); } } } c->textures.a[cur_tex].user_owned = GL_FALSE; } else { //CUBE_MAP cur_tex = c->bound_textures[GL_TEXTURE_CUBE_MAP-GL_TEXTURE_UNBOUND-1]; // If we're reusing a texture, and we haven't already loaded // one of the planes of the cubemap, data is either NULL or valid if (!c->textures.a[cur_tex].w) free(c->textures.a[cur_tex].data); if (width != height) { //TODO spec says INVALID_VALUE, man pages say INVALID_ENUM ? if (!c->error) c->error = GL_INVALID_VALUE; return; } int mem_size = width*height*6 * components; if (c->textures.a[cur_tex].w == 0) { c->textures.a[cur_tex].w = width; c->textures.a[cur_tex].h = width; //same cause square if (!(c->textures.a[cur_tex].data = (u8*) malloc(mem_size))) { if (!c->error) c->error = GL_OUT_OF_MEMORY; //undefined state now return; } } else if (c->textures.a[cur_tex].w != width) { //TODO spec doesn't say all sides must have same dimensions but it makes sense //and this site suggests it http://www.opengl.org/wiki/Cubemap_Texture if (!c->error) c->error = GL_INVALID_VALUE; return; } target -= GL_TEXTURE_CUBE_MAP_POSITIVE_X; //use target as plane index // TODO handle different format and internalFormat int p = height*byte_width; u8* texdata = c->textures.a[cur_tex].data; if (data) { if (!padding_needed) { memcpy(&texdata[target*p], data, height*byte_width); } else { for (int i=0; i<height; ++i) { memcpy(&texdata[target*p + i*byte_width], &((u8*)data)[i*padded_row_len], byte_width); } } } c->textures.a[cur_tex].user_owned = GL_FALSE; } //end CUBE_MAP } void glTexImage3D(GLenum target, GLint level, GLint 
internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid* data) { if (target != GL_TEXTURE_3D && target != GL_TEXTURE_2D_ARRAY) { if (!c->error) c->error = GL_INVALID_ENUM; return; } if (border) { if (!c->error) c->error = GL_INVALID_VALUE; return; } //ignore level for now int cur_tex = c->bound_textures[target-GL_TEXTURE_UNBOUND-1]; c->textures.a[cur_tex].w = width; c->textures.a[cur_tex].h = height; c->textures.a[cur_tex].d = depth; if (type != GL_UNSIGNED_BYTE) { // TODO return; } // TODO add error? only support GL_RGBA for now int components; if (format == GL_RED) components = 1; else if (format == GL_RG) components = 2; else if (format == GL_RGB || format == GL_BGR) components = 3; else if (format == GL_RGBA || format == GL_BGRA) components = 4; else { if (!c->error) c->error = GL_INVALID_ENUM; return; } int byte_width = width * components; int padding_needed = byte_width % c->unpack_alignment; int padded_row_len = (!padding_needed) ? byte_width : byte_width + c->unpack_alignment - padding_needed; // NULL or valid free(c->textures.a[cur_tex].data); //TODO support other internal formats? 
components should be of internalformat not format if (!(c->textures.a[cur_tex].data = (u8*) malloc(width*height*depth * components))) { if (!c->error) c->error = GL_OUT_OF_MEMORY; //undefined state now return; } u32* texdata = (u32*) c->textures.a[cur_tex].data; if (data) { if (!padding_needed) { memcpy(texdata, data, width*height*depth*sizeof(u32)); } else { for (int i=0; i<height*depth; ++i) { memcpy(&texdata[i*byte_width], &((u8*)data)[i*padded_row_len], byte_width); } } } c->textures.a[cur_tex].user_owned = GL_FALSE; //TODO //assume for now always RGBA coming in and that's what I'm storing it as } void glTexSubImage1D(GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const GLvoid* data) { if (target != GL_TEXTURE_1D) { if (!c->error) c->error = GL_INVALID_ENUM; return; } //ignore level for now int cur_tex = c->bound_textures[target-GL_TEXTURE_UNBOUND-1]; //only kind supported currently if (type != GL_UNSIGNED_BYTE) { if (!c->error) c->error = GL_INVALID_ENUM; return; } //TODO if (format != GL_RGBA) { if (!c->error) c->error = GL_INVALID_ENUM; return; } if (xoffset < 0 || xoffset + width > c->textures.a[cur_tex].w) { if (!c->error) c->error = GL_INVALID_VALUE; return; } u32* texdata = (u32*) c->textures.a[cur_tex].data; memcpy(&texdata[xoffset], data, width*sizeof(u32)); } void glTexSubImage2D(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid* data) { //GL_TEXTURE_1D, GL_TEXTURE_2D, GL_TEXTURE_3D, GL_TEXTURE_1D_ARRAY, GL_TEXTURE_2D_ARRAY, GL_TEXTURE_RECTANGLE, or GL_TEXTURE_CUBE_MAP. 
//will add others as they're implemented if (target != GL_TEXTURE_2D && target != GL_TEXTURE_CUBE_MAP_POSITIVE_X && target != GL_TEXTURE_CUBE_MAP_NEGATIVE_X && target != GL_TEXTURE_CUBE_MAP_POSITIVE_Y && target != GL_TEXTURE_CUBE_MAP_NEGATIVE_Y && target != GL_TEXTURE_CUBE_MAP_POSITIVE_Z && target != GL_TEXTURE_CUBE_MAP_NEGATIVE_Z) { if (!c->error) c->error = GL_INVALID_ENUM; return; } //ignore level for now if (type != GL_UNSIGNED_BYTE) { if (!c->error) c->error = GL_INVALID_ENUM; return; } if (format != GL_RGBA) { if (!c->error) c->error = GL_INVALID_ENUM; return; } int cur_tex; u32* d = (u32*) data; if (target == GL_TEXTURE_2D) { cur_tex = c->bound_textures[target-GL_TEXTURE_UNBOUND-1]; u32* texdata = (u32*) c->textures.a[cur_tex].data; if (xoffset < 0 || xoffset + width > c->textures.a[cur_tex].w || yoffset < 0 || yoffset + height > c->textures.a[cur_tex].h) { if (!c->error) c->error = GL_INVALID_VALUE; return; } int w = c->textures.a[cur_tex].w; for (int i=0; i<height; ++i) { memcpy(&texdata[(yoffset+i)*w + xoffset], &d[i*width], width*sizeof(u32)); } } else { //CUBE_MAP cur_tex = c->bound_textures[GL_TEXTURE_CUBE_MAP-GL_TEXTURE_UNBOUND-1]; u32* texdata = (u32*) c->textures.a[cur_tex].data; int w = c->textures.a[cur_tex].w; target -= GL_TEXTURE_CUBE_MAP_POSITIVE_X; //use target as plane index int p = w*w; for (int i=0; i<height; ++i) memcpy(&texdata[p*target + (yoffset+i)*w + xoffset], &d[i*width], width*sizeof(u32)); } //end CUBE_MAP } void glTexSubImage3D(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const GLvoid* data) { if (target != GL_TEXTURE_3D && target != GL_TEXTURE_2D_ARRAY) { if (!c->error) c->error = GL_INVALID_ENUM; return; } //ignore level for now // TODO handle UNPACK alignment here as well... 
int cur_tex = c->bound_textures[target-GL_TEXTURE_UNBOUND-1]; if (type != GL_UNSIGNED_BYTE) { if (!c->error) c->error = GL_INVALID_ENUM; return; } //TODO if (format != GL_RGBA) { if (!c->error) c->error = GL_INVALID_ENUM; return; } if (xoffset < 0 || xoffset + width > c->textures.a[cur_tex].w || yoffset < 0 || yoffset + height > c->textures.a[cur_tex].h || zoffset < 0 || zoffset + depth > c->textures.a[cur_tex].d) { if (!c->error) c->error = GL_INVALID_VALUE; return; } int w = c->textures.a[cur_tex].w; int h = c->textures.a[cur_tex].h; int p = w*h; u32* d = (u32*) data; u32* texdata = (u32*) c->textures.a[cur_tex].data; for (int j=0; j<depth; ++j) { for (int i=0; i<height; ++i) { memcpy(&texdata[(zoffset+j)*p + (yoffset+i)*w + xoffset], &d[j*width*height + i*width], width*sizeof(u32)); } } } void glVertexAttribPointer(GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, GLsizei offset) { if (index >= GL_MAX_VERTEX_ATTRIBS || size < 1 || size > 4 || (!c->bound_buffers[GL_ARRAY_BUFFER-GL_ARRAY_BUFFER] && offset)) { if (!c->error) c->error = GL_INVALID_OPERATION; return; } //TODO type Specifies the data type of each component in the array. The symbolic constants GL_BYTE, GL_UNSIGNED_BYTE, GL_SHORT, //GL_UNSIGNED_SHORT, GL_INT, and GL_UNSIGNED_INT are accepted by both functions. Additionally GL_HALF_FLOAT, GL_FLOAT, GL_DOUBLE, //GL_INT_2_10_10_10_REV, and GL_UNSIGNED_INT_2_10_10_10_REV are accepted by glVertexAttribPointer. The initial value is GL_FLOAT. if (type != GL_FLOAT) { if (!c->error) c->error = GL_INVALID_ENUM; return; } glVertex_Attrib* v = &(c->vertex_arrays.a[c->cur_vertex_array].vertex_attribs[index]); v->size = size; v->type = type; //TODO expand for other types etc. v->stride = (stride) ? 
stride : size*sizeof(GLfloat); v->offset = offset; v->normalized = normalized; // I put ARRAY_BUFFER-itself instead of 0 to reinforce that bound_buffers is indexed that way, buffer type - GL_ARRAY_BUFFER v->buf = c->bound_buffers[GL_ARRAY_BUFFER-GL_ARRAY_BUFFER]; //can be 0 if offset is 0/NULL } void glEnableVertexAttribArray(GLuint index) { c->vertex_arrays.a[c->cur_vertex_array].vertex_attribs[index].enabled = GL_TRUE; } void glDisableVertexAttribArray(GLuint index) { c->vertex_arrays.a[c->cur_vertex_array].vertex_attribs[index].enabled = GL_FALSE; } void glVertexAttribDivisor(GLuint index, GLuint divisor) { if (index >= GL_MAX_VERTEX_ATTRIBS) { if (!c->error) c->error = GL_INVALID_VALUE; return; } c->vertex_arrays.a[c->cur_vertex_array].vertex_attribs[index].divisor = divisor; } //TODO not used vec4 get_vertex_attrib_array(glVertex_Attrib* v, GLsizei i) { //this line need work for future flexibility and handling more than floats u8* buf_pos = (u8*)c->buffers.a[v->buf].data + v->offset + v->stride*i; vec4 tmpvec4; memcpy(&tmpvec4, buf_pos, sizeof(float)*v->size); //c->cur_vertex_array->vertex_attribs[enabled[j]].buf->data; return tmpvec4; } void glDrawArrays(GLenum mode, GLint first, GLsizei count) { if (mode < GL_POINTS || mode > GL_TRIANGLE_FAN) { if (!c->error) c->error = GL_INVALID_ENUM; return; } // TODO should I just make GLsizei an uint32_t rather than int32_t? if (count < 0) { if (!c->error) c->error = GL_INVALID_VALUE; return; } if (!count) return; run_pipeline(mode, first, count, 0, 0, GL_FALSE); } void glDrawElements(GLenum mode, GLsizei count, GLenum type, GLsizei offset) { if (mode < GL_POINTS || mode > GL_TRIANGLE_FAN) { if (!c->error) c->error = GL_INVALID_ENUM; return; } //error not in the spec but says type must be one of these ... strange if (type != GL_UNSIGNED_BYTE && type != GL_UNSIGNED_SHORT && type != GL_UNSIGNED_INT) { if (!c->error) c->error = GL_INVALID_ENUM; return; } // TODO should I just make GLsizei an uint32_t rather than int32_t? 
if (count < 0) { if (!c->error) c->error = GL_INVALID_VALUE; return; } if (!count) return; c->buffers.a[c->vertex_arrays.a[c->cur_vertex_array].element_buffer].type = type; run_pipeline(mode, offset, count, 0, 0, GL_TRUE); } void glDrawArraysInstanced(GLenum mode, GLint first, GLsizei count, GLsizei instancecount) { if (mode < GL_POINTS || mode > GL_TRIANGLE_FAN) { if (!c->error) c->error = GL_INVALID_ENUM; return; } if (count < 0 || instancecount < 0) { if (!c->error) c->error = GL_INVALID_VALUE; return; } if (!count || !instancecount) return; for (unsigned int instance = 0; instance < instancecount; ++instance) { run_pipeline(mode, first, count, instance, 0, GL_FALSE); } } void glDrawArraysInstancedBaseInstance(GLenum mode, GLint first, GLsizei count, GLsizei instancecount, GLuint baseinstance) { if (mode < GL_POINTS || mode > GL_TRIANGLE_FAN) { if (!c->error) c->error = GL_INVALID_ENUM; return; } if (count < 0 || instancecount < 0) { if (!c->error) c->error = GL_INVALID_VALUE; return; } if (!count || !instancecount) return; for (unsigned int instance = 0; instance < instancecount; ++instance) { run_pipeline(mode, first, count, instance, baseinstance, GL_FALSE); } } void glDrawElementsInstanced(GLenum mode, GLsizei count, GLenum type, GLsizei offset, GLsizei instancecount) { if (mode < GL_POINTS || mode > GL_TRIANGLE_FAN) { if (!c->error) c->error = GL_INVALID_ENUM; return; } // NOTE: error not in the spec but says type must be one of these ... 
strange if (type != GL_UNSIGNED_BYTE && type != GL_UNSIGNED_SHORT && type != GL_UNSIGNED_INT) { if (!c->error) c->error = GL_INVALID_ENUM; return; } if (count < 0 || instancecount < 0) { if (!c->error) c->error = GL_INVALID_VALUE; return; } if (!count || !instancecount) return; c->buffers.a[c->vertex_arrays.a[c->cur_vertex_array].element_buffer].type = type; for (unsigned int instance = 0; instance < instancecount; ++instance) { run_pipeline(mode, offset, count, instance, 0, GL_TRUE); } } void glDrawElementsInstancedBaseInstance(GLenum mode, GLsizei count, GLenum type, GLsizei offset, GLsizei instancecount, GLuint baseinstance) { if (mode < GL_POINTS || mode > GL_TRIANGLE_FAN) { if (!c->error) c->error = GL_INVALID_ENUM; return; } //error not in the spec but says type must be one of these ... strange if (type != GL_UNSIGNED_BYTE && type != GL_UNSIGNED_SHORT && type != GL_UNSIGNED_INT) { if (!c->error) c->error = GL_INVALID_ENUM; return; } if (count < 0 || instancecount < 0) { if (!c->error) c->error = GL_INVALID_VALUE; return; } if (!count || !instancecount) return; c->buffers.a[c->vertex_arrays.a[c->cur_vertex_array].element_buffer].type = type; for (unsigned int instance = 0; instance < instancecount; ++instance) { run_pipeline(mode, offset, count, instance, baseinstance, GL_TRUE); } } void glViewport(int x, int y, GLsizei width, GLsizei height) { if (width < 0 || height < 0) { if (!c->error) c->error = GL_INVALID_VALUE; return; } make_viewport_matrix(c->vp_mat, x, y, width, height, 1); c->x_min = x; c->y_min = y; c->x_max = x + width; c->y_max = y + height; } void glClearColor(GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha) { red = clampf_01(red); green = clampf_01(green); blue = clampf_01(blue); alpha = clampf_01(alpha); vec4 tmp = { red, green, blue, alpha }; c->clear_color = vec4_to_Color(tmp); } void glClearDepth(GLclampf depth) { c->clear_depth = clampf_01(depth); } void glDepthFunc(GLenum func) { if (func < GL_LESS || func > GL_NEVER) { if 
(!c->error) c->error =GL_INVALID_ENUM; return; } c->depth_func = func; } void glDepthRange(GLclampf nearVal, GLclampf farVal) { c->depth_range_near = clampf_01(nearVal); c->depth_range_far = clampf_01(farVal); } void glDepthMask(GLboolean flag) { c->depth_mask = flag; } void glClear(GLbitfield mask) { if (!(mask & (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT))) { if (!c->error) c->error = GL_INVALID_VALUE; printf("failed to clear\n"); return; } // TODO since all the buffers should be the same width and height // (right? even though they're different types they should be 1 to 1), // why not just set local w and h and use for all instead of member w/h // for each framebuffer? // better to just set min/max x/y and use nested loops even when scissor is disabled? Color col = c->clear_color; if (mask & GL_COLOR_BUFFER_BIT) { if (!c->scissor_test) { for (int i=0; i<c->back_buffer.w*c->back_buffer.h; ++i) { ((u32*)c->back_buffer.buf)[i] = (u32)col.a << c->Ashift | (u32)col.r << c->Rshift | (u32)col.g << c->Gshift | (u32)col.b << c->Bshift; } } else { for (int y=c->scissor_ly; y<c->scissor_uy; ++y) { for (int x=c->scissor_lx; x<c->scissor_ux; ++x) { ((u32*)c->back_buffer.lastrow)[-y*c->back_buffer.w + x] = (u32)col.a << c->Ashift | (u32)col.r << c->Rshift | (u32)col.g << c->Gshift | (u32)col.b << c->Bshift; } } } } if (mask & GL_DEPTH_BUFFER_BIT) { if (!c->scissor_test) { //TODO try a big memcpy or other way to clear it for (int i=0; i < c->zbuf.w * c->zbuf.h; ++i) { ((float*)c->zbuf.buf)[i] = c->clear_depth; } } else { for (int y=c->scissor_ly; y<c->scissor_uy; ++y) { for (int x=c->scissor_lx; x<c->scissor_ux; ++x) { ((float*)c->zbuf.lastrow)[-y*c->zbuf.w + x] = c->clear_depth; } } } } if (mask & GL_STENCIL_BUFFER_BIT) { if (!c->scissor_test) { //TODO try a big memcpy or other way to clear it for (int i=0; i < c->stencil_buf.w * c->stencil_buf.h; ++i) { c->stencil_buf.buf[i] = c->clear_stencil; } } else { for (int y=c->scissor_ly; y<c->scissor_uy; 
++y) { for (int x=c->scissor_lx; x<c->scissor_ux; ++x) { c->stencil_buf.lastrow[-y*c->stencil_buf.w + x] = c->clear_stencil; } } } } } void glEnable(GLenum cap) { switch (cap) { case GL_CULL_FACE: c->cull_face = GL_TRUE; break; case GL_DEPTH_TEST: c->depth_test = GL_TRUE; break; case GL_DEPTH_CLAMP: c->depth_clamp = GL_TRUE; break; case GL_LINE_SMOOTH: // TODO implementation needs work/upgrade //c->line_smooth = GL_TRUE; break; case GL_BLEND: c->blend = GL_TRUE; break; case GL_COLOR_LOGIC_OP: c->logic_ops = GL_TRUE; break; case GL_POLYGON_OFFSET_FILL: c->poly_offset = GL_TRUE; break; case GL_SCISSOR_TEST: c->scissor_test = GL_TRUE; break; case GL_STENCIL_TEST: c->stencil_test = GL_TRUE; break; default: if (!c->error) c->error = GL_INVALID_ENUM; } } void glDisable(GLenum cap) { switch (cap) { case GL_CULL_FACE: c->cull_face = GL_FALSE; break; case GL_DEPTH_TEST: c->depth_test = GL_FALSE; break; case GL_DEPTH_CLAMP: c->depth_clamp = GL_FALSE; break; case GL_LINE_SMOOTH: c->line_smooth = GL_FALSE; break; case GL_BLEND: c->blend = GL_FALSE; break; case GL_COLOR_LOGIC_OP: c->logic_ops = GL_FALSE; break; case GL_POLYGON_OFFSET_FILL: c->poly_offset = GL_FALSE; break; case GL_SCISSOR_TEST: c->scissor_test = GL_FALSE; break; case GL_STENCIL_TEST: c->stencil_test = GL_FALSE; break; default: if (!c->error) c->error = GL_INVALID_ENUM; } } GLboolean glIsEnabled(GLenum cap) { // make up my own enum for this? rename member as no_early_z? 
//GLboolean fragdepth_or_discard; switch (cap) { case GL_DEPTH_TEST: return c->depth_test; case GL_LINE_SMOOTH: return c->line_smooth; case GL_CULL_FACE: return c->cull_face; case GL_DEPTH_CLAMP: return c->depth_clamp; case GL_BLEND: return c->blend; case GL_COLOR_LOGIC_OP: return c->logic_ops; case GL_POLYGON_OFFSET_FILL: return c->poly_offset; case GL_SCISSOR_TEST: return c->scissor_test; case GL_STENCIL_TEST: return c->stencil_test; default: if (!c->error) c->error = GL_INVALID_ENUM; } return GL_FALSE; } void glGetBooleanv(GLenum pname, GLboolean* params) { // not sure it's worth adding every enum, spec says // gelGet* will convert/map types if they don't match the function switch (pname) { case GL_DEPTH_TEST: *params = c->depth_test; break; case GL_LINE_SMOOTH: *params = c->line_smooth; break; case GL_CULL_FACE: *params = c->cull_face; break; case GL_DEPTH_CLAMP: *params = c->depth_clamp; break; case GL_BLEND: *params = c->blend; break; case GL_COLOR_LOGIC_OP: *params = c->logic_ops; break; case GL_POLYGON_OFFSET_FILL: *params = c->poly_offset; break; case GL_SCISSOR_TEST: *params = c->scissor_test; break; case GL_STENCIL_TEST: *params = c->stencil_test; break; default: if (!c->error) c->error = GL_INVALID_ENUM; } } void glGetFloatv(GLenum pname, GLfloat* params) { switch (pname) { case GL_POLYGON_OFFSET_FACTOR: *params = c->poly_factor; break; case GL_POLYGON_OFFSET_UNITS: *params = c->poly_units; break; case GL_POINT_SIZE: *params = c->point_size; break; case GL_DEPTH_CLEAR_VALUE: *params = c->clear_depth; break; case GL_DEPTH_RANGE: params[0] = c->depth_range_near; params[1] = c->depth_range_near; break; default: if (!c->error) c->error = GL_INVALID_ENUM; } } void glGetIntegerv(GLenum pname, GLint* params) { // TODO maybe make all the enum/int member names match the associated ENUM? 
switch (pname) { case GL_STENCIL_WRITE_MASK: params[0] = c->stencil_writemask; break; case GL_STENCIL_REF: params[0] = c->stencil_ref; break; case GL_STENCIL_VALUE_MASK: params[0] = c->stencil_valuemask; break; case GL_STENCIL_FUNC: params[0] = c->stencil_func; break; case GL_STENCIL_FAIL: params[0] = c->stencil_sfail; break; case GL_STENCIL_PASS_DEPTH_FAIL: params[0] = c->stencil_dpfail; break; case GL_STENCIL_PASS_DEPTH_PASS: params[0] = c->stencil_dppass; break; case GL_STENCIL_BACK_WRITE_MASK: params[0] = c->stencil_writemask_back; break; case GL_STENCIL_BACK_REF: params[0] = c->stencil_ref_back; break; case GL_STENCIL_BACK_VALUE_MASK: params[0] = c->stencil_valuemask_back; break; case GL_STENCIL_BACK_FUNC: params[0] = c->stencil_func_back; break; case GL_STENCIL_BACK_FAIL: params[0] = c->stencil_sfail_back; break; case GL_STENCIL_BACK_PASS_DEPTH_FAIL: params[0] = c->stencil_dpfail_back; break; case GL_STENCIL_BACK_PASS_DEPTH_PASS: params[0] = c->stencil_dppass_back; break; //TODO implement glBlendFuncSeparate and glBlendEquationSeparate case GL_LOGIC_OP_MODE: params[0] = c->logic_func; break; case GL_BLEND_SRC_RGB: case GL_BLEND_SRC_ALPHA: params[0] = c->blend_sfactor; break; case GL_BLEND_DST_RGB: case GL_BLEND_DST_ALPHA: params[0] = c->blend_dfactor; break; case GL_BLEND_EQUATION_RGB: case GL_BLEND_EQUATION_ALPHA: params[0] = c->blend_equation; break; case GL_CULL_FACE_MODE: params[0] = c->cull_mode; break; case GL_FRONT_FACE: params[0] = c->front_face; break; case GL_DEPTH_FUNC: params[0] = c->depth_func; break; case GL_POINT_SPRITE_COORD_ORIGIN: params[0] = c->point_spr_origin; case GL_PROVOKING_VERTEX: params[0] = c->provoking_vert; break; case GL_POLYGON_MODE: params[0] = c->poly_mode_front; params[1] = c->poly_mode_back; break; default: if (!c->error) c->error = GL_INVALID_ENUM; } } void glCullFace(GLenum mode) { if (mode != GL_FRONT && mode != GL_BACK && mode != GL_FRONT_AND_BACK) { if (!c->error) c->error = GL_INVALID_ENUM; return; } c->cull_mode = 
mode; } void glFrontFace(GLenum mode) { if (mode != GL_CCW && mode != GL_CW) { if (!c->error) c->error = GL_INVALID_ENUM; return; } c->front_face = mode; } void glPolygonMode(GLenum face, GLenum mode) { if ((face != GL_FRONT && face != GL_BACK && face != GL_FRONT_AND_BACK) || (mode != GL_POINT && mode != GL_LINE && mode != GL_FILL)) { if (!c->error) c->error = GL_INVALID_ENUM; return; } if (mode == GL_POINT) { if (face == GL_FRONT) { c->poly_mode_front = mode; c->draw_triangle_front = draw_triangle_point; } else if (face == GL_BACK) { c->poly_mode_back = mode; c->draw_triangle_back = draw_triangle_point; } else { c->poly_mode_front = mode; c->poly_mode_back = mode; c->draw_triangle_front = draw_triangle_point; c->draw_triangle_back = draw_triangle_point; } } else if (mode == GL_LINE) { if (face == GL_FRONT) { c->poly_mode_front = mode; c->draw_triangle_front = draw_triangle_line; } else if (face == GL_BACK) { c->poly_mode_back = mode; c->draw_triangle_back = draw_triangle_line; } else { c->poly_mode_front = mode; c->poly_mode_back = mode; c->draw_triangle_front = draw_triangle_line; c->draw_triangle_back = draw_triangle_line; } } else { if (face == GL_FRONT) { c->poly_mode_front = mode; c->draw_triangle_front = draw_triangle_fill; } else if (face == GL_BACK) { c->poly_mode_back = mode; c->draw_triangle_back = draw_triangle_fill; } else { c->poly_mode_front = mode; c->poly_mode_back = mode; c->draw_triangle_front = draw_triangle_fill; c->draw_triangle_back = draw_triangle_fill; } } } void glPointSize(GLfloat size) { if (size <= 0.0f) { if (!c->error) c->error = GL_INVALID_VALUE; return; } c->point_size = size; } void glPointParameteri(GLenum pname, GLint param) { //also GL_POINT_FADE_THRESHOLD_SIZE if (pname != GL_POINT_SPRITE_COORD_ORIGIN || (param != GL_LOWER_LEFT && param != GL_UPPER_LEFT)) { if (!c->error) c->error = GL_INVALID_ENUM; return; } c->point_spr_origin = param; } void glProvokingVertex(GLenum provokeMode) { if (provokeMode != 
GL_FIRST_VERTEX_CONVENTION && provokeMode != GL_LAST_VERTEX_CONVENTION) { if (!c->error) c->error = GL_INVALID_ENUM; return; } c->provoking_vert = provokeMode; } // Shader functions GLuint pglCreateProgram(vert_func vertex_shader, frag_func fragment_shader, GLsizei n, GLenum* interpolation, GLboolean fragdepth_or_discard) { if (!vertex_shader || !fragment_shader) { //TODO set error? doesn't in spec but I'll think about it return 0; } if (n > GL_MAX_VERTEX_OUTPUT_COMPONENTS) { if (!c->error) c->error = GL_INVALID_VALUE; return 0; } glProgram tmp = {vertex_shader, fragment_shader, NULL, n, {0}, fragdepth_or_discard, GL_FALSE }; for (int i=0; i<n; ++i) { tmp.interpolation[i] = interpolation[i]; } for (int i=1; i<c->programs.size; ++i) { if (c->programs.a[i].deleted && i != c->cur_program) { c->programs.a[i] = tmp; return i; } } cvec_push_glProgram(&c->programs, tmp); return c->programs.size-1; } void glDeleteProgram(GLuint program) { if (!program) return; if (program >= c->programs.size) { if (!c->error) c->error = GL_INVALID_VALUE; return; } c->programs.a[program].deleted = GL_TRUE; } void glUseProgram(GLuint program) { if (program >= c->programs.size) { if (!c->error) c->error = GL_INVALID_VALUE; return; } c->vs_output.size = c->programs.a[program].vs_output_size; cvec_reserve_float(&c->vs_output.output_buf, c->vs_output.size * MAX_VERTICES); c->vs_output.interpolation = c->programs.a[program].interpolation; c->fragdepth_or_discard = c->programs.a[program].fragdepth_or_discard; c->cur_program = program; } void pglSetUniform(void* uniform) { //TODO check for NULL? 
definitely if I ever switch to storing a local //copy in glProgram c->programs.a[c->cur_program].uniform = uniform; } void glBlendFunc(GLenum sfactor, GLenum dfactor) { if (sfactor < GL_ZERO || sfactor >= NUM_BLEND_FUNCS || dfactor < GL_ZERO || dfactor >= NUM_BLEND_FUNCS) { if (!c->error) c->error = GL_INVALID_ENUM; return; } c->blend_sfactor = sfactor; c->blend_dfactor = dfactor; } void glBlendEquation(GLenum mode) { if (mode < GL_FUNC_ADD || mode >= NUM_BLEND_EQUATIONS ) { if (!c->error) c->error = GL_INVALID_ENUM; return; } c->blend_equation = mode; } void glBlendColor(GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha) { SET_VEC4(c->blend_color, clampf_01(red), clampf_01(green), clampf_01(blue), clampf_01(alpha)); } void glLogicOp(GLenum opcode) { if (opcode < GL_CLEAR || opcode > GL_INVERT) { if (!c->error) c->error = GL_INVALID_ENUM; return; } c->logic_func = opcode; } void glPolygonOffset(GLfloat factor, GLfloat units) { c->poly_factor = factor; c->poly_units = units; } void glScissor(GLint x, GLint y, GLsizei width, GLsizei height) { // once again why is GLsizei not unsigned? if (width < 0 || height < 0) { if (!c->error) c->error = GL_INVALID_VALUE; return; } c->scissor_lx = x; c->scissor_ly = y; c->scissor_ux = x+width; c->scissor_uy = y+height; } void glStencilFunc(GLenum func, GLint ref, GLuint mask) { if (func < GL_LESS || func > GL_NEVER) { if (!c->error) c->error = GL_INVALID_ENUM; return; } c->stencil_func = func; c->stencil_func_back = func; // TODO clamp byte function? 
if (ref > 255) ref = 255; if (ref < 0) ref = 0; c->stencil_ref = ref; c->stencil_ref_back = ref; c->stencil_valuemask = mask; c->stencil_valuemask_back = mask; } void glStencilFuncSeparate(GLenum face, GLenum func, GLint ref, GLuint mask) { if (face < GL_FRONT || face > GL_FRONT_AND_BACK) { if (!c->error) c->error = GL_INVALID_ENUM; return; } if (face == GL_FRONT_AND_BACK) { glStencilFunc(func, ref, mask); return; } if (func < GL_LESS || func > GL_NEVER) { if (!c->error) c->error = GL_INVALID_ENUM; return; } // TODO clamp byte function? if (ref > 255) ref = 255; if (ref < 0) ref = 0; if (face == GL_FRONT) { c->stencil_func = func; c->stencil_ref = ref; c->stencil_valuemask = mask; } else { c->stencil_func_back = func; c->stencil_ref_back = ref; c->stencil_valuemask_back = mask; } } void glStencilOp(GLenum sfail, GLenum dpfail, GLenum dppass) { // TODO not sure if I should check all parameters first or // allow partial success? // // Also, how best to check when the enums aren't contiguous? empty switch? // manually checking all enums? 
if ((sfail < GL_INVERT || sfail > GL_DECR_WRAP) && sfail != GL_ZERO || (dpfail < GL_INVERT || dpfail > GL_DECR_WRAP) && sfail != GL_ZERO || (dppass < GL_INVERT || dppass > GL_DECR_WRAP) && sfail != GL_ZERO) { if (!c->error) c->error = GL_INVALID_ENUM; return; } c->stencil_sfail = sfail; c->stencil_dpfail = dpfail; c->stencil_dppass = dppass; c->stencil_sfail_back = sfail; c->stencil_dpfail_back = dpfail; c->stencil_dppass_back = dppass; } void glStencilOpSeparate(GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass) { if (face < GL_FRONT || face > GL_FRONT_AND_BACK) { if (!c->error) c->error = GL_INVALID_ENUM; return; } if (face == GL_FRONT_AND_BACK) { glStencilOp(sfail, dpfail, dppass); return; } if ((sfail < GL_INVERT || sfail > GL_DECR_WRAP) && sfail != GL_ZERO || (dpfail < GL_INVERT || dpfail > GL_DECR_WRAP) && sfail != GL_ZERO || (dppass < GL_INVERT || dppass > GL_DECR_WRAP) && sfail != GL_ZERO) { if (!c->error) c->error = GL_INVALID_ENUM; return; } if (face == GL_FRONT) { c->stencil_sfail = sfail; c->stencil_dpfail = dpfail; c->stencil_dppass = dppass; } else { c->stencil_sfail_back = sfail; c->stencil_dpfail_back = dpfail; c->stencil_dppass_back = dppass; } } void glClearStencil(GLint s) { // stencil is 8 bit bytes so just hardcoding FF here c->clear_stencil = s & 0xFF; } void glStencilMask(GLuint mask) { c->stencil_writemask = mask; c->stencil_writemask_back = mask; } void glStencilMaskSeparate(GLenum face, GLuint mask) { if (face < GL_FRONT || face > GL_FRONT_AND_BACK) { if (!c->error) c->error = GL_INVALID_ENUM; return; } if (face == GL_FRONT_AND_BACK) { glStencilMask(mask); return; } if (face == GL_FRONT) { c->stencil_writemask = mask; } else { c->stencil_writemask_back = mask; } } // Just wrap my pgl extension getter, unmap does nothing void* glMapBuffer(GLenum target, GLenum access) { if (target != GL_ARRAY_BUFFER && target != GL_ELEMENT_ARRAY_BUFFER) { if (!c->error) c->error = GL_INVALID_ENUM; return NULL; } if (access != GL_READ_ONLY && access != 
GL_WRITE_ONLY && access != GL_READ_WRITE) { if (!c->error) c->error = GL_INVALID_ENUM; return NULL; } // adjust to access bound_buffers target -= GL_ARRAY_BUFFER; void* data = NULL; pglGetBufferData(c->bound_buffers[target], &data); return data; } void* glMapNamedBuffer(GLuint buffer, GLenum access) { // pglGetBufferData will verify buffer is valid if (access != GL_READ_ONLY && access != GL_WRITE_ONLY && access != GL_READ_WRITE) { if (!c->error) c->error = GL_INVALID_ENUM; return NULL; } void* data = NULL; pglGetBufferData(buffer, &data); return data; } // Stubs to let real OpenGL libs compile with minimal modifications/ifdefs // add what you need void glGenerateMipmap(GLenum target) { //TODO not implemented, not sure it's worth it. //For example mipmap generation code see //https://github.com/thebeast33/cro_lib/blob/master/cro_mipmap.h } void glGetDoublev(GLenum pname, GLdouble* params) { } void glGetInteger64v(GLenum pname, GLint64* params) { } // Framebuffers/Renderbuffers void glGenFramebuffers(GLsizei n, GLuint* ids) {} void glBindFramebuffer(GLenum target, GLuint framebuffer) {} void glDeleteFramebuffers(GLsizei n, GLuint* framebuffers) {} void glFramebufferTexture(GLenum target, GLenum attachment, GLuint texture, GLint level) {} void glFramebufferTexture1D(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level) {} void glFramebufferTexture2D(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level) {} void glFramebufferTexture3D(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint layer) {} GLboolean glIsFramebuffer(GLuint framebuffer) { return GL_FALSE; } void glGenRenderbuffers(GLsizei n, GLuint* renderbuffers) {} void glBindRenderbuffer(GLenum target, GLuint renderbuffer) {} void glDeleteRenderbuffers(GLsizei n, const GLuint* renderbuffers) {} void glRenderbufferStorage(GLenum target, GLenum internalformat, GLsizei width, GLsizei height) {} GLboolean glIsRenderbuffer(GLuint 
renderbuffer) { return GL_FALSE; } void glFramebufferRenderbuffer(GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer) {} // Could also return GL_FRAMEBUFFER_UNDEFINED, but then I'd have to add all // those enums and really 0 signaling an error makes more sense GLenum glCheckFramebufferStatus(GLenum target) { return 0; } void glGetProgramiv(GLuint program, GLenum pname, GLint* params) { } void glGetProgramInfoLog(GLuint program, GLsizei maxLength, GLsizei* length, GLchar* infoLog) { } void glAttachShader(GLuint program, GLuint shader) { } void glCompileShader(GLuint shader) { } void glGetShaderInfoLog(GLuint shader, GLsizei maxLength, GLsizei* length, GLchar* infoLog) { } void glLinkProgram(GLuint program) { } void glShaderSource(GLuint shader, GLsizei count, const GLchar** string, const GLint* length) { } void glGetShaderiv(GLuint shader, GLenum pname, GLint* params) { } void glDeleteShader(GLuint shader) { } void glDetachShader(GLuint program, GLuint shader) { } GLuint glCreateProgram() { return 0; } GLuint glCreateShader(GLenum shaderType) { return 0; } GLint glGetUniformLocation(GLuint program, const GLchar* name) { return 0; } GLint glGetAttribLocation(GLuint program, const GLchar* name) { return 0; } GLboolean glUnmapBuffer(GLenum target) { return GL_TRUE; } GLboolean glUnmapNamedBuffer(GLuint buffer) { return GL_TRUE; } // TODO void glLineWidth(GLfloat width) { } void glActiveTexture(GLenum texture) { } void glTexParameterfv(GLenum target, GLenum pname, const GLfloat* params) { } void glUniform1f(GLint location, GLfloat v0) { } void glUniform2f(GLint location, GLfloat v0, GLfloat v1) { } void glUniform3f(GLint location, GLfloat v0, GLfloat v1, GLfloat v2) { } void glUniform4f(GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3) { } void glUniform1i(GLint location, GLint v0) { } void glUniform2i(GLint location, GLint v0, GLint v1) { } void glUniform3i(GLint location, GLint v0, GLint v1, GLint v2) { } void 
glUniform4i(GLint location, GLint v0, GLint v1, GLint v2, GLint v3) { } void glUniform1ui(GLuint location, GLuint v0) { } void glUniform2ui(GLuint location, GLuint v0, GLuint v1) { } void glUniform3ui(GLuint location, GLuint v0, GLuint v1, GLuint v2) { } void glUniform4ui(GLuint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3) { } void glUniform1fv(GLint location, GLsizei count, const GLfloat* value) { } void glUniform2fv(GLint location, GLsizei count, const GLfloat* value) { } void glUniform3fv(GLint location, GLsizei count, const GLfloat* value) { } void glUniform4fv(GLint location, GLsizei count, const GLfloat* value) { } void glUniform1iv(GLint location, GLsizei count, const GLint* value) { } void glUniform2iv(GLint location, GLsizei count, const GLint* value) { } void glUniform3iv(GLint location, GLsizei count, const GLint* value) { } void glUniform4iv(GLint location, GLsizei count, const GLint* value) { } void glUniform1uiv(GLint location, GLsizei count, const GLuint* value) { } void glUniform2uiv(GLint location, GLsizei count, const GLuint* value) { } void glUniform3uiv(GLint location, GLsizei count, const GLuint* value) { } void glUniform4uiv(GLint location, GLsizei count, const GLuint* value) { } void glUniformMatrix2fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) { } void glUniformMatrix3fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) { } void glUniformMatrix4fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) { } void glUniformMatrix2x3fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) { } void glUniformMatrix3x2fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) { } void glUniformMatrix2x4fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) { } void glUniformMatrix4x2fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) { } void glUniformMatrix3x4fv(GLint location, 
GLsizei count, GLboolean transpose, const GLfloat* value) { } void glUniformMatrix4x3fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) { } /************************************* * GLSL(ish) functions *************************************/ float clampf_01(float f) { if (f < 0.0f) return 0.0f; if (f > 1.0f) return 1.0f; return f; } float clampf(float f, float min, float max) { if (f < min) return min; if (f > max) return max; return f; } int clampi(int i, int min, int max) { if (i < min) return min; if (i > max) return max; return i; } #define imod(a, b) (a) - (b) * ((a)/(b)) static int wrap(int i, int size, GLenum mode) { int tmp, tmp2; switch (mode) { case GL_REPEAT: tmp = imod(i, size); if (tmp < 0) tmp = size + tmp; return tmp; // Border is too much of a pain to implement with render to // texture. Trade offs in poor performance or ugly extra code // for a feature that almost no one actually uses and even // when it is used (barring rare/odd uv coordinates) it's not // even noticable. //case GL_CLAMP_TO_BORDER: //return clampi(i, -1, size); case GL_CLAMP_TO_BORDER: // just so stuff that uses it compiles case GL_CLAMP_TO_EDGE: return clampi(i, 0, size-1); case GL_MIRRORED_REPEAT: if (i < 0) i = -i; tmp = i / size; tmp2 = i / (2*size); // TODO what was this for? 
if (tmp % 2) return (size-1) - (i - tmp * size);
else return i - tmp * size;
return tmp;

default:
	//should never happen, get rid of compile warning
	assert(0);
	return 0;
}
}
#undef imod

// used in the following 4 texture access functions
// Not sure if it's actually necessary since wrap() clamps
#define EPSILON 0.000001

// Samples a 1D texture at normalized coordinate x.  Nearest or linear
// filtering is chosen from t->mag_filter; wrapping uses t->wrap_s.
vec4 texture1D(GLuint tex, float x)
{
	int i0, i1;

	glTexture* t = &c->textures.a[tex];
	Color* texdata = (Color*)t->data;

	double w = t->w - EPSILON;

	double xw = x * w;

	if (t->mag_filter == GL_NEAREST) {
		i0 = wrap(floor(xw), t->w, t->wrap_s);
		return Color_to_vec4(texdata[i0]);
	} else { // LINEAR
		// This seems right to me since pixel centers are 0.5 but
		// this isn't exactly what's described in the spec or FoCG
		i0 = wrap(floor(xw - 0.5), t->w, t->wrap_s);
		i1 = wrap(floor(xw + 0.499999), t->w, t->wrap_s);

		double tmp2;
		// alpha is the fractional blend weight between texels i0 and i1
		double alpha = modf(xw+0.5, &tmp2);
		if (alpha < 0) ++alpha;

		//hermite smoothing is optional
		//looks like my nvidia implementation doesn't do it
		//but it can look a little better
#ifdef HERMITE_SMOOTHING
		alpha = alpha*alpha * (3 - 2*alpha);
#endif

		vec4 ci = Color_to_vec4(texdata[i0]);
		vec4 ci1 = Color_to_vec4(texdata[i1]);

		// linear interpolation: (1-alpha)*c[i0] + alpha*c[i1]
		ci = scale_vec4(ci, (1-alpha));
		ci1 = scale_vec4(ci1, alpha);

		ci = add_vec4s(ci, ci1);

		return ci;
	}
}

// Samples a 2D texture at normalized coordinates (x, y); nearest or
// bilinear filtering per t->mag_filter, wrapping per t->wrap_s/t->wrap_t.
vec4 texture2D(GLuint tex, float x, float y)
{
	int i0, j0, i1, j1;

	glTexture* t = &c->textures.a[tex];
	Color* texdata = (Color*)t->data;

	int w = t->w;
	int h = t->h;

	double dw = w - EPSILON;
	double dh = h - EPSILON;

	double xw = x * dw;
	double yh = y * dh;

	//TODO don't just use mag_filter all the time?
	//is it worth bothering?
if (t->mag_filter == GL_NEAREST) { i0 = wrap(floor(xw), w, t->wrap_s); j0 = wrap(floor(yh), h, t->wrap_t); return Color_to_vec4(texdata[j0*w + i0]); } else { // LINEAR // This seems right to me since pixel centers are 0.5 but // this isn't exactly what's described in the spec or FoCG i0 = wrap(floor(xw - 0.5), w, t->wrap_s); j0 = wrap(floor(yh - 0.5), h, t->wrap_t); i1 = wrap(floor(xw + 0.499999), w, t->wrap_s); j1 = wrap(floor(yh + 0.499999), h, t->wrap_t); double tmp2; double alpha = modf(xw+0.5, &tmp2); double beta = modf(yh+0.5, &tmp2); if (alpha < 0) ++alpha; if (beta < 0) ++beta; //hermite smoothing is optional //looks like my nvidia implementation doesn't do it //but it can look a little better #ifdef HERMITE_SMOOTHING alpha = alpha*alpha * (3 - 2*alpha); beta = beta*beta * (3 - 2*beta); #endif vec4 cij = Color_to_vec4(texdata[j0*w + i0]); vec4 ci1j = Color_to_vec4(texdata[j0*w + i1]); vec4 cij1 = Color_to_vec4(texdata[j1*w + i0]); vec4 ci1j1 = Color_to_vec4(texdata[j1*w + i1]); cij = scale_vec4(cij, (1-alpha)*(1-beta)); ci1j = scale_vec4(ci1j, alpha*(1-beta)); cij1 = scale_vec4(cij1, (1-alpha)*beta); ci1j1 = scale_vec4(ci1j1, alpha*beta); cij = add_vec4s(cij, ci1j); cij = add_vec4s(cij, cij1); cij = add_vec4s(cij, ci1j1); return cij; } } vec4 texture3D(GLuint tex, float x, float y, float z) { int i0, j0, i1, j1, k0, k1; glTexture* t = &c->textures.a[tex]; Color* texdata = (Color*)t->data; double dw = t->w - EPSILON; double dh = t->h - EPSILON; double dd = t->d - EPSILON; int w = t->w; int h = t->h; int d = t->d; int plane = w * t->h; double xw = x * dw; double yh = y * dh; double zd = z * dd; if (t->mag_filter == GL_NEAREST) { i0 = wrap(floor(xw), w, t->wrap_s); j0 = wrap(floor(yh), h, t->wrap_t); k0 = wrap(floor(zd), d, t->wrap_r); return Color_to_vec4(texdata[k0*plane + j0*w + i0]); } else { // LINEAR // This seems right to me since pixel centers are 0.5 but // this isn't exactly what's described in the spec or FoCG i0 = wrap(floor(xw - 0.5), w, 
t->wrap_s); j0 = wrap(floor(yh - 0.5), h, t->wrap_t); k0 = wrap(floor(zd - 0.5), d, t->wrap_r); i1 = wrap(floor(xw + 0.499999), w, t->wrap_s); j1 = wrap(floor(yh + 0.499999), h, t->wrap_t); k1 = wrap(floor(zd + 0.499999), d, t->wrap_r); double tmp2; double alpha = modf(xw+0.5, &tmp2); double beta = modf(yh+0.5, &tmp2); double gamma = modf(zd+0.5, &tmp2); if (alpha < 0) ++alpha; if (beta < 0) ++beta; if (gamma < 0) ++gamma; //hermite smoothing is optional //looks like my nvidia implementation doesn't do it //but it can look a little better #ifdef HERMITE_SMOOTHING alpha = alpha*alpha * (3 - 2*alpha); beta = beta*beta * (3 - 2*beta); gamma = gamma*gamma * (3 - 2*gamma); #endif vec4 cijk = Color_to_vec4(texdata[k0*plane + j0*w + i0]); vec4 ci1jk = Color_to_vec4(texdata[k0*plane + j0*w + i1]); vec4 cij1k = Color_to_vec4(texdata[k0*plane + j1*w + i0]); vec4 ci1j1k = Color_to_vec4(texdata[k0*plane + j1*w + i1]); vec4 cijk1 = Color_to_vec4(texdata[k1*plane + j0*w + i0]); vec4 ci1jk1 = Color_to_vec4(texdata[k1*plane + j0*w + i1]); vec4 cij1k1 = Color_to_vec4(texdata[k1*plane + j1*w + i0]); vec4 ci1j1k1 = Color_to_vec4(texdata[k1*plane + j1*w + i1]); cijk = scale_vec4(cijk, (1-alpha)*(1-beta)*(1-gamma)); ci1jk = scale_vec4(ci1jk, alpha*(1-beta)*(1-gamma)); cij1k = scale_vec4(cij1k, (1-alpha)*beta*(1-gamma)); ci1j1k = scale_vec4(ci1j1k, alpha*beta*(1-gamma)); cijk1 = scale_vec4(cijk1, (1-alpha)*(1-beta)*gamma); ci1jk1 = scale_vec4(ci1jk1, alpha*(1-beta)*gamma); cij1k1 = scale_vec4(cij1k1, (1-alpha)*beta*gamma); ci1j1k1 = scale_vec4(ci1j1k1, alpha*beta*gamma); cijk = add_vec4s(cijk, ci1jk); cijk = add_vec4s(cijk, cij1k); cijk = add_vec4s(cijk, ci1j1k); cijk = add_vec4s(cijk, cijk1); cijk = add_vec4s(cijk, ci1jk1); cijk = add_vec4s(cijk, cij1k1); cijk = add_vec4s(cijk, ci1j1k1); return cijk; } } // for now this should work vec4 texture2DArray(GLuint tex, float x, float y, int z) { int i0, j0, i1, j1; glTexture* t = &c->textures.a[tex]; Color* texdata = (Color*)t->data; int w = 
t->w; int h = t->h; double dw = w - EPSILON; double dh = h - EPSILON; int plane = w * h; double xw = x * dw; double yh = y * dh; if (t->mag_filter == GL_NEAREST) { i0 = wrap(floor(xw), w, t->wrap_s); j0 = wrap(floor(yh), h, t->wrap_t); return Color_to_vec4(texdata[z*plane + j0*w + i0]); } else { // LINEAR // This seems right to me since pixel centers are 0.5 but // this isn't exactly what's described in the spec or FoCG i0 = wrap(floor(xw - 0.5), w, t->wrap_s); j0 = wrap(floor(yh - 0.5), h, t->wrap_t); i1 = wrap(floor(xw + 0.499999), w, t->wrap_s); j1 = wrap(floor(yh + 0.499999), h, t->wrap_t); double tmp2; double alpha = modf(xw+0.5, &tmp2); double beta = modf(yh+0.5, &tmp2); if (alpha < 0) ++alpha; if (beta < 0) ++beta; //hermite smoothing is optional //looks like my nvidia implementation doesn't do it //but it can look a little better #ifdef HERMITE_SMOOTHING alpha = alpha*alpha * (3 - 2*alpha); beta = beta*beta * (3 - 2*beta); #endif vec4 cij = Color_to_vec4(texdata[z*plane + j0*w + i0]); vec4 ci1j = Color_to_vec4(texdata[z*plane + j0*w + i1]); vec4 cij1 = Color_to_vec4(texdata[z*plane + j1*w + i0]); vec4 ci1j1 = Color_to_vec4(texdata[z*plane + j1*w + i1]); cij = scale_vec4(cij, (1-alpha)*(1-beta)); ci1j = scale_vec4(ci1j, alpha*(1-beta)); cij1 = scale_vec4(cij1, (1-alpha)*beta); ci1j1 = scale_vec4(ci1j1, alpha*beta); cij = add_vec4s(cij, ci1j); cij = add_vec4s(cij, cij1); cij = add_vec4s(cij, ci1j1); return cij; } } vec4 texture_rect(GLuint tex, float x, float y) { int i0, j0, i1, j1; glTexture* t = &c->textures.a[tex]; Color* texdata = (Color*)t->data; int w = t->w; int h = t->h; double xw = x; double yh = y; //TODO don't just use mag_filter all the time? //is it worth bothering? 
if (t->mag_filter == GL_NEAREST) { i0 = wrap(floor(xw), w, t->wrap_s); j0 = wrap(floor(yh), h, t->wrap_t); return Color_to_vec4(texdata[j0*w + i0]); } else { // LINEAR // This seems right to me since pixel centers are 0.5 but // this isn't exactly what's described in the spec or FoCG i0 = wrap(floor(xw - 0.5), w, t->wrap_s); j0 = wrap(floor(yh - 0.5), h, t->wrap_t); i1 = wrap(floor(xw + 0.499999), w, t->wrap_s); j1 = wrap(floor(yh + 0.499999), h, t->wrap_t); double tmp2; double alpha = modf(xw+0.5, &tmp2); double beta = modf(yh+0.5, &tmp2); if (alpha < 0) ++alpha; if (beta < 0) ++beta; //hermite smoothing is optional //looks like my nvidia implementation doesn't do it //but it can look a little better #ifdef HERMITE_SMOOTHING alpha = alpha*alpha * (3 - 2*alpha); beta = beta*beta * (3 - 2*beta); #endif vec4 cij = Color_to_vec4(texdata[j0*w + i0]); vec4 ci1j = Color_to_vec4(texdata[j0*w + i1]); vec4 cij1 = Color_to_vec4(texdata[j1*w + i0]); vec4 ci1j1 = Color_to_vec4(texdata[j1*w + i1]); cij = scale_vec4(cij, (1-alpha)*(1-beta)); ci1j = scale_vec4(ci1j, alpha*(1-beta)); cij1 = scale_vec4(cij1, (1-alpha)*beta); ci1j1 = scale_vec4(ci1j1, alpha*beta); cij = add_vec4s(cij, ci1j); cij = add_vec4s(cij, cij1); cij = add_vec4s(cij, ci1j1); return cij; } } vec4 texture_cubemap(GLuint texture, float x, float y, float z) { glTexture* tex = &c->textures.a[texture]; Color* texdata = (Color*)tex->data; float x_mag = (x < 0) ? -x : x; float y_mag = (y < 0) ? -y : y; float z_mag = (z < 0) ? -z : z; float s, t, max; int p, i0, j0, i1, j1; //there should be a better/shorter way to do this ... 
if (x_mag > y_mag) { if (x_mag > z_mag) { //x largest max = x_mag; t = -y; if (x_mag == x) { p = 0; s = -z; } else { p = 1; s = z; } } else { //z largest max = z_mag; t = -y; if (z_mag == z) { p = 4; s = x; } else { p = 5; s = -x; } } } else { if (y_mag > z_mag) { //y largest max = y_mag; s = x; if (y_mag == y) { p = 2; t = z; } else { p = 3; t = -z; } } else { //z largest max = z_mag; t = -y; if (z_mag == z) { p = 4; s = x; } else { p = 5; s = -x; } } } x = (s/max + 1.0f)/2.0f; y = (t/max + 1.0f)/2.0f; int w = tex->w; int h = tex->h; double dw = w - EPSILON; double dh = h - EPSILON; int plane = w*w; double xw = x * dw; double yh = y * dh; if (tex->mag_filter == GL_NEAREST) { i0 = wrap(floor(xw), w, tex->wrap_s); j0 = wrap(floor(yh), h, tex->wrap_t); vec4 tmpvec4 = Color_to_vec4(texdata[p*plane + j0*w + i0]); return tmpvec4; } else { // LINEAR // This seems right to me since pixel centers are 0.5 but // this isn't exactly what's described in the spec or FoCG i0 = wrap(floor(xw - 0.5), tex->w, tex->wrap_s); j0 = wrap(floor(yh - 0.5), tex->h, tex->wrap_t); i1 = wrap(floor(xw + 0.499999), tex->w, tex->wrap_s); j1 = wrap(floor(yh + 0.499999), tex->h, tex->wrap_t); double tmp2; double alpha = modf(xw+0.5, &tmp2); double beta = modf(yh+0.5, &tmp2); if (alpha < 0) ++alpha; if (beta < 0) ++beta; //hermite smoothing is optional //looks like my nvidia implementation doesn't do it //but it can look a little better #ifdef HERMITE_SMOOTHING alpha = alpha*alpha * (3 - 2*alpha); beta = beta*beta * (3 - 2*beta); #endif vec4 cij = Color_to_vec4(texdata[p*plane + j0*w + i0]); vec4 ci1j = Color_to_vec4(texdata[p*plane + j0*w + i1]); vec4 cij1 = Color_to_vec4(texdata[p*plane + j1*w + i0]); vec4 ci1j1 = Color_to_vec4(texdata[p*plane + j1*w + i1]); cij = scale_vec4(cij, (1-alpha)*(1-beta)); ci1j = scale_vec4(ci1j, alpha*(1-beta)); cij1 = scale_vec4(cij1, (1-alpha)*beta); ci1j1 = scale_vec4(ci1j1, alpha*beta); cij = add_vec4s(cij, ci1j); cij = add_vec4s(cij, cij1); cij = add_vec4s(cij, 
ci1j1); return cij; } } #undef EPSILON //Raw draw functions that bypass the OpenGL pipeline and draw //points/lines/triangles directly to the framebuffer, modify as needed. // //Example modifications: //add the blending part of OpenGL to put_pixel //change them to take vec4's instead of Color's //change put_triangle to draw all one color or have a separate path/function //that draws a single color triangle faster (no need to blend) // //pass the framebuffer in instead of drawing to c->back_buffer so //you can use it elsewhere, independently of a glContext //etc. // void pglClearScreen() { memset(c->back_buffer.buf, 255, c->back_buffer.w * c->back_buffer.h * 4); } void pglSetInterp(GLsizei n, GLenum* interpolation) { c->programs.a[c->cur_program].vs_output_size = n; c->vs_output.size = n; memcpy(c->programs.a[c->cur_program].interpolation, interpolation, n*sizeof(GLenum)); cvec_reserve_float(&c->vs_output.output_buf, n * MAX_VERTICES); //vs_output.interpolation would be already pointing at current program's array //unless the programs array was realloced since the last glUseProgram because //they've created a bunch of programs. Unlikely they'd be changing a shader //before creating all their shaders but whatever. 
c->vs_output.interpolation = c->programs.a[c->cur_program].interpolation; } //TODO //pglDrawRect(x, y, w, h) //pglDrawPoint(x, y) void pglDrawFrame() { frag_func frag_shader = c->programs.a[c->cur_program].fragment_shader; Shader_Builtins builtins; #pragma omp parallel for private(builtins) for (int y=0; y<c->back_buffer.h; ++y) { for (int x=0; x<c->back_buffer.w; ++x) { //ignore z and w components builtins.gl_FragCoord.x = x + 0.5f; builtins.gl_FragCoord.y = y + 0.5f; builtins.discard = GL_FALSE; frag_shader(NULL, &builtins, c->programs.a[c->cur_program].uniform); if (!builtins.discard) draw_pixel(builtins.gl_FragColor, x, y, 0.0f); //depth isn't used for pglDrawFrame } } } void pglBufferData(GLenum target, GLsizei size, const GLvoid* data, GLenum usage) { if (target != GL_ARRAY_BUFFER && target != GL_ELEMENT_ARRAY_BUFFER) { if (!c->error) c->error = GL_INVALID_ENUM; return; } //check for usage later target -= GL_ARRAY_BUFFER; if (c->bound_buffers[target] == 0) { if (!c->error) c->error = GL_INVALID_OPERATION; return; } // data can't be null for user_owned data if (!data) { if (!c->error) c->error = GL_INVALID_VALUE; return; } // TODO Should I change this in spec functions too? Or just say don't mix them // otherwise bad things/undefined behavior?? 
if (!c->buffers.a[c->bound_buffers[target]].user_owned) { free(c->buffers.a[c->bound_buffers[target]].data); } // user_owned buffer, just assign the pointer, will not free c->buffers.a[c->bound_buffers[target]].data = (u8*)data; c->buffers.a[c->bound_buffers[target]].user_owned = GL_TRUE; c->buffers.a[c->bound_buffers[target]].size = size; if (target == GL_ELEMENT_ARRAY_BUFFER) { c->vertex_arrays.a[c->cur_vertex_array].element_buffer = c->bound_buffers[target]; } } void pglTexImage1D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLint border, GLenum format, GLenum type, const GLvoid* data) { if (target != GL_TEXTURE_1D) { if (!c->error) c->error = GL_INVALID_ENUM; return; } if (border) { if (!c->error) c->error = GL_INVALID_VALUE; return; } // data can't be null for user_owned data if (!data) { if (!c->error) c->error = GL_INVALID_VALUE; return; } //ignore level for now int cur_tex = c->bound_textures[target-GL_TEXTURE_UNBOUND-1]; c->textures.a[cur_tex].w = width; if (type != GL_UNSIGNED_BYTE) { return; } int components; if (format == GL_RED) components = 1; else if (format == GL_RG) components = 2; else if (format == GL_RGB || format == GL_BGR) components = 3; else if (format == GL_RGBA || format == GL_BGRA) components = 4; else { if (!c->error) c->error = GL_INVALID_ENUM; return; } // TODO see pglBufferData if (!c->textures.a[cur_tex].user_owned) free(c->textures.a[cur_tex].data); //TODO support other internal formats? components should be of internalformat not format c->textures.a[cur_tex].data = (u8*)data; c->textures.a[cur_tex].user_owned = GL_TRUE; //TODO //assume for now always RGBA coming in and that's what I'm storing it as } void pglTexImage2D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const GLvoid* data) { //GL_TEXTURE_1D, GL_TEXTURE_2D, GL_TEXTURE_3D, GL_TEXTURE_1D_ARRAY, GL_TEXTURE_2D_ARRAY, GL_TEXTURE_RECTANGLE, or GL_TEXTURE_CUBE_MAP. 
//will add others as they're implemented if (target != GL_TEXTURE_2D && target != GL_TEXTURE_RECTANGLE && target != GL_TEXTURE_CUBE_MAP_POSITIVE_X && target != GL_TEXTURE_CUBE_MAP_NEGATIVE_X && target != GL_TEXTURE_CUBE_MAP_POSITIVE_Y && target != GL_TEXTURE_CUBE_MAP_NEGATIVE_Y && target != GL_TEXTURE_CUBE_MAP_POSITIVE_Z && target != GL_TEXTURE_CUBE_MAP_NEGATIVE_Z) { if (!c->error) c->error = GL_INVALID_ENUM; return; } if (border) { if (!c->error) c->error = GL_INVALID_VALUE; return; } // data can't be null for user_owned data if (!data) { if (!c->error) c->error = GL_INVALID_VALUE; return; } //ignore level for now //TODO support other types? if (type != GL_UNSIGNED_BYTE) { if (!c->error) c->error = GL_INVALID_ENUM; return; } // TODO I don't actually support anything other than GL_RGBA for input or // internal format ... so I should probably make the others errors and // I'm not even checking internalFormat currently.. int components; if (format == GL_RED) components = 1; else if (format == GL_RG) components = 2; else if (format == GL_RGB || format == GL_BGR) components = 3; else if (format == GL_RGBA || format == GL_BGRA) components = 4; else { if (!c->error) c->error = GL_INVALID_ENUM; return; } int cur_tex; if (target == GL_TEXTURE_2D || target == GL_TEXTURE_RECTANGLE) { cur_tex = c->bound_textures[target-GL_TEXTURE_UNBOUND-1]; c->textures.a[cur_tex].w = width; c->textures.a[cur_tex].h = height; // TODO see pglBufferData if (!c->textures.a[cur_tex].user_owned) free(c->textures.a[cur_tex].data); //TODO support other internal formats? 
components should be of internalformat not format // If you're using these pgl mapped functions, it assumes you are respecting // your own current unpack alignment settings already c->textures.a[cur_tex].data = (u8*)data; c->textures.a[cur_tex].user_owned = GL_TRUE; } else { //CUBE_MAP /* * TODO, doesn't make sense to call this six times when mapping, you'd set * them all up beforehand and set the pointer once...so change this or * make a pglCubeMapData() function? * cur_tex = c->bound_textures[GL_TEXTURE_CUBE_MAP-GL_TEXTURE_UNBOUND-1]; // TODO see pglBufferData if (!c->textures.a[cur_tex].user_owned) free(c->textures.a[cur_tex].data); if (width != height) { //TODO spec says INVALID_VALUE, man pages say INVALID_ENUM ? if (!c->error) c->error = GL_INVALID_VALUE; return; } int mem_size = width*height*6 * components; if (c->textures.a[cur_tex].w == 0) { c->textures.a[cur_tex].w = width; c->textures.a[cur_tex].h = width; //same cause square } else if (c->textures.a[cur_tex].w != width) { //TODO spec doesn't say all sides must have same dimensions but it makes sense //and this site suggests it http://www.opengl.org/wiki/Cubemap_Texture if (!c->error) c->error = GL_INVALID_VALUE; return; } target -= GL_TEXTURE_CUBE_MAP_POSITIVE_X; //use target as plane index c->textures.a[cur_tex].data = (u8*)data; c->textures.a[cur_tex].user_owned = GL_TRUE; */ } //end CUBE_MAP } void pglTexImage3D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid* data) { if (target != GL_TEXTURE_3D && target != GL_TEXTURE_2D_ARRAY) { if (!c->error) c->error = GL_INVALID_ENUM; return; } if (border) { if (!c->error) c->error = GL_INVALID_VALUE; return; } // data can't be null for user_owned data if (!data) { if (!c->error) c->error = GL_INVALID_VALUE; return; } //ignore level for now int cur_tex = c->bound_textures[target-GL_TEXTURE_UNBOUND-1]; c->textures.a[cur_tex].w = width; c->textures.a[cur_tex].h = 
height;
c->textures.a[cur_tex].d = depth;

if (type != GL_UNSIGNED_BYTE) {
	// TODO
	return;
}

// TODO add error? only support GL_RGBA for now
int components;
if (format == GL_RED) components = 1;
else if (format == GL_RG) components = 2;
else if (format == GL_RGB || format == GL_BGR) components = 3;
else if (format == GL_RGBA || format == GL_BGRA) components = 4;
else {
	if (!c->error) c->error = GL_INVALID_ENUM;
	return;
}

// TODO see pglBufferData
if (!c->textures.a[cur_tex].user_owned)
	free(c->textures.a[cur_tex].data);

//TODO support other internal formats? components should be of internalformat not format
c->textures.a[cur_tex].data = (u8*)data;
c->textures.a[cur_tex].user_owned = GL_TRUE;

//TODO
//assume for now always RGBA coming in and that's what I'm storing it as
}

// Returns (through *data) the raw data pointer of buffer object `buffer`.
// Errors: GL_INVALID_VALUE if data is NULL; GL_INVALID_OPERATION if the
// buffer id is 0, out of range, or has been deleted.
void pglGetBufferData(GLuint buffer, GLvoid** data)
{
	// why'd you even call it?
	if (!data) {
		if (!c->error) {
			c->error = GL_INVALID_VALUE;
		}
		return;
	}

	if (buffer && buffer < c->buffers.size && !c->buffers.a[buffer].deleted) {
		*data = c->buffers.a[buffer].data;
	} else if (!c->error) {
		c->error = GL_INVALID_OPERATION; // matching error code of binding invalid buffer
	}
}

// Returns (through *data) the raw texel pointer of texture object `texture`.
// NOTE(review): unlike pglGetBufferData this accepts texture id 0 —
// confirm whether the default texture is intended to be queryable.
void pglGetTextureData(GLuint texture, GLvoid** data)
{
	// why'd you even call it?
	if (!data) {
		if (!c->error) {
			c->error = GL_INVALID_VALUE;
		}
		return;
	}

	if (texture < c->textures.size && !c->textures.a[texture].deleted) {
		*data = c->textures.a[texture].data;
	} else if (!c->error) {
		c->error = GL_INVALID_OPERATION; // matching error code of binding invalid buffer
	}
}

// Writes `color` at back-buffer position (x, y).  y is flipped by indexing
// backward from lastrow; channels are packed with the context's per-channel
// bit shifts (Ashift/Rshift/Gshift/Bshift).
void put_pixel(Color color, int x, int y)
{
	u32* dest = &((u32*)c->back_buffer.lastrow)[-y*c->back_buffer.w + x];
	*dest = color.a << c->Ashift | color.r << c->Rshift | color.g << c->Gshift | color.b << c->Bshift;
}

//Should I have it take a glFramebuffer as paramater?
void put_line(Color the_color, float x1, float y1, float x2, float y2) { float tmp; //always draw from left to right if (x2 < x1) { tmp = x1; x1 = x2; x2 = tmp; tmp = y1; y1 = y2; y2 = tmp; } //calculate slope and implicit line parameters once float m = (y2-y1)/(x2-x1); float A = y1 - y2; float B = x2 - x1; float C = x1*y2 -x2*y1; int x, y; float x_min = MAX(0, MIN(x1, x2)); float x_max = MIN(c->back_buffer.w-1, MAX(x1, x2)); float y_min = MAX(0, MIN(y1, y2)); float y_max = MIN(c->back_buffer.h-1, MAX(y1, y2)); //4 cases based on slope if (m <= -1) { //(-infinite, -1] x = x1; for (y=y_max; y>=y_min; --y) { put_pixel(the_color, x, y); if (A*(x+0.5f) + B*(y-1) + C < 0) x++; } } else if (m <= 0) { //(-1, 0] y = y1; for (x=x_min; x<=x_max; ++x) { put_pixel(the_color, x, y); if (A*(x+1) + B*(y-0.5f) + C > 0) y--; } } else if (m <= 1) { //(0, 1] y = y1; for (x=x_min; x<=x_max; ++x) { put_pixel(the_color, x, y); if (A*(x+1) + B*(y+0.5f) + C < 0) y++; } } else { //(1, +infinite) x = x1; for (y=y_min; y<=y_max; ++y) { put_pixel(the_color, x, y); if (A*(x+0.5f) + B*(y+1) + C > 0) x++; } } } void put_triangle(Color c1, Color c2, Color c3, vec2 p1, vec2 p2, vec2 p3) { //can't think of a better/cleaner way to do this than these 8 lines float x_min = MIN(floor(p1.x), floor(p2.x)); float x_max = MAX(ceil(p1.x), ceil(p2.x)); float y_min = MIN(floor(p1.y), floor(p2.y)); float y_max = MAX(ceil(p1.y), ceil(p2.y)); x_min = MIN(floor(p3.x), x_min); x_max = MAX(ceil(p3.x), x_max); y_min = MIN(floor(p3.y), y_min); y_max = MAX(ceil(p3.y), y_max); x_min = MAX(0, x_min); x_max = MIN(c->back_buffer.w-1, x_max); y_min = MAX(0, y_min); y_max = MIN(c->back_buffer.h-1, y_max); //form implicit lines Line l12 = make_Line(p1.x, p1.y, p2.x, p2.y); Line l23 = make_Line(p2.x, p2.y, p3.x, p3.y); Line l31 = make_Line(p3.x, p3.y, p1.x, p1.y); float alpha, beta, gamma; Color c; float x, y; //y += 0.5f; //center of pixel // TODO(rswinkle): floor( + 0.5f) like draw_triangle? 
for (y=y_min; y<=y_max; ++y) { for (x=x_min; x<=x_max; ++x) { gamma = line_func(&l12, x, y)/line_func(&l12, p3.x, p3.y); beta = line_func(&l31, x, y)/line_func(&l31, p2.x, p2.y); alpha = 1 - beta - gamma; if (alpha >= 0 && beta >= 0 && gamma >= 0) //if it's on the edge (==0), draw if the opposite vertex is on the same side as arbitrary point -1, -1 //this is a deterministic way of choosing which triangle gets a pixel for trinagles that share //edges if ((alpha > 0 || line_func(&l23, p1.x, p1.y) * line_func(&l23, -1, -1) > 0) && (beta > 0 || line_func(&l31, p2.x, p2.y) * line_func(&l31, -1, -1) > 0) && (gamma > 0 || line_func(&l12, p3.x, p3.y) * line_func(&l12, -1, -1) > 0)) { //calculate interoplation here c.r = alpha*c1.r + beta*c2.r + gamma*c3.r; c.g = alpha*c1.g + beta*c2.g + gamma*c3.g; c.b = alpha*c1.b + beta*c2.b + gamma*c3.b; put_pixel(c, x, y); } } } } #undef PORTABLEGL_IMPLEMENTATION #undef CVECTOR_float_IMPLEMENTATION #endif #ifdef MANGLE_TYPES #undef vec2 #undef vec3 #undef vec4 #undef dvec2 #undef dvec3 #undef dvec4 #undef ivec2 #undef ivec3 #undef ivec4 #undef uvec2 #undef uvec3 #undef uvec4 #undef mat2 #undef mat3 #undef mat4 #undef Color #undef Line #undef Plane #endif
/* ======================= tsp_rnd04.c ======================= */
/*
 Description: This program executes my "Random Swapping" algorithm to solve the "Travelling Salesman Problem"
 Author: Georgios Evangelou (1046900)
 Year: 5
 Parallel Programming in Machine Learning Problems
 Electrical and Computer Engineering Department, University of Patras

 System Specifications:
    CPU: AMD Ryzen 2600 (6 cores/12 threads, @3.8 GHz, 6786.23 bogomips)
    GPU: Nvidia GTX 1050 (dual-fan, overclocked)
    RAM: 8GB (dual-channel, @2666 MHz)

 Version Notes:
    Compiles/Runs/Debugs with: gcc tsp_rnd04.c -o tsp_rnd04 -lm -fopt-info -pg -fopenmp -O3 && time ./tsp_rnd04 && gprof ./tsp_rnd04
    Executes the algorithm for 10.000 cities, spanning in an area of 1.000x1.000 km and produces correct results
    Inherits all settings of the previous version unless stated otherwise
    Was used as a test to determine performance increment when using multiple threads.

 Review fixes applied in this revision:
    - RandomizePath(): replaced the O(N^2..3) rejection loop with an O(N) Fisher-Yates shuffle.
      The old "(N*rand())/RAND_MAX" expression overflowed int (undefined behavior) and
      "((float)N*rand())/RAND_MAX" could produce the out-of-range index N.
    - SwapCities(): rand() keeps hidden shared state, so calling it from many OpenMP threads
      is a data race (and serializes the threads, as the original version notes observed);
      each thread now owns a private xorshift32 generator.
    - MapCities(): the ~4 MB map array now lives in static storage instead of the stack.
*/


// ****************************************************************************************************************
#pragma GCC optimize("O3","unroll-loops","omit-frame-pointer","inline") //Apply O3 and extra optimizations
#pragma GCC option("arch=native","tune=native","no-zero-upper") //Adapt to the current system
#pragma GCC target("avx") //Enable AVX


// ****************************************************************************************************************
#include "stdio.h"
#include "stdlib.h"
#include "math.h"
#include "omp.h"


// ****************************************************************************************************************
#define N 10000
#define Nx 1000
#define Ny 1000
#define VACANT_POSITION_CODE -999999
#define TOTAL_BATCHES 1e8
#define BATCH_SIZE 1200000
#define BATCH_SIZE_PER_RESCHEDULING 100000
#define DEFAULT_MAX_REPETITIONS TOTAL_BATCHES/BATCH_SIZE


// ****************************************************************************************************************
float CitiesX[N];       // X coordinate of every city
float CitiesY[N];       // Y coordinate of every city
int Path[N+1];          // Visiting order; Path[N] duplicates Path[0] so the tour is closed
omp_lock_t Locks[N+1];  // One lock per path slot, held while swapping two cities


// ****************************************************************************************************************
// Minimal per-thread PRNG (xorshift32). rand() must not be called concurrently:
// its hidden shared state makes concurrent calls a data race.
// State must be seeded non-zero.
// ****************************************************************************************************************
static unsigned int Xorshift32(unsigned int *state) {
    unsigned int x = *state;
    x ^= x << 13;
    x ^= x >> 17;
    x ^= x << 5;
    *state = x;
    return x;
}


// ****************************************************************************************************************
// Initializes the cities' positions with uniformly random coordinates in [0,Nx]x[0,Ny]
// ****************************************************************************************************************
void SetCities() {
    printf("Now initializing the positions of the cities...\n");
    for (int i = 0; i < N; i++) {
        CitiesX[i] = Nx * (float) rand() / RAND_MAX;
        CitiesY[i] = Ny * (float) rand() / RAND_MAX;
    }
}


// ****************************************************************************************************************
// Initializes the traveling path (all slots marked unvisited)
// ****************************************************************************************************************
void ResetPath() {
    printf("Now initializing the path...\n");
    for (int i = 0; i < N+1; i++) Path[i] = -1;
}


// ****************************************************************************************************************
// Checks if a city is already in the path (retained for compatibility; the
// Fisher-Yates RandomizePath() below no longer needs it)
// ****************************************************************************************************************
int IsInPath(int k) {
    for (int i = 0; i < N; i++)
        if (Path[i] == k) return 1;
    return 0;
}


// ****************************************************************************************************************
// Creates a random path: fills Path with the identity permutation and applies a
// Fisher-Yates shuffle (O(N)), then closes the tour with Path[N] = Path[0].
// rand() % (i+1) is slightly biased but adequate for a random starting tour.
// ****************************************************************************************************************
void RandomizePath() {
    printf("Now randomizing the path...\n");
    for (int i = 0; i < N; i++) Path[i] = i;
    for (int i = N - 1; i > 0; i--) {
        int j = rand() % (i + 1);
        int temp = Path[i];
        Path[i] = Path[j];
        Path[j] = temp;
    }
    Path[N] = Path[0]; // Close the tour
}


// ****************************************************************************************************************
// Prints the cities' positions
// ****************************************************************************************************************
void PrintCities() {
    printf("> The cities are:\n");
    for (int i = 0; i < N; i++) {
        printf(">> City: %6d X:%5.2f Y:%5.2f\n", i, CitiesX[i], CitiesY[i]);
    }
    printf("\n");
}


// ****************************************************************************************************************
// Visually maps the cities' positions. A cell holds city_index+1, -1 when two or
// more cities collide in it, or VACANT_POSITION_CODE when empty.
// The map is 'static' because (Ny+1)*(Nx+1) ints (~4 MB) would overflow the stack.
// ****************************************************************************************************************
void MapCities() {
    static int Map[Ny+1][Nx+1];
    printf("Now creating a visual map of the cities...\n");
    for (int i = 0; i < Nx+1; i++)
        for (int j = 0; j < Ny+1; j++)
            Map[j][i] = VACANT_POSITION_CODE;

    for (int c = 0; c < N; c++) {
        int x = (int) CitiesX[c];
        int y = (int) CitiesY[c];
        if (Map[y][x] == VACANT_POSITION_CODE) Map[y][x] = c + 1;
        else Map[y][x] = -1; // More than one city in this cell
    }

    printf("This is the cities' map:\n");
    printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
    for (int y = 0; y < Ny+1; y++) {
        for (int x = 0; x < Nx+1; x++) printf("%8d ", Map[y][x]);
        printf("\n");
    }
    printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
    printf("\n");
}


// ****************************************************************************************************************
// Finds the Euclidean distance between two cities
// ****************************************************************************************************************
double Distance(int A, int B) {
    double result = sqrt( (CitiesX[A]-CitiesX[B])*(CitiesX[A]-CitiesX[B])
                        + (CitiesY[A]-CitiesY[B])*(CitiesY[A]-CitiesY[B]) );
    return result;
}


// ****************************************************************************************************************
// Finds the total Euclidean length of the current path.
// Note: the final Distance(Path[N], Path[0]) term is 0 by construction, since
// Path[N] == Path[0]; it is kept to preserve the original accounting.
// ****************************************************************************************************************
double PathDistance() {
    double totDist = 0.0;
    for (int i = 0; i < N; i++) {
        totDist += Distance(Path[i], Path[i+1]);
    }
    totDist += Distance(Path[N], Path[0]);
    return totDist;
}


// ****************************************************************************************************************
// Swaps random pairs of cities when the swap shortens the tour; returns the updated
// total distance estimate. Each thread uses a private PRNG (no shared rand() state).
// NOTE(review): as in the original, only slots A and B are locked, so a concurrent
// swap touching A-1/A+1/B-1/B+1 can invalidate the computed deltas; the returned
// value is therefore an estimate and main() recomputes the exact length at the end.
// ****************************************************************************************************************
double SwapCities(double totDist) {
    double totDistChange = 0.0;
    #pragma omp parallel reduction(+:totDistChange)
    {
        // Deterministic, distinct, non-zero seed per thread (Knuth multiplicative hash)
        unsigned int rng = 2654435761u * (unsigned int)(omp_get_thread_num() + 1) + 0x9E3779B9u;
        #pragma omp for schedule(static, BATCH_SIZE_PER_RESCHEDULING)
        for (int counter = 0; counter < BATCH_SIZE; counter++) {
            int A = (int)(Xorshift32(&rng) % (N - 1)) + 1;            // Random index, 0 < A < N
            int B = (int)(Xorshift32(&rng) % (N - 1)) + 1;            // Random index, 0 < B < N
            while (A == B) B = (int)(Xorshift32(&rng) % (N - 1)) + 1; // If B==A, find another B
            if (A > B) { int temp = A; A = B; B = temp; }             // So that A<B

            int flag = B - A - 1; // Zero only when B==A+1 (adjacent cities share an edge)

            double dist1_old, dist2_old, dist3_old, dist4_old;
            double dist1_new, dist2_new, dist3_new, dist4_new;
            dist1_old = Distance(Path[A-1], Path[A]);                 // Always needed
            dist2_old = (!flag) ? 0 : Distance(Path[A], Path[A+1]);   // Omitted when A,B consecutive
            dist3_old = (!flag) ? 0 : Distance(Path[B-1], Path[B]);   // Omitted when A,B consecutive
            dist4_old = Distance(Path[B], Path[B+1]);                 // Always needed
            dist1_new = Distance(Path[A-1], Path[B]);                 // Always needed
            dist2_new = (!flag) ? 0 : Distance(Path[B], Path[A+1]);   // Omitted when A,B consecutive
            dist3_new = (!flag) ? 0 : Distance(Path[B-1], Path[A]);   // Omitted when A,B consecutive
            dist4_new = Distance(Path[A], Path[B+1]);                 // Always needed

            double distChange = - dist1_old - dist2_old - dist3_old - dist4_old
                                + dist1_new + dist2_new + dist3_new + dist4_new;

            if (distChange < 0) { // Swap only when it shortens the tour
                // A < B always holds here, so the locks are taken in a global order (no deadlock)
                omp_set_lock(&Locks[A]);
                omp_set_lock(&Locks[B]);
                int temp = Path[A];
                Path[A] = Path[B];
                Path[B] = temp;
                omp_unset_lock(&Locks[A]);
                omp_unset_lock(&Locks[B]);
            } else distChange = 0;
            totDistChange += distChange;
        }
    }
    return totDist + totDistChange;
}


// ****************************************************************************************************************
// Checks if current program parameters lead to feasible spatial states
// ****************************************************************************************************************
int ValidateParameters() {
    if (Nx * Ny < N) return 0;
    return 1;
}


// ****************************************************************************************************************
// Initializes one lock per path slot
// ****************************************************************************************************************
void InitializeLocks() {
    for (int i = 0; i < N+1; i++) omp_init_lock(&Locks[i]);
}


// ****************************************************************************************************************
// The main program
// ****************************************************************************************************************
int main( int argc, const char* argv[] ) {
    printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
    printf("This program searches for the optimal traveling Distance between %d cities,\n", N);
    printf("spanning in an area of X=(0,%d) and Y=(0,%d)\n", Nx, Ny);
    printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");

    if (ValidateParameters() == 0) {
        printf("\nERROR: NOT ENOUGH SPACE ALLOCATED FOR GIVEN NUMBER OF CITIES\n");
        printf("The program will now exit.\n");
        return 1;
    }

    int repetitions = 0, MaxRepetitions = DEFAULT_MAX_REPETITIONS;
    if (argc > 1) MaxRepetitions = atoi(argv[1]); // Optional CLI override of the repetition cap
    printf("Maximum number of repetitions set at: %d\n", MaxRepetitions);
    printf("Maximum number of batches set at: %lf\n", TOTAL_BATCHES);

    SetCities();
    ResetPath();
    RandomizePath();
    InitializeLocks();
    double totDist = PathDistance();

    printf("Now running the main algorithm...\n");
    do {
        repetitions++;
        // NOTE(review): repetitions*BATCH_SIZE is int arithmetic; it overflows only if a
        // user requests more than ~1789 repetitions from the command line.
        if (repetitions % 10 == 0)
            printf(">>REPETITION:%8d >>BATCH:%10d >>PATH_LENGTH: %.1lf\n",
                   repetitions, repetitions*BATCH_SIZE, totDist);
        totDist = SwapCities(totDist);
    } while (repetitions < MaxRepetitions);

    printf("\nCalculations completed. Results:\n");
    printf("Repetitions: %d\n", repetitions);
    printf("Batches: %d\n", repetitions*BATCH_SIZE);
    printf("Actual optimal path length: %.2lf\n", PathDistance()); // Exact recount of the final tour
    return 0;
}
6862.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "atax.h" /* Array initialization. */ static void init_array (int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny)) { int i, j; for (i = 0; i < ny; i++) x[i] = i * M_PI; for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) A[i][j] = ((DATA_TYPE) i*(j+1)) / nx; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int nx, DATA_TYPE POLYBENCH_1D(y,NX,nx)) { int i; for (i = 0; i < nx; i++) { fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]); if (i % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_atax(int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny), DATA_TYPE POLYBENCH_1D(y,NY,ny), DATA_TYPE POLYBENCH_1D(tmp,NX,nx)) { int i, j; #pragma scop { #pragma omp parallel for schedule(static, 28) simd for (i = 0; i < _PB_NY; i++) { y[i] = 0; } #pragma omp parallel for schedule(static, 28) simd for (i = 0; i < _PB_NX; i++) { tmp[i] = 0; for (j = 0; j < _PB_NY; j++) tmp[i] = tmp[i] + A[i][j] * x[j]; for (j = 0; j < _PB_NY; j++) y[j] = y[j] + A[i][j] * tmp[i]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int nx = NX; int ny = NY; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny); POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx); /* Initialize array(s). */ init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_atax (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(tmp)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(x); POLYBENCH_FREE_ARRAY(y); POLYBENCH_FREE_ARRAY(tmp); return 0; }
Updater.h
/* * Copyright 2016 [See AUTHORS file for list of authors] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _UPDATER_ #define _UPDATER_ #include "DatapointPartitions/DatapointPartitions.h" #include "Gradient/Gradient.h" class Updater { protected: // Keep a reference of the model and datapoints, and partition ordering. Model *model; std::vector<Datapoint *> datapoints; DatapointPartitions *datapoint_partitions; // Gradient object stores extra info for Model processing Gradient gradient; std::vector<int> bookkeeping; // A reference to all_coordinates, which indexes all the coordinates of the model. std::vector<int> all_coordinates; // H, Nu and Mu for updates. virtual double H(int coordinate, int index_into_coordinate_vector) { return 0; } virtual double Nu(int coordinate, int index_into_coordinate_vector) { return 0; } virtual double Mu(int coordinate) { return 0; } // After calling PrepareNu/Mu/H, for the given coordinates, we expect that // calls to Nu/Mu/H are ready. virtual void PrepareNu(const std::vector<int> &coordinates) {} virtual void PrepareMu(const std::vector<int> &coordinates) {} virtual void PrepareH(const Datapoint &datapoint) {} // By default need catch up. 
virtual bool NeedCatchUp() { return true; } virtual void ApplyGradient(const Datapoint &datapoint) { static int cnt = 0; int n_coords = model->NumCoordinates(); for (const auto &c : datapoint.GetCoordinates()) { const auto &mu = Mu(c); tasvir_log(&model->Data(c, 0, false), sizeof(double) * n_coords); for (int j = 0; j < n_coords; j++) model->Data(c, j, false) = (1 - mu) * model->Data(c, j, false) - Nu(c, j) + H(c, j); } } virtual void CatchUp(int index, int diff) { if (!NeedCatchUp()) return; if (diff < 0) diff = 0; int n_coords = model->NumCoordinates(); double geom_sum = 0; double mu = Mu(index); if (mu != 0) geom_sum = ((1 - pow(1 - mu, diff + 1)) / (1 - (1 - mu))) - 1; tasvir_log(&model->Data(index, 0, false), sizeof(double) * n_coords); for (int j = 0; j < n_coords; j++) model->Data(index, j, false) = pow(1 - mu, diff) * model->Data(index, j, false) - Nu(index, j) * geom_sum; } virtual void CatchUpDatapoint(const Datapoint &datapoint) { int n_coords = model->NumCoordinates(); for (const auto &c : datapoint.GetCoordinates()) { int diff = datapoint.GetOrder() - bookkeeping[c] - 1; CatchUp(c, diff); } } virtual void FinalCatchUp() { const auto &n_coords = model->NumCoordinates(); const auto &parameter_size = model->NumParameters(); // #pragma omp parallel num_threads(FLAGS_n_threads) PrepareNu(all_coordinates); PrepareMu(all_coordinates); // #pragma omp for for (int i = 0; i < model->NumParameters(); i++) { int diff = parameter_size - bookkeeping[i]; CatchUp(i, diff); } } public: Updater(Model *model, std::vector<Datapoint *> &datapoints) { this->model = model; this->datapoints = datapoints; for (int i = 0; i < model->NumParameters(); i++) { // Set up bookkeping. bookkeeping.push_back(0); // Keep an array that has integers 1...n_coords. all_coordinates.push_back(i); } } Updater() {} virtual ~Updater() {} // Could be useful to get partitioning info. 
virtual void SetUpWithPartitions(DatapointPartitions &partitions) { datapoint_partitions = &partitions; } // Main update method, which is run by multiple threads. virtual void Update(const Datapoint &datapoint) { gradient.Clear(); gradient.datapoint = &datapoint; // First prepare Nu and Mu for catchup since they are independent of the the model. PrepareNu(datapoint.GetCoordinates()); PrepareMu(datapoint.GetCoordinates()); CatchUpDatapoint(datapoint); // After catching up, prepare H and apply the gradient. PrepareH(datapoint); ApplyGradient(datapoint); // Update bookkeeping. for (const auto &coordinate : datapoint.GetCoordinates()) bookkeeping[coordinate] = datapoint.GetOrder(); } // Called before epoch begins. virtual void EpochBegin() {} // Called when the epoch ends. virtual void EpochFinish() { FinalCatchUp(); std::fill(bookkeeping.begin(), bookkeeping.end(), 0); } }; #endif
kpoint.c
/* kpoint.c */
/* Copyright (C) 2008 Atsushi Togo */

/* Public entry points (kpt_*) that reduce k-points / reciprocal-space grids */
/* to their irreducible sets under a crystal's point-group symmetry, and that */
/* build momentum-conserving q-point triplets (q+q'+q''=G). */

#include <stdio.h>
#include <stdlib.h>
#include "mathfunc.h"
#include "symmetry.h"
#include "kpoint.h"
#include "debug.h"

/* #define GRID_ORDER_XYZ */
/* The addressing order of mesh grid is defined as running left */
/* element first. But when GRID_ORDER_XYZ is defined, it is changed to right */
/* element first. */

static PointSymmetry get_point_group_reciprocal(const MatINT * rotations,
                                                const int is_time_reversal);
static PointSymmetry get_point_group_reciprocal_with_q(SPGCONST PointSymmetry * pointgroup,
                                                       const double symprec,
                                                       const int num_q,
                                                       SPGCONST double qpoints[][3]);
static int get_ir_kpoints(int map[],
                          SPGCONST double kpoints[][3],
                          const int num_kpoint,
                          SPGCONST PointSymmetry * point_symmetry,
                          const double symprec);
static int get_ir_reciprocal_mesh(int grid_point[][3],
                                  int map[],
                                  const int mesh[3],
                                  const int is_shift[3],
                                  SPGCONST PointSymmetry * point_symmetry);
static Triplets * get_ir_triplets(const int mesh[3],
                                  const int is_time_reversal,
                                  const MatINT * rotations,
                                  const double symprec);
static int get_ir_triplets_with_q(int weights[],
                                  int grid_points[][3],
                                  int third_q[],
                                  const int grid_point,
                                  const int mesh[3],
                                  PointSymmetry * pointgroup,
                                  const double symprec);
static int extract_ir_triplets_with_q(int triplets_with_q[][3],
                                      int weight_with_q[],
                                      const int fixed_grid_number,
                                      SPGCONST int triplets[][3],
                                      const int num_triplets,
                                      const int mesh[3],
                                      SPGCONST PointSymmetry * point_symmetry);
static void get_grid_mapping_table(int **map_sym,
                                   SPGCONST PointSymmetry * point_symmetry,
                                   const int mesh[3],
                                   const int is_shift[3]);
static void address_to_grid(int grid_double[3],
                            const int address,
                            const int mesh[3],
                            const int is_shift[3]);
static void get_grid_points(int grid_point[3],
                            const int grid[3],
                            const int mesh[3]);
static void get_vector_modulo(int v[3], const int m[3]);
static int grid_to_address(const int grid[3],
                           const int mesh[3],
                           const int is_shift[3]);
static void free_array2D_int(int **array, const int num_row);
static int ** allocate_array2d_int(const int num_row, const int num_column);
static Triplets * allocate_triplets(const int num_triplets, const int mesh[3]);

/* Reduce an explicit list of k-points to its irreducible set. */
/* map[i] receives the index of the irreducible k-point that kpoints[i] */
/* maps onto; the return value is the number of irreducible k-points. */
int kpt_get_irreducible_kpoints(int map[],
                                SPGCONST double kpoints[][3],
                                const int num_kpoint,
                                const Symmetry * symmetry,
                                const int is_time_reversal,
                                const double symprec)
{
  int i;
  PointSymmetry point_symmetry;
  MatINT *rotations;

  /* Copy the rotation parts of the symmetry operations, then build the */
  /* reciprocal-space point group from them. */
  rotations = mat_alloc_MatINT(symmetry->size);
  for (i = 0; i < symmetry->size; i++) {
    mat_copy_matrix_i3(rotations->mat[i], symmetry->rot[i]);
  }
  point_symmetry = get_point_group_reciprocal(rotations, is_time_reversal);
  mat_free_MatINT(rotations);
  return get_ir_kpoints(map, kpoints, num_kpoint, &point_symmetry, symprec);
}

/* grid_point (e.g. 4x4x4 mesh) */
/* [[ 0 0 0] */
/* [ 1 0 0] */
/* [ 2 0 0] */
/* [-1 0 0] */
/* [ 0 1 0] */
/* [ 1 1 0] */
/* [ 2 1 0] */
/* [-1 1 0] */
/* .... ] */
/* */
/* Each value of 'map' corresponds to the index of grid_point. */
int kpt_get_irreducible_reciprocal_mesh(int grid_points[][3],
                                        int map[],
                                        const int mesh[3],
                                        const int is_shift[3],
                                        const int is_time_reversal,
                                        const Symmetry * symmetry)
{
  int i;
  PointSymmetry point_symmetry;
  MatINT *rotations;

  rotations = mat_alloc_MatINT(symmetry->size);
  for (i = 0; i < symmetry->size; i++) {
    mat_copy_matrix_i3(rotations->mat[i], symmetry->rot[i]);
  }
  point_symmetry = get_point_group_reciprocal(rotations, is_time_reversal);
  mat_free_MatINT(rotations);
  return get_ir_reciprocal_mesh(grid_points, map, mesh, is_shift, &point_symmetry);
}

/* Free a Triplets structure and all of its owned arrays. */
/* Note: "t = NULL" only clears the local copy of the pointer; the caller's */
/* pointer is left dangling and must not be reused. */
void kpt_free_triplets(Triplets * t)
{
  free(t->triplets);
  t->triplets = NULL;
  free(t->weights);
  t->weights = NULL;
  free(t->mesh_points);
  t->mesh_points = NULL;
  free(t);
  t = NULL;
}

/* Irreducible mesh restricted by stabilizer q-points: only operations that */
/* leave the qpoints set invariant (mod reciprocal lattice) are used. */
int kpt_get_stabilized_reciprocal_mesh(int grid_points[][3],
                                       int map[],
                                       const int mesh[3],
                                       const int is_shift[3],
                                       const int is_time_reversal,
                                       const MatINT * rotations,
                                       const int num_q,
                                       SPGCONST double qpoints[][3],
                                       const double symprec)
{
  PointSymmetry pointgroup, pointgroup_q;

  pointgroup = get_point_group_reciprocal(rotations, is_time_reversal);
  pointgroup_q = get_point_group_reciprocal_with_q(&pointgroup,
                                                   symprec,
                                                   num_q,
                                                   qpoints);
  return get_ir_reciprocal_mesh(grid_points, map, mesh, is_shift, &pointgroup_q);
}

/* Build all irreducible momentum-conserving triplets on the mesh. */
/* The returned structure is owned by the caller (free with kpt_free_triplets). */
Triplets * kpt_get_triplets_reciprocal_mesh(const int mesh[3],
                                            const int is_time_reversal,
                                            const MatINT * rotations,
                                            const double symprec)
{
  return get_ir_triplets(mesh, is_time_reversal, rotations, symprec);
}

/* Irreducible triplets with the first q-point fixed to 'grid_point'. */
int kpt_get_ir_triplets_at_q(int weights[],
                             int grid_points[][3],
                             int third_q[],
                             const int grid_point,
                             const int mesh[3],
                             const int is_time_reversal,
                             const MatINT * rotations,
                             const double symprec)
{
  PointSymmetry pointgroup;

  pointgroup = get_point_group_reciprocal(rotations, is_time_reversal);
  return get_ir_triplets_with_q(weights, grid_points, third_q,
                                grid_point, mesh, &pointgroup, symprec);
}

/* From a precomputed triplet list, extract those containing the fixed grid */
/* point (up to symmetry and index exchange) together with their weights. */
int kpt_extract_triplets_reciprocal_mesh_at_q(int triplets_with_q[][3],
                                              int weight_with_q[],
                                              const int fixed_grid_number,
                                              const int num_triplets,
                                              SPGCONST int triplets[][3],
                                              const int mesh[3],
                                              const int is_time_reversal,
                                              const MatINT * rotations)
{
  PointSymmetry point_group;

  point_group = get_point_group_reciprocal(rotations, is_time_reversal);
  return extract_ir_triplets_with_q(triplets_with_q,
                                    weight_with_q,
                                    fixed_grid_number,
                                    triplets,
                                    num_triplets,
                                    mesh,
                                    &point_group);
}

/* qpoints are used to find stabilizers (operations). */
/* num_q is the number of the qpoints.
*/
/* Build the reciprocal-space point group: transposed rotations, optionally */
/* doubled with the inversion (time reversal), with duplicates removed. */
static PointSymmetry get_point_group_reciprocal(const MatINT * rotations,
                                                const int is_time_reversal)
{
  int i, j, num_pt = 0;
  MatINT *rot_reciprocal;
  PointSymmetry point_symmetry;
  SPGCONST int inversion[3][3] = {
    {-1, 0, 0 },
    { 0,-1, 0 },
    { 0, 0,-1 }
  };

  if (is_time_reversal) {
    rot_reciprocal = mat_alloc_MatINT(rotations->size * 2);
  } else {
    rot_reciprocal = mat_alloc_MatINT(rotations->size);
  }
  for (i = 0; i < rotations->size; i++) {
    /* Reciprocal-space operation is the transpose of the real-space rotation. */
    mat_transpose_matrix_i3(rot_reciprocal->mat[i], rotations->mat[i]);
    if (is_time_reversal) {
      /* Time reversal adds -R for every R. */
      mat_multiply_matrix_i3(rot_reciprocal->mat[rotations->size+i],
                             inversion,
                             rot_reciprocal->mat[i]);
    }
  }
  /* Keep only distinct operations. */
  for (i = 0; i < rot_reciprocal->size; i++) {
    for (j = 0; j < num_pt; j++) {
      if (mat_check_identity_matrix_i3(point_symmetry.rot[j],
                                       rot_reciprocal->mat[i])) {
        goto escape; /* duplicate: skip */
      }
    }
    mat_copy_matrix_i3(point_symmetry.rot[num_pt], rot_reciprocal->mat[i]);
    num_pt++;
  escape:
    ;
  }
  point_symmetry.size = num_pt;
  mat_free_MatINT(rot_reciprocal);
  return point_symmetry;
}

/* Subgroup of 'pointgroup' whose operations map every qpoint onto some */
/* qpoint of the set, modulo a reciprocal lattice vector (the stabilizer). */
static PointSymmetry get_point_group_reciprocal_with_q(SPGCONST PointSymmetry * pointgroup,
                                                       const double symprec,
                                                       const int num_q,
                                                       SPGCONST double qpoints[][3])
{
  int i, j, k, l, is_all_ok=0, num_ptq = 0;
  double q_rot[3], diff[3];
  PointSymmetry pointgroup_q;

  for (i = 0; i < pointgroup->size; i++) {
    for (j = 0; j < num_q; j++) {
      is_all_ok = 0;
      mat_multiply_matrix_vector_id3(q_rot,
                                     pointgroup->rot[i],
                                     qpoints[j]);
      for (k = 0; k < num_q; k++) {
        for (l = 0; l < 3; l++) {
          diff[l] = q_rot[l] - qpoints[k][l];
          diff[l] -= mat_Nint(diff[l]); /* compare modulo reciprocal lattice */
        }
        if (mat_Dabs(diff[0]) < symprec &&
            mat_Dabs(diff[1]) < symprec &&
            mat_Dabs(diff[2]) < symprec) {
          is_all_ok = 1;
          break;
        }
      }
      if (! is_all_ok) {
        break; /* this operation moves qpoints[j] outside the set */
      }
    }
    if (is_all_ok) {
      mat_copy_matrix_i3(pointgroup_q.rot[num_ptq], pointgroup->rot[i]);
      num_ptq++;
    }
  }
  pointgroup_q.size = num_ptq;
  return pointgroup_q;
}

/* Reduce an explicit k-point list under point_symmetry. A k-point is a new */
/* irreducible representative unless a rotated image of it (mod lattice) */
/* coincides with an already-found representative. */
static int get_ir_kpoints(int map[],
                          SPGCONST double kpoints[][3],
                          const int num_kpoint,
                          SPGCONST PointSymmetry * point_symmetry,
                          const double symprec)
{
  int i, j, k, l, num_ir_kpoint = 0, is_found;
  int *ir_map;
  double kpt_rot[3], diff[3];

  ir_map = (int*)malloc(num_kpoint*sizeof(int));
  for (i = 0; i < num_kpoint; i++) {
    map[i] = i;
    is_found = 1;
    for (j = 0; j < point_symmetry->size; j++) {
      mat_multiply_matrix_vector_id3(kpt_rot, point_symmetry->rot[j], kpoints[i]);
      for (k = 0; k < 3; k++) {
        diff[k] = kpt_rot[k] - kpoints[i][k];
        diff[k] = diff[k] - mat_Nint(diff[k]);
      }
      /* Operation fixes this k-point: it cannot map it onto another one. */
      if (mat_Dabs(diff[0]) < symprec &&
          mat_Dabs(diff[1]) < symprec &&
          mat_Dabs(diff[2]) < symprec) {
        continue;
      }
      /* Compare the rotated k-point against the representatives found so far. */
      for (k = 0; k < num_ir_kpoint; k++) {
        mat_multiply_matrix_vector_id3(kpt_rot, point_symmetry->rot[j], kpoints[i]);
        for (l = 0; l < 3; l++) {
          diff[l] = kpt_rot[l] - kpoints[ir_map[k]][l];
          diff[l] = diff[l] - mat_Nint(diff[l]);
        }
        if (mat_Dabs(diff[0]) < symprec &&
            mat_Dabs(diff[1]) < symprec &&
            mat_Dabs(diff[2]) < symprec) {
          is_found = 0;
          map[i] = ir_map[k];
          break;
        }
      }
      if (! is_found) break;
    }
    if (is_found) {
      ir_map[num_ir_kpoint] = i;
      num_ir_kpoint++;
    }
  }
  free(ir_map);
  ir_map = NULL;
  return num_ir_kpoint;
}

/* Reduce a regular reciprocal mesh under point_symmetry. */
static int get_ir_reciprocal_mesh(int grid[][3],
                                  int map[],
                                  const int mesh[3],
                                  const int is_shift[3],
                                  SPGCONST PointSymmetry * point_symmetry)
{
  /* In the following loop, mesh is doubled. */
  /* Even and odd mesh numbers correspond to */
  /* is_shift[i] = 0 and 1, respectively. */
  /* is_shift = [0,0,0] gives Gamma center mesh. */
  /* grid: reducible grid points */
  /* map: the mapping from each point to ir-point. */
  int i, j, k, l, address, address_rot, num_ir = 0;
  int grid_double[3], grid_rot[3], mesh_double[3];

  for (i = 0; i < 3; i++) {
    mesh_double[i] = mesh[i] * 2;
  }

  /* "-1" means the element is not touched yet. */
  for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) {
    map[i] = -1;
  }

#ifndef GRID_ORDER_XYZ
  for (i = 0; i < mesh_double[2]; i++) {
    if ((is_shift[2] && i % 2 == 0) ||
        (is_shift[2] == 0 && i % 2 != 0))
      continue;
    for (j = 0; j < mesh_double[1]; j++) {
      if ((is_shift[1] && j % 2 == 0) ||
          (is_shift[1] == 0 && j % 2 != 0))
        continue;
      for (k = 0; k < mesh_double[0]; k++) {
        if ((is_shift[0] && k % 2 == 0) ||
            (is_shift[0] == 0 && k % 2 != 0))
          continue;
        grid_double[0] = k;
        grid_double[1] = j;
        grid_double[2] = i;
#else
  for (i = 0; i < mesh_double[0]; i++) {
    if ((is_shift[0] && i % 2 == 0) ||
        (is_shift[0] == 0 && i % 2 != 0))
      continue;
    for (j = 0; j < mesh_double[1]; j++) {
      if ((is_shift[1] && j % 2 == 0) ||
          (is_shift[1] == 0 && j % 2 != 0))
        continue;
      for (k = 0; k < mesh_double[2]; k++) {
        if ((is_shift[2] && k % 2 == 0) ||
            (is_shift[2] == 0 && k % 2 != 0))
          continue;
        grid_double[0] = i;
        grid_double[1] = j;
        grid_double[2] = k;
#endif
        address = grid_to_address(grid_double, mesh, is_shift);
        get_grid_points(grid[address], grid_double, mesh);
        for (l = 0; l < point_symmetry->size; l++) {
          mat_multiply_matrix_vector_i3(grid_rot,
                                        point_symmetry->rot[l],
                                        grid_double);
          get_vector_modulo(grid_rot, mesh_double);
          address_rot = grid_to_address(grid_rot, mesh, is_shift);
          if (address_rot > -1) { /* Invalid if even --> odd or odd --> even */
            if (map[address_rot] > -1) {
              map[address] = map[address_rot];
              break;
            }
          }
        }

        /* Set itself to the map when equivalent point */
        /* with smaller numbering could not be found. */
        if (map[address] == -1) {
          map[address] = address;
          num_ir++;
        }
      }
    }
  }
  return num_ir;
}

/* Unique q-point triplets that conserve the momentum, */
/* q+q'+q''=G, are obtained. */
/* */
/* The first q-point is selected among the ir-q-points.
*/ /* The second q-point is selected among the ir-q-points */ /* constrained by the first q-point (stabilizer) */ /* The third q-point is searched through the all grid */ /* points and is checked if it satisfies q+q'+q''=G, */ /* here q, q', and q'' can be exchanged one another. */ static Triplets * get_ir_triplets(const int mesh[3], const int is_time_reversal, const MatINT * rotations, const double symprec) { int i, j, k, l, num_ir, num_grid, weight, weight_q, count, q_2; int num_triplets, num_unique_q; int mesh_double[3], address[3], is_shift[3]; int grid_double[3][3]; int (*grid)[3], (*grid_local)[3]; int *map, *map_q, *unique_q; int **map_sym = NULL; int **weight_counts; double stabilizer_q[1][3]; PointSymmetry point_symmetry, point_symmetry_q; Triplets * tps; const int index_exchange[6][3] = {{ 0, 1, 2 }, { 2, 0, 1 }, { 1, 2, 0 }, { 2, 1, 0 }, { 0, 2, 1 }, { 1, 0, 2 }}; num_grid = mesh[0] * mesh[1] * mesh[2]; map = (int*) malloc(num_grid * sizeof(int)); unique_q = (int*) malloc(num_grid * sizeof(int)); grid = (int (*)[3]) malloc(sizeof(int[3]) * num_grid); point_symmetry = get_point_group_reciprocal(rotations, is_time_reversal); /* Only consider the gamma-point */ for (i = 0; i < 3; i++) { is_shift[i] = 0; } num_ir = get_ir_reciprocal_mesh(grid, map, mesh, is_shift, &point_symmetry); weight_counts = allocate_array2d_int(num_ir, num_grid); for (i = 0; i < num_ir; i++) { for (j = 0; j < num_grid; j++) { weight_counts[i][j] = 0; } } for (i = 0; i < 3; i++) { mesh_double[i] = mesh[i] * 2; } /* Prepare triplet mapping table to enhance speed of query */ /* 'unique_q' numbering is prepared for saving memory space */ num_unique_q = 0; for (i = 0; i < num_grid; i++) { if (i == map[i]) { unique_q[i] = num_unique_q; num_unique_q++; } else { unique_q[i] = unique_q[map[i]]; } } /* Prepare grid point mapping table */ map_sym = allocate_array2d_int(point_symmetry.size, num_grid); get_grid_mapping_table(map_sym, &point_symmetry, mesh, is_shift); /* Search triplets without 
considersing combination */ /* #pragma omp parallel for private(j, k, l, grid_double, point_symmetry_q, stabilizer_q, weight_q, grid_local, address, map_q, weight ) */ for (i = 0; i < num_grid; i++) { if (! (i == map[i])) { continue; } weight = 0; for (j = 0; j < num_grid; j++) { if (i == map[j]) { weight++; } } /* Search irreducible q-points (map_q) with a stabilizer */ address_to_grid(grid_double[0], i, mesh, is_shift); /* q */ for (j = 0; j < 3; j++) { stabilizer_q[0][j] = (double)grid_double[0][j] / mesh_double[j]; } point_symmetry_q = get_point_group_reciprocal_with_q(&point_symmetry, symprec, 1, stabilizer_q); grid_local = (int (*)[3]) malloc(sizeof(int[3]) * num_grid); map_q = (int*) malloc(num_grid * sizeof(int)); get_ir_reciprocal_mesh(grid_local, map_q, mesh, is_shift, &point_symmetry_q); free(grid_local); grid_local = NULL; for (j = 0; j < num_grid; j++) { if (! (j == map_q[j])) { continue; } weight_q = 0; for (k = 0; k < num_grid; k++) { if (j == map_q[k]) { weight_q++; } } address_to_grid(grid_double[1], j, mesh, is_shift); /* q' */ for (k = 0; k < 3; k++) { /* q'' */ grid_double[2][k] = - grid_double[0][k] - grid_double[1][k]; } get_vector_modulo(grid_double[2], mesh_double); q_2 = grid_to_address(grid_double[2], mesh, is_shift); /* Look for irreducible triplets exchanging three q-points */ /* and equivalent by symmetry rotations */ for (k = 0; k < point_symmetry.size; k++) { /* Index exchange */ for (l = 0; l < 6; l++) { /* Rotated grid point addresses with index exchange */ address[index_exchange[l][0]] = map_sym[k][i]; address[index_exchange[l][1]] = map_sym[k][j]; address[index_exchange[l][2]] = map_sym[k][q_2]; /* address[0] has to be one of ir-q-points. */ if (address[0] == map[address[0]]) { /* Is the set of ddress[0] and address[1] already found? */ if (weight_counts[unique_q[address[0]]][address[1]]) { weight_counts[unique_q[address[0]]][address[1]] += weight * weight_q; goto escape; } } } } /* Not found, then this is an irreducible triplet. 
*/ weight_counts[unique_q[i]][j] = weight * weight_q; escape: ; } free(map_q); map_q = NULL; } num_triplets = 0; for (i = 0; i < num_grid; i++) { if (! (i == map[i])) { continue; } for (j = 0; j < num_grid; j++) { if (weight_counts[unique_q[i]][j]) { num_triplets++; } } } tps = allocate_triplets(num_triplets, mesh); for (i = 0; i < num_grid; i++) { for (j = 0; j < 3; j++) { tps->mesh_points[i][j] = grid[i][j]; } } count = 0; for (i = 0; i < num_grid; i++) { if (! (i == map[i])) { continue; } for (j = 0; j < num_grid; j++) { if (weight_counts[unique_q[i]][j] ) { tps->triplets[count][0] = i; tps->triplets[count][1] = j; address_to_grid(grid_double[0], i, mesh, is_shift); /* q */ address_to_grid(grid_double[1], j, mesh, is_shift); /* q' */ for (l = 0; l < 3; l++) { /* q'' */ grid_double[2][l] = - grid_double[0][l] - grid_double[1][l]; } get_vector_modulo(grid_double[2], mesh_double); tps->triplets[count][2] = grid_to_address(grid_double[2], mesh, is_shift); tps->weights[count] = weight_counts[unique_q[i]][j]; count++; } } } free_array2D_int(map_sym, point_symmetry.size); free_array2D_int(weight_counts, num_ir); free(map); map = NULL; free(unique_q); unique_q = NULL; free(grid); grid = NULL; return tps; } static int get_ir_triplets_with_q(int weights[], int grid_points[][3], int third_q[], const int grid_point, const int mesh[3], PointSymmetry * pointgroup, const double symprec) { int i, j, k, num_grid, weight_q, q_2, num_ir; int mesh_double[3], address[3], is_shift[3]; int grid_double[3][3]; int *map_q; int **map_sym = NULL; double stabilizer_q[1][3]; PointSymmetry pointgroup_q; const int index_exchange[6][3] = {{ 0, 1, 2 }, { 2, 0, 1 }, { 1, 2, 0 }, { 2, 1, 0 }, { 0, 2, 1 }, { 1, 0, 2 }}; num_grid = mesh[0] * mesh[1] * mesh[2]; for (i = 0; i < 3; i++) { /* Only consider the gamma-point */ is_shift[i] = 0; mesh_double[i] = mesh[i] * 2; } /* Search irreducible q-points (map_q) with a stabilizer */ address_to_grid(grid_double[0], grid_point, mesh, is_shift); /* q */ for 
(i = 0; i < 3; i++) { stabilizer_q[0][i] = (double)grid_double[0][i] / mesh_double[i]; } pointgroup_q = get_point_group_reciprocal_with_q(pointgroup, symprec, 1, stabilizer_q); map_sym = allocate_array2d_int(pointgroup->size, num_grid); get_grid_mapping_table(map_sym, pointgroup, mesh, is_shift); map_q = (int*) malloc(sizeof(int) * num_grid); get_ir_reciprocal_mesh(grid_points, map_q, mesh, is_shift, &pointgroup_q); for (i = 0; i < num_grid; i++) { weights[i] = 0; third_q[i] = -1; } num_ir = 0; for (i = 0; i < num_grid; i++) { if (! (i == map_q[i])) { continue; } weight_q = 0; for (j = 0; j < num_grid; j++) { if (i == map_q[j]) { weight_q++; } } address_to_grid(grid_double[1], i, mesh, is_shift); /* q' */ for (j = 0; j < 3; j++) { /* q'' */ grid_double[2][j] = - grid_double[0][j] - grid_double[1][j]; } get_vector_modulo(grid_double[2], mesh_double); q_2 = grid_to_address(grid_double[2], mesh, is_shift); third_q[i] = q_2; /* Look for irreducible triplets exchanging three q-points */ /* and equivalent by symmetry rotations */ for (j = 0; j < pointgroup->size; j++) { /* Index exchange */ for (k = 0; k < 6; k++) { /* Rotated grid point addresses with index exchange */ address[index_exchange[k][0]] = map_sym[j][grid_point]; address[index_exchange[k][1]] = map_sym[j][i]; address[index_exchange[k][2]] = map_sym[j][q_2]; if (address[0] == grid_point) { /* Is the set of ddress[0] and address[1] already found? */ if (weights[address[1]]) { weights[address[1]] += weight_q; goto escape; } } } } /* Not found, then this is an irreducible triplet. 
*/
    weights[i] = weight_q;
    num_ir++;
  escape:
    ;
  }

  free(map_q);
  map_q = NULL;
  free_array2D_int(map_sym, pointgroup->size);

  return num_ir;
}

/* From a full list of triplets, extract those containing the fixed grid
 * point (fixed_grid_number) up to the given point-group symmetry, and
 * accumulate their weights.  triplets_with_q/weight_with_q receive the
 * result; returns the number of extracted triplets (0 on failure). */
static int extract_ir_triplets_with_q(int triplets_with_q[][3],
                                      int weight_with_q[],
                                      const int fixed_grid_number,
                                      SPGCONST int triplets[][3],
                                      const int num_triplets,
                                      const int mesh[3],
                                      SPGCONST PointSymmetry *point_symmetry)
{
  int i, j, k, sym_num, rest_index, num_triplets_with_q;
  int address0, address1, address1_orig, found;
  int is_shift[3];
  int num_grid;
  int **map_sym;

  num_grid = mesh[0] * mesh[1] * mesh[2];
  map_sym = allocate_array2d_int(point_symmetry->size, num_grid);

  /* Only consider the gamma-point */
  for (i = 0; i < 3; i++) {
    is_shift[i] = 0;
  }

  /* Prepare mapping tables */
  get_grid_mapping_table(map_sym, point_symmetry, mesh, is_shift);

  num_triplets_with_q = 0;
  for (i = 0; i < num_triplets; i++) {
    sym_num = -1;
    /* Find a rotation that maps fixed_grid_number onto one member of
       triplet i; rest_index marks the member not matched below. */
    for (j = 0; j < point_symmetry->size; j++) {
      address0 = map_sym[j][fixed_grid_number];
      if (triplets[i][0] == address0 ||
          triplets[i][1] == address0 ||
          triplets[i][2] == address0) {
        for (k = 0; k < num_grid; k++) {
          address1 = map_sym[j][k];
          /* Matching indices 0 and 1 */
          if ((triplets[i][0] == address0 && triplets[i][1] == address1) ||
              (triplets[i][1] == address0 && triplets[i][0] == address1)) {
            sym_num = j;
            rest_index = 2;
            address1_orig = k;
            break;
          }
          /* Matching indices 1 and 2 */
          if ((triplets[i][1] == address0 && triplets[i][2] == address1) ||
              (triplets[i][2] == address0 && triplets[i][1] == address1)) {
            sym_num = j;
            rest_index = 0;
            address1_orig = k;
            break;
          }
          /* Matching indices 2 and 0 */
          if ((triplets[i][2] == address0 && triplets[i][0] == address1) ||
              (triplets[i][0] == address0 && triplets[i][2] == address1)) {
            sym_num = j;
            rest_index = 1;
            address1_orig = k;
            break;
          }
        }
        if (sym_num > -1) {
          break;
        }
      }
    }
    /* Found?
*/
    if (sym_num > -1) {
      /* Recover the pre-image of the remaining member and store the
         triplet with its two non-fixed addresses in ascending order. */
      for (j = 0; j < num_grid; j++) {
        if (map_sym[sym_num][j] == triplets[i][rest_index]) {
          triplets_with_q[num_triplets_with_q][0] = fixed_grid_number;
          if (j > address1_orig) {
            triplets_with_q[num_triplets_with_q][1] = address1_orig;
            triplets_with_q[num_triplets_with_q][2] = j;
          } else {
            triplets_with_q[num_triplets_with_q][2] = address1_orig;
            triplets_with_q[num_triplets_with_q][1] = j;
          }
          num_triplets_with_q++;
          break;
        }
      }
    }
  }

  /* Accumulate weights: every grid point must map to exactly one of the
     extracted triplets under some symmetry operation. */
  for (i = 0; i < num_triplets_with_q; i++) {
    weight_with_q[i] = 0;
  }

  for (i = 0; i < num_grid; i++) {
    found = 0;
    for (j = 0; j < num_triplets_with_q; j++) {
      for (k = 0; k < point_symmetry->size; k++) {
        if (map_sym[k][fixed_grid_number] == triplets_with_q[j][0]) {
          if (map_sym[k][i] == triplets_with_q[j][1] ||
              map_sym[k][i] == triplets_with_q[j][2]) {
            weight_with_q[j]++;
            found = 1;
            break;
          }
        }
        if (map_sym[k][fixed_grid_number] == triplets_with_q[j][1]) {
          if (map_sym[k][i] == triplets_with_q[j][2] ||
              map_sym[k][i] == triplets_with_q[j][0]) {
            weight_with_q[j]++;
            found = 1;
            break;
          }
        }
        if (map_sym[k][fixed_grid_number] == triplets_with_q[j][2]) {
          if (map_sym[k][i] == triplets_with_q[j][0] ||
              map_sym[k][i] == triplets_with_q[j][1]) {
            weight_with_q[j]++;
            found = 1;
            break;
          }
        }
      }
      if (found) {
        break;
      }
    }
    if (! found) {
      /* Inconsistency: a grid point belongs to no extracted triplet. */
      warning_print("spglib: Unexpected behavior in extract_ir_triplets_with_q ");
      warning_print("(line %d, %s).\n", __LINE__, __FILE__);
      num_triplets_with_q = 0;
      break;
    }
  }

  free_array2D_int(map_sym, point_symmetry->size);

  return num_triplets_with_q;
}

/* Build the rotation mapping table: map_sym[s][j] is the grid address of
 * grid point j rotated by point-symmetry operation s (folded into the
 * mesh). */
static void get_grid_mapping_table(int **map_sym,
                                   SPGCONST PointSymmetry *point_symmetry,
                                   const int mesh[3],
                                   const int is_shift[3])
{
  int i, j;
  int grid_rot[3], grid_double[3], mesh_double[3];

  for (i = 0; i < 3; i++) {
    mesh_double[i] = mesh[i] * 2;
  }

  for (i = 0; i < point_symmetry->size; i++) {
    for (j = 0; j < mesh[0]*mesh[1]*mesh[2]; j++) {
      address_to_grid(grid_double, j, mesh, is_shift);
      mat_multiply_matrix_vector_i3(grid_rot,
                                    point_symmetry->rot[i],
                                    grid_double);
      get_vector_modulo(grid_rot, mesh_double);
      map_sym[i][j] = grid_to_address(grid_rot, mesh, is_shift);
    }
  }
}

/* Convert doubled grid coordinates to a linear grid address.
 * Returns -1 if the parity of grid_double is inconsistent with is_shift. */
static int grid_to_address(const int grid_double[3],
                           const int mesh[3],
                           const int is_shift[3])
{
  int i, grid[3];

  for (i = 0; i < 3; i++) {
    if (grid_double[i] % 2 == 0 && (!
is_shift[i]) ) {
      grid[i] = grid_double[i] / 2;
    } else {
      if (grid_double[i] % 2 != 0 && is_shift[i]) {
        grid[i] = (grid_double[i] - 1) / 2;
      } else {
        /* Parity of grid_double disagrees with is_shift: invalid input. */
        return -1;
      }
    }
  }

#ifndef GRID_ORDER_XYZ
  return grid[2] * mesh[0] * mesh[1] + grid[1] * mesh[0] + grid[0];
#else
  return grid[0] * mesh[1] * mesh[2] + grid[1] * mesh[2] + grid[2];
#endif
}

/* Inverse of grid_to_address: recover the doubled grid coordinates of a
 * linear grid address (doubling encodes half-grid shifts exactly). */
static void address_to_grid(int grid_double[3],
                            const int address,
                            const int mesh[3],
                            const int is_shift[3])
{
  int i;
  int grid[3];

#ifndef GRID_ORDER_XYZ
  grid[2] = address / (mesh[0] * mesh[1]);
  grid[1] = (address - grid[2] * mesh[0] * mesh[1]) / mesh[0];
  grid[0] = address % mesh[0];
#else
  grid[0] = address / (mesh[1] * mesh[2]);
  grid[1] = (address - grid[0] * mesh[1] * mesh[2]) / mesh[2];
  grid[2] = address % mesh[2];
#endif

  for (i = 0; i < 3; i++) {
    grid_double[i] = grid[i] * 2 + is_shift[i];
  }
}

/* Halve doubled grid coordinates and fold them into the symmetric
 * (negative-boundary-aware) interval around zero. */
static void get_grid_points(int grid[3],
                            const int grid_double[3],
                            const int mesh[3])
{
  int i;

  for (i = 0; i < 3; i++) {
    if (grid_double[i] % 2 == 0) {
      grid[i] = grid_double[i] / 2;
    } else {
      grid[i] = (grid_double[i] - 1) / 2;
    }
#ifndef GRID_BOUNDARY_AS_NEGATIVE
    grid[i] = grid[i] - mesh[i] * (grid[i] > mesh[i] / 2);
#else
    grid[i] = grid[i] - mesh[i] * (grid[i] >= mesh[i] / 2);
#endif
  }
}

/* Reduce each component of v into [0, m[i]) (mathematical modulo). */
static void get_vector_modulo(int v[3], const int m[3])
{
  int i;

  for (i = 0; i < 3; i++) {
    v[i] = v[i] % m[i];
    if (v[i] < 0)
      v[i] += m[i];
  }
}

/* Free a 2D int array created by allocate_array2d_int. */
static void free_array2D_int(int **array, const int num_row)
{
  int i;

  for (i = 0; i < num_row; i++) {
    free(array[i]);
    array[i] = NULL;
  }
  free(array);
  array = NULL;
}

/* Allocate a num_row x num_column int array (one malloc per row).
 * NOTE(review): malloc results are unchecked — matches the rest of this
 * file; confirm the project's OOM policy before hardening. */
static int ** allocate_array2d_int(const int num_row, const int num_column)
{
  int i;
  int **array;

  array = (int**) malloc(num_row * sizeof(int*));
  for (i = 0; i < num_row; i++) {
    array[i] = (int*) malloc(num_column * sizeof(int));
  }
  return array;
}

/* Allocate a Triplets container sized for num_triplets triplets over the
 * given mesh; the caller owns (and must free) all members. */
static Triplets * allocate_triplets(const int num_triplets, const int mesh[3])
{
  int i, num_grid;
  Triplets * tps;

  num_grid = mesh[0] * mesh[1] * mesh[2];
  tps = (Triplets*) malloc(sizeof(Triplets));
  tps->size = num_triplets;
  tps->triplets = (int (*)[3]) malloc(sizeof(int[3]) * num_triplets);
  tps->weights = (int*) malloc(sizeof(int) * num_triplets);
  tps->mesh_points = (int (*)[3]) malloc(sizeof(int[3]) * num_grid);
  for (i = 0; i < 3; i++) {
    tps->mesh[i] = mesh[i];
  }
  return tps;
}
MsnhCVVector.h
#ifndef MSNHCVVECTOR_H #define MSNHCVVECTOR_H #include <Msnhnet/config/MsnhnetCfg.h> #include <iostream> namespace Msnhnet { template<int N,typename T> class MsnhNet_API Vector { public: Vector(){} Vector(const std::vector<T> &val) { if(val.size()!=N) { throw Exception(1,"[Vector]: set val num must equal data num! \n", __FILE__, __LINE__, __FUNCTION__); } for (int i = 0; i < N; ++i) { this->_value[i] = val[i]; } } Vector(const Vector& vec) { memcpy(this->_value,vec._value,sizeof(T)*N); } Vector &operator= (const Vector &vec) { memcpy(this->_value,vec._value,sizeof(T)*N); return *this; } inline void fill(const T &value) { for (int i = 0; i < N; ++i) { this->_value[i] = value; } } inline void print() { std::cout<<"{ Vector: "<<N<<std::endl; if(isF32Vec()) { for (int i = 0; i < N; ++i) { std::cout<<std::setiosflags(std::ios::left)<<std::setprecision(6)<<std::setw(6)<<_value[i]<<" "; } } else if(isF64Vec()) { for (int i = 0; i < N; ++i) { std::cout<<std::setiosflags(std::ios::left)<<std::setprecision(12)<<std::setw(12)<<_value[i]<<" "; } } else { for (int i = 0; i < N; ++i) { std::cout<<_value[i]<<" "; } } std::cout<<"\n}"<<std::endl; } void setVal(const std::vector<T> &val) { if(val.size()!=N) { throw Exception(1,"[Vector]: set val num must equal data num! \n", __FILE__, __LINE__, __FUNCTION__); } for (int i = 0; i < N; ++i) { this->_value[i] = val[i]; } } void setVal(const int &index, const T &val) { if(index>(N-1)) { throw Exception(1,"[Vector]: index out of memory! 
\n", __FILE__, __LINE__, __FUNCTION__); } this->_value[index] = val; } bool isFuzzyNull() const { if(isF32Vec()) { for (int i = 0; i < N; ++i) { if(fabsf(this->_value[i])>MSNH_F32_EPS) { return false; } } return true; } else if(isF64Vec()) { for (int i = 0; i < N; ++i) { if(abs(this->_value[i])>MSNH_F64_EPS) { return false; } } return true; } else { for (int i = 0; i < N; ++i) { if(this->_value[i]>0) { return false; } } return true; } } inline bool isNan() const { for (int i = 0; i < N; ++i) { if(std::isnan(static_cast<double>(this->_value[i]))) { return true; } } return false; } inline bool isF32Vec() const { return std::is_same<T,float>::value; } inline bool isF64Vec() const { return std::is_same<T,double>::value; } Vector normalized() const { if(!(isF32Vec() || isF64Vec())) { throw Exception(1, "[Vector] normalize only f32 and f64 is supported!", __FILE__, __LINE__,__FUNCTION__); } T len = 0; Vector vec; for (int i = 0; i < N; ++i) { len += this->_value[i]*this->_value[i]; } if(isF32Vec()) { if(fabsf(len - 1.0f) < MSNH_F32_EPS) { return *this; } if(fabsf(len) < MSNH_F32_EPS) { return vec; } len = sqrtf(len); } else if(isF32Vec()) { if(abs(len - 1.0) < MSNH_F64_EPS) { return *this; } if(abs(len) < MSNH_F64_EPS) { return vec; } len = sqrt(len); } for (int i = 0; i < N; ++i) { vec[i] = this->_value[i] / len; } return vec; } void normalize() { if(!(isF32Vec() || isF64Vec())) { throw Exception(1, "[Vector] normalize only f32 and f64 is supported!", __FILE__, __LINE__,__FUNCTION__); } T len = 0; for (int i = 0; i < N; ++i) { len += this->_value[i]*this->_value[i]; } if(this->isF32Vec()) { if(fabsf(len - 1.0f) < MSNH_F32_EPS || fabsf(len) < MSNH_F32_EPS) { return; } len = sqrtf(len); } else { if(abs(len - 1.0) < MSNH_F64_EPS || abs(len) < MSNH_F64_EPS) { return; } len = sqrt(len); } for (int i = 0; i < N; ++i) { this->_value[i] = this->_value[i] / len; } } inline double length() const { double len = 0; for (int i = 0; i < N; ++i) { len += 
this->_value[i]*this->_value[i]; } return sqrt(len); } inline double lengthSquared() const { double len = 0; for (int i = 0; i < N; ++i) { len += this->_value[i]*this->_value[i]; } return len; } /* 点到点之间的距离 * .eg ^ * | * A x --> --> ---> * | \ OA - OB = |BA| * | \ * O |-----x--> * B */ inline double distanceToPoint(const Vector &point) const { return (*this - point).length(); } /* 点到线之间的距离 * .eg ^ * \ | * x x(A) * | \ * | x (point) * | \ * O |-------x--> B * \(direction) * \LINE(point + direction) */ inline double distanceToLine(const Vector &point, const Vector &direction) const { if(N<2) { throw Exception(1,"[Vector] only 2 dims+ is supported!",__FILE__,__LINE__,__FUNCTION__); } if(direction.isFuzzyNull()) { return (*this - point).length(); } Vector p = point + Vector::dotProduct((*this-point)*direction,direction); return (*this - p).length(); } /* 点到线之间的距离 * .eg ^ * / \ | *(normal) * / x * * / | \ * / | \ x(A) * \ *| \ * \ O |-------x--> B * * \ / / * * /\ / * / \ / (plane) * / \/ * */ inline double distanceToPlane(const Vector& plane, const Vector& normal) const { if(N<3) { throw Exception(1,"[Vector] only 3 dims+ is supported!",__FILE__,__LINE__,__FUNCTION__); } return (*this-plane)*normal; } inline static Vector crossProduct(const Vector &v1, const Vector &v2) { if(N!=3) { throw Exception(1,"[Vector] only 3 dims is supported!",__FILE__,__LINE__,__FUNCTION__); } return Vector({ v1[1]*v2[2] - v1[2]*v2[1], v1[2]*v2[0] - v1[0]*v2[2], v1[0]*v2[1] - v1[1]*v2[0]}); } inline static Vector normal(const Vector &v1, const Vector &v2) { return crossProduct(v1,v2).normalized(); } inline static Vector normal(const Vector &v1, const Vector &v2, const Vector &v3) { return crossProduct((v2-v1),(v3-v1)).normalized(); } inline static T dotProduct(const Vector &A, const Vector &B) { T finalVal = 0; for (int i = 0; i < N; ++i) { finalVal += A[i]*B[i]; } return finalVal; } inline T operator [](const int &index) const { if(index > (N-1)) { throw Exception(1,"[Vector]: index out of 
memory! \n", __FILE__, __LINE__, __FUNCTION__); } return _value[index]; } inline T &operator [](const int &index) { if(index > (N-1)) { throw Exception(1,"[Vector]: index out of memory! \n", __FILE__, __LINE__, __FUNCTION__); } return _value[index]; } inline friend Vector operator+ (const Vector &A, const Vector &B) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = A[i] + B[i]; } return tmp; } inline friend Vector operator+ (T A, const Vector &B) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = A + B[i]; } return tmp; } inline friend Vector operator+ (const Vector &A, T B) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = A[i] + B; } return tmp; } inline friend Vector operator- (const Vector &A, const Vector &B) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = A[i] - B[i]; } return tmp; } inline friend Vector operator- (T A, const Vector &B) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = A - B[i]; } return tmp; } inline friend Vector operator- (const Vector &A, T B) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = A[i] - B; } return tmp; } inline friend Vector operator- (const Vector &A) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = 0 - A[i]; } return tmp; } inline friend Vector operator* (const Vector &A, const Vector &B) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = A[i] * B[i]; } return tmp; } inline friend Vector operator* (T A, const Vector &B) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = A * B[i]; } return tmp; } inline friend Vector operator* (const Vector &A, T B) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = A[i] * B; } return tmp; } inline friend Vector operator/ (const Vector &A, T B) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = A[i] / B; } return tmp; } inline friend bool operator== (const Vector &A, const Vector &B) { if(A.isF32Vec()) { for (int i = 0; i < N; ++i) { if(fabsf(A[i] - B[i])>MSNH_F32_EPS) { return false; } } } else if(A.isF64Vec()) { for (int i = 0; i < N; ++i) { if(fabsf(A[i] - 
B[i])>MSNH_F64_EPS) { return false; } } } else { for (int i = 0; i < N; ++i) { if(A[i] != B[i]) { return false; } } } return true; } inline friend bool operator!= (const Vector &A, const Vector &B) { if(std::is_same<T,float>::value) { for (int i = 0; i < N; ++i) { if(fabsf(A[i] - B[i])>MSNH_F32_EPS) { return true; } } } else if(std::is_same<T,double>::value) { for (int i = 0; i < N; ++i) { if(fabsf(A[i] - B[i])>MSNH_F64_EPS) { return true; } } } else { for (int i = 0; i < N; ++i) { if(A[i] != B[i]) { return true; } } } return false; } inline Vector &operator +=(const Vector &A) { for (int i = 0; i < N; ++i) { this->_value[i]+=A[i]; } return *this; } inline Vector &operator +=(T A) { for (int i = 0; i < N; ++i) { this->_value[i]+=A; } return *this; } inline Vector &operator -=(const Vector &A) { for (int i = 0; i < N; ++i) { this->_value[i]-=A[i]; } return *this; } inline Vector &operator -=(T A) { for (int i = 0; i < N; ++i) { this->_value[i]-=A; } return *this; } inline Vector &operator *=(const Vector &A) { for (int i = 0; i < N; ++i) { this->_value[i]*=A[i]; } return *this; } inline Vector &operator *=(T A) { for (int i = 0; i < N; ++i) { this->_value[i]*=A; } return *this; } inline Vector &operator /=(T A) { #ifdef USE_OMP #pragma omp parallel for num_threads(OMP_THREAD) reduction(+:len) #endif for (int i = 0; i < N; ++i) { this->_value[i]/=A; } return *this; } private: T _value[N]; }; typedef Vector<3,double> EulerD; typedef Vector<3,double> TransformD; typedef Vector<3,double> RotationVecD; typedef Vector<2,double> Vector2D; typedef Vector<3,double> Vector3D; typedef Vector<4,double> Vector4D; typedef Vector<3,float> EulerF; typedef Vector<3,float> TransformF; typedef Vector<3,float> RotationVecF; typedef Vector<2,float> Vector2F; typedef Vector<3,float> Vector3F; typedef Vector<4,float> Vector4F; } #endif
bls.c
/* Licensed under a 3-clause BSD style license - see LICENSE.rst */ #include <math.h> #include <float.h> #include <stdlib.h> #if defined(_OPENMP) #include <omp.h> #endif #ifndef INFINITY #define INFINITY (1.0 / 0.0) #endif void compute_objective( double y_in, double y_out, double ivar_in, double ivar_out, int obj_flag, double* objective, double* log_likelihood, double* depth, double* depth_err, double* depth_snr ) { if (obj_flag) { double arg = y_out - y_in; *log_likelihood = 0.5*ivar_in*arg*arg; *objective = *log_likelihood; } else { *depth = y_out - y_in; *depth_err = sqrt(1.0 / ivar_in + 1.0 / ivar_out); *depth_snr = *depth / *depth_err; *objective = *depth_snr; } } inline double wrap_into (double x, double period) { return x - period * floor(x / period); } int run_bls ( // Inputs int N, // Length of the time array double* t, // The list of timestamps double* y, // The y measured at ``t`` double* ivar, // The inverse variance of the y array int n_periods, double* periods, // The period to test in units of ``t`` int n_durations, // Length of the durations array double* durations, // The durations to test in units of ``bin_duration`` int oversample, // The number of ``bin_duration`` bins in the maximum duration int obj_flag, // A flag indicating the periodogram type // 0 - depth signal-to-noise // 1 - log likelihood // Outputs double* best_objective, // The value of the periodogram at maximum double* best_depth, // The estimated depth at maximum double* best_depth_err, // The uncertainty on ``best_depth`` double* best_duration, // The best fitting duration in units of ``t`` double* best_phase, // The phase of the mid-transit time in units of // ``t`` double* best_depth_snr, // The signal-to-noise ratio of the depth estimate double* best_log_like // The log likelihood at maximum ) { // Start by finding the period and duration ranges double max_period = periods[0], min_period = periods[0]; int k; for (k = 1; k < n_periods; ++k) { if (periods[k] < min_period) 
min_period = periods[k]; if (periods[k] > max_period) max_period = periods[k]; } if (min_period < DBL_EPSILON) return 1; double min_duration = durations[0], max_duration = durations[0]; for (k = 1; k < n_durations; ++k) { if (durations[k] < min_duration) min_duration = durations[k]; if (durations[k] > max_duration) max_duration = durations[k]; } if ((max_duration > min_period) || (min_duration < DBL_EPSILON)) return 2; // Compute the durations in terms of bin_duration double bin_duration = min_duration / ((double)oversample); int max_n_bins = (int)(ceil(max_period / bin_duration)) + oversample; int nthreads, blocksize = max_n_bins+1; #pragma omp parallel { #if defined(_OPENMP) nthreads = omp_get_num_threads(); #else nthreads = 1; #endif } // Allocate the work arrays double* mean_y_0 = (double*)malloc(nthreads*blocksize*sizeof(double)); if (mean_y_0 == NULL) { return -2; } double* mean_ivar_0 = (double*)malloc(nthreads*blocksize*sizeof(double)); if (mean_ivar_0 == NULL) { free(mean_y_0); return -3; } // Pre-accumulate some factors. double min_t = INFINITY; double sum_y = 0.0, sum_ivar = 0.0; int i; #pragma omp parallel for reduction(+:sum_y), reduction(+:sum_ivar) for (i = 0; i < N; ++i) { min_t = fmin(min_t, t[i]); sum_y += y[i] * ivar[i]; sum_ivar += ivar[i]; } // Loop over periods and do the search int p; #pragma omp parallel for for (p = 0; p < n_periods; ++p) { #if defined(_OPENMP) int ithread = omp_get_thread_num(); #else int ithread = 0; #endif int block = blocksize * ithread; double period = periods[p]; int n_bins = (int)(ceil(period / bin_duration)) + oversample; double* mean_y = mean_y_0 + block; double* mean_ivar = mean_ivar_0 + block; // This first pass bins the data into a fine-grain grid in phase from zero // to period and computes the weighted sum and inverse variance for each // bin. 
int n, ind; for (n = 0; n < n_bins+1; ++n) { mean_y[n] = 0.0; mean_ivar[n] = 0.0; } for (n = 0; n < N; ++n) { int ind = (int)(wrap_into(t[n] - min_t, period) / bin_duration) + 1; mean_y[ind] += y[n] * ivar[n]; mean_ivar[ind] += ivar[n]; } // To simplify calculations below, we wrap the binned values around and pad // the end of the array with the first ``oversample`` samples. for (n = 1, ind = n_bins - oversample; n <= oversample; ++n, ++ind) { mean_y[ind] = mean_y[n]; mean_ivar[ind] = mean_ivar[n]; } // To compute the estimates of the in-transit flux, we need the sum of // mean_y and mean_ivar over a given set of transit points. To get this // fast, we can compute the cumulative sum and then use differences between // points separated by ``duration`` bins. Here we convert the mean arrays // to cumulative sums. for (n = 1; n <= n_bins; ++n) { mean_y[n] += mean_y[n-1]; mean_ivar[n] += mean_ivar[n-1]; } // Then we loop over phases (in steps of n_bin) and durations and find the // best fit value. By looping over durations here, we get to reuse a lot of // the computations that we did above. 
double objective, log_like, depth, depth_err, depth_snr; best_objective[p] = -INFINITY; int k; for (k = 0; k < n_durations; ++k) { int dur = (int)(round(durations[k] / bin_duration)); int n_max = n_bins-dur; for (n = 0; n <= n_max; ++n) { // Estimate the in-transit and out-of-transit flux double y_in = mean_y[n+dur] - mean_y[n]; double ivar_in = mean_ivar[n+dur] - mean_ivar[n]; double y_out = sum_y - y_in; double ivar_out = sum_ivar - ivar_in; // Skip this model if there are no points in transit if ((ivar_in < DBL_EPSILON) || (ivar_out < DBL_EPSILON)) { continue; } // Normalize to compute the actual value of the flux y_in /= ivar_in; y_out /= ivar_out; // Either compute the log likelihood or the signal-to-noise // ratio compute_objective(y_in, y_out, ivar_in, ivar_out, obj_flag, &objective, &log_like, &depth, &depth_err, &depth_snr); // If this is the best result seen so far, keep it if (y_out >= y_in && objective > best_objective[p]) { best_objective[p] = objective; // Compute the other parameters compute_objective(y_in, y_out, ivar_in, ivar_out, (obj_flag == 0), &objective, &log_like, &depth, &depth_err, &depth_snr); best_depth[p] = depth; best_depth_err[p] = depth_err; best_depth_snr[p] = depth_snr; best_log_like[p] = log_like; best_duration[p] = dur * bin_duration; best_phase[p] = fmod(n*bin_duration + 0.5*best_duration[p] + min_t, period); } } } } // Clean up free(mean_y_0); free(mean_ivar_0); return 0; }
utils.c
#include "utils.h" void init_random_matrix(int* matrix, int num_lines_per_proc, int size){ int i, j; srand(size); #pragma omp parallel for private(i, j) collapse(2) for(i = 0; i < num_lines_per_proc; i++){ for(j = 0; j < size; j++){ matrix[i * size + j] = rand() % (size * 2) + 1; } } } int compare_asc(const void* a, const void* b) { int A = *(int*)a; int B = *(int*)b; return A - B; } int compare_dsc(const void* a, const void* b) { int A = *(int*)a; int B = *(int*)b; return B - A; } void sort_lines_start_finish(int* matrix, int remaining_lines, int size){ int i, j; int line[size]; int start = size - remaining_lines - 1; int finish = size; for (i = start; i < finish; i++){ #pragma omp parallel for private(j) for (j = 0; j < size ; j++) { line[j] = matrix[i * size + j]; } if (i % 2 == 0) {// even lines > ascending qsort(line, size, sizeof(int), compare_asc); } else { // odd lines > decending qsort(line, size, sizeof(int), compare_dsc); } #pragma omp parallel for private(j) for (j = 0; j < size ; j++) { matrix[i * size + j] = line[j]; } } } void sort_lines(int* matrix, int num_lines_per_proc, int size){ int i, j; int line[size]; for (i = 0; i < num_lines_per_proc; i++){ #pragma omp parallel for private(j) for (j = 0; j < size ; j++) { line[j] = matrix[i * size + j]; } if (i % 2 == 0) {// even lines > ascending qsort(line, size, sizeof(int), compare_asc); } else { // odd lines > decending qsort(line, size, sizeof(int), compare_dsc); } #pragma omp parallel for private(j) for (j = 0; j < size ; j++) { matrix[i * size + j] = line[j]; } } } void sort_columns(int* matrix, int size) { int i, j; int col[size]; for (j = 0; j < size; j++){ #pragma omp parallel for private(i) for (i = 0; i < size; i++) { col[i] = matrix[i * size + j]; } qsort(col, size, sizeof(int), compare_asc); #pragma omp parallel for private(i) for (i = 0; i < size; i++) { matrix[i * size + j] = col[i] ; } } } int check_sorted(int* matrix, int num_lines_per_proc, int size){ int i, j, even, current; for (i = 
0; i < size; i++) { if (i % 2 == 0){ even = 1; // even line > ascending } else { even = 0; // odd line > descending } for (j = 0; j < size - 1; j++) { current = i * size + j; if (even){ if (matrix[current] > matrix[current + 1]) return 0; } else { if (matrix[current] < matrix[current + 1]) return 0; } } } return 1; } void print_matrix(int* matrix, int num_lines_per_proc, int size){ int i, j; for (i = 0; i < num_lines_per_proc; i++) { for (j = 0; j < size; j++) { printf("%d\t", matrix[i * size + j]); } printf("\n"); } printf("\n"); }
GB_binop__lxor_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lxor_fp64) // A.*B function (eWiseMult): GB (_AemultB_08__lxor_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__lxor_fp64) // A.*B function (eWiseMult): GB (_AemultB_04__lxor_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_fp64) // A*D function (colscale): GB (_AxD__lxor_fp64) // D*A function (rowscale): GB (_DxB__lxor_fp64) // C+=B function (dense accum): GB (_Cdense_accumB__lxor_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__lxor_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_fp64) // C=scalar+B GB (_bind1st__lxor_fp64) // C=scalar+B' GB (_bind1st_tran__lxor_fp64) // C=A+scalar GB (_bind2nd__lxor_fp64) // C=A'+scalar GB (_bind2nd_tran__lxor_fp64) // C type: double // A type: double // A pattern? 0 // B type: double // B pattern? 
0 // BinaryOp: cij = ((aij != 0) != (bij != 0)) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ((x != 0) != (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LXOR || GxB_NO_FP64 || GxB_NO_LXOR_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__lxor_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lxor_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lxor_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lxor_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = 
(double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lxor_fp64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lxor_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; double alpha_scalar ; double beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((double *) alpha_scalar_in)) ; beta_scalar = (*((double *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__lxor_fp64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const 
GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lxor_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__lxor_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lxor_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lxor_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p 
= 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; double bij = GBX (Bx, p, false) ; Cx [p] = ((x != 0) != (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lxor_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = GBX (Ax, p, false) ; Cx [p] = ((aij != 0) != (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((x != 0) != (aij != 0)) ; \ } GrB_Info GB (_bind1st_tran__lxor_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((aij != 0) != (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__lxor_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
DRB046-doall2-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* Two-dimensional array computation: Only one loop is associated
   with the omp for construct. The inner loop's loop iteration variable
   needs an explicit private() clause, otherwise it will be shared by
   default. */

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Shared 100x100 work array: written by the two parallel passes below,
   then read by the sequential printout.  File-scope, so zero-initialized. */
int a[100][100];

int main()
{
  int i;
  int j;

  /* First pass: initialize a[i][j] = i*200 + j.  Both i and j are listed
     in private(), so each thread has its own copies and there is no data
     race here — this is the race-free ("no") variant of DataRaceBench
     DRB046.  NOTE(review): the inner "parallel for" nests a second
     parallel region inside the first; nested parallelism is typically
     disabled by default, in which case the inner region runs with one
     thread — this is intentional benchmark structure, do not "fix" it. */
#pragma omp parallel for private (i,j)
  for (i = 0; i <= 99; i += 1) {
#pragma omp parallel for private (j)
    for (j = 0; j <= 99; j += 1) {
      a[i][j] = i * 200 + j;
    }
  }

  /* Second pass: increment every element, with the same (race-free)
     data-sharing attributes as the first pass. */
#pragma omp parallel for private (i,j)
  for (i = 0; i <= 99; i += 1) {
#pragma omp parallel for private (j)
    for (j = 0; j <= 99; j += 1) {
      a[i][j] = a[i][j] + 1;
    }
  }

  /* Sequential printout; values are concatenated with no separator,
     matching the benchmark's reference output format. */
  for (i = 0; i <= 99; i += 1) {
    for (j = 0; j <= 99; j += 1) {
      printf("%d",a[i][j]);
    }
  }
  return 0;
}
GB_unaryop__lnot_uint16_uint8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint16_uint8 // op(A') function: GB_tran__lnot_uint16_uint8 // C type: uint16_t // A type: uint8_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, aij) \ uint16_t z = (uint16_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint16_uint8 ( uint16_t *Cx, // Cx and Ax may be aliased uint8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz 
; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint16_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__land_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__land_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__land_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__land_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__land_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__land_uint16) // A*D function (colscale): GB (_AxD__land_uint16) // D*A function (rowscale): GB (_DxB__land_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__land_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__land_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_uint16) // C=scalar+B GB (_bind1st__land_uint16) // C=scalar+B' GB (_bind1st_tran__land_uint16) // C=A+scalar GB (_bind2nd__land_uint16) // C=A'+scalar GB (_bind2nd_tran__land_uint16) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 
0 // BinaryOp: cij = ((aij != 0) && (bij != 0)) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ((x != 0) && (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LAND || GxB_NO_UINT16 || GxB_NO_LAND_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__land_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__land_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__land_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__land_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t 
*restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__land_uint16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__land_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__land_uint16) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__land_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__land_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__land_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__land_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = ((x != 0) && (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__land_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = ((aij != 0) && (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((x != 0) && (aij != 0)) ; \ } GrB_Info GB (_bind1st_tran__land_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((aij != 0) && (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__land_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unop__identity_int16_uint32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__identity_int16_uint32)
// op(A') function: GB (_unop_tran__identity_int16_uint32)

// C type: int16_t
// A type: uint32_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: z is just x)
#define GB_OP(z, x) \
    z = x ;

// casting (uint32_t -> int16_t, value truncated per C conversion rules)
#define GB_CAST(z, aij) \
    int16_t z = (int16_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int16_t z = (int16_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Typecast all anz entries of Ax (uint32_t) into Cx (int16_t).  Handles both
// the dense/sparse case (Ab == NULL, every slot present) and the bitmap case
// (Ab marks which slots hold entries).
GrB_Info GB (_unop_apply__identity_int16_uint32)
(
    int16_t *Cx,                // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // all entries present: straight element-wise cast
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint32_t aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_int16_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose kernel expands GB_CAST_OP defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
otherother-conv.c
/* Test and timing harness program for developing a multichannel multikernel convolution (as used in deep learning networks) Note there are some simplifications around this implementation, in particular with respect to computing the convolution at edge pixels of the image. Author: David Gregg Date: February 2017 Version 1.3 : Fixed which loop variables were being incremented in write_out(); Fixed dimensions of output and control_output matrices in main function Version 1.2 : Changed distribution of test data to (hopefully) eliminate random walk of floating point error; Also introduced checks to restrict kernel-order to a small set of values Version 1.1 : Fixed bug in code to create 4d matrix */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <assert.h> #include <omp.h> #include <math.h> #include <x86intrin.h> /* the following two definitions of DEBUGGING control whether or not debugging information is written out. To put the program into debugging mode, uncomment the following line: */ //#define DEBUGGING(_x) _x /* to stop the printing of debugging information, use the following line: */ #define DEBUGGING(_x) /* write 3d matrix to stdout */ void write_out(float *** a, int dim0, int dim1, int dim2) { int i, j, k; for ( i = 0; i < dim0; i++ ) { printf("Outer dimension number %d\n", i); for ( j = 0; j < dim1; j++ ) { for ( k = 0; k < dim2 - 1; k++ ) { printf("%f, ", a[i][j][k]); } // print end of line printf("%f\n", a[i][j][dim2-1]); } } } /* create new empty 4d matrix */ float **** new_empty_4d_matrix(int dim0, int dim1, int dim2, int dim3) { float **** result = malloc(dim0 * sizeof(float***)); float *** mat1 = malloc(dim0 * dim1 * sizeof(float**)); float ** mat2 = malloc(dim0 * dim1 * dim2 * sizeof(float*)); float * mat3 = malloc(dim0 * dim1 * dim2 *dim3 * sizeof(float)); int i, j, k; for ( i = 0; i < dim0; i++ ) { result[i] = &(mat1[i*dim1]); for ( j = 0; j < dim1; j++ ) { result[i][j] = &(mat2[i*dim1*dim2 + j*dim2]); for ( k = 0; k < dim2; 
k++ ) { result[i][j][k] = &(mat3[i*dim1*dim2*dim3+j*dim2*dim3+k*dim3]); } } } return result; } /* create new empty 3d matrix */ float *** new_empty_3d_matrix(int dim0, int dim1, int dim2) { float **** mat4d; float *** mat3d; // create a 4d matrix with single first dimension mat4d = new_empty_4d_matrix(1, dim0, dim1, dim2); // now throw away out first dimension mat3d = mat4d[0]; free(mat4d); return mat3d; } /* take a copy of the matrix and return in a newly allocated matrix */ float **** copy_4d_matrix(float **** source_matrix, int dim0, int dim1, int dim2, int dim3) { int i, j, k, l; float **** result = new_empty_4d_matrix(dim0, dim1, dim2, dim3); for ( i = 0; i < dim0; i++ ) { for ( j = 0; j < dim1; j++ ) { for ( k = 0; k < dim2; k++ ) { for ( l = 0; l < dim3; l++ ) { result[i][j][k][l] = source_matrix[i][j][k][l]; } } } } return result; } /* create a matrix and fill it with random numbers */ float **** gen_random_4d_matrix(int dim0, int dim1, int dim2, int dim3) { float **** result; int i, j, k, l; struct timeval seedtime; int seed; result = new_empty_4d_matrix(dim0, dim1, dim2, dim3); /* use the microsecond part of the current time as a pseudorandom seed */ gettimeofday(&seedtime, NULL); seed = seedtime.tv_usec; srandom(seed); /* fill the matrix with random numbers */ const int range = 1 << 16; // 2^16 const int bias = 1 << 12; // 2^12 float offset = 4.0; for ( i = 0; i < dim0; i++ ) { for ( j = 0; j < dim1; j++ ) { for ( k = 0; k < dim2; k++ ) { for ( l = 0; l < dim3; l++ ) { // generate uniform random integer with mean of zero long long rand = random(); // now cut down the range and bias the mean to reduce // the likelihood of large floating point round-off errors int reduced_range = (rand % range); float num = (((float) reduced_range) / ((float) bias))+offset; result[i][j][k][l] = num; } } } } return result; } /* create a matrix and fill it with random numbers */ float *** gen_random_3d_matrix(int dim0, int dim1, int dim2) { float **** mat4d; float *** mat3d; 
// create a 4d matrix with single first dimension mat4d = gen_random_4d_matrix(1, dim0, dim1, dim2); // now throw away out first dimension mat3d = mat4d[0]; free(mat4d); return mat3d; } /* check the sum of absolute differences is within reasonable epsilon */ void check_result(float *** result, float *** control, int dim0, int dim1, int dim2) { int i, j, k; double sum_abs_diff = 0.0; const double EPSILON = 0.0625; //printf("SAD\n"); for ( i = 0; i < dim0; i++ ) { for ( j = 0; j < dim1; j++ ) { for ( k = 0; k < dim2; k++ ) { double diff = fabs(control[i][j][k] - result[i][j][k]); assert( diff >= 0.0 ); sum_abs_diff = sum_abs_diff + diff; } } } if ( sum_abs_diff > EPSILON ) { fprintf(stderr, "WARNING: sum of absolute differences (%f) > EPSILON (%f)\n", sum_abs_diff, EPSILON); } else { printf("COMMENT: sum of absolute differences (%f) within acceptable range (%f)\n", sum_abs_diff, EPSILON); } } /* the slow but correct version of matmul written by David */ void multichannel_conv(float *** image, float **** kernels, float *** output, int width, int height, int nchannels, int nkernels, int kernel_order) { int h, w, x, y, c, m; for ( m = 0; m < nkernels; m++ ) { for ( w = 0; w < width; w++ ) { for ( h = 0; h < height; h++ ) { float sum = 0.0; for ( c = 0; c < nchannels; c++ ) { for ( x = 0; x < kernel_order; x++) { for ( y = 0; y < kernel_order; y++ ) { sum += image[w+x][h+y][c] * kernels[m][c][x][y]; } } output[m][w][h] = sum; } } } } } /*Our version that is not parallelized*/ void team_conv_not_parallel(float *** image, float **** kernels, float *** output, int width, int height, int nchannels, int nkernels, int kernel_order) { int h, w,x, y, c, m; register float sum; register float sum2; for ( m = 0; m < nkernels; m++ ) { for ( w = 0; w < width; w++ ) { for ( h = 0; h < height; h+=2 ) { //do the thing in the slides where you reduce number of memory accesses //gives diminishing returns though sum = 0.0; sum2 = 0.0; for ( c = 0; c < nchannels; c++ ) { for ( x = 0; x < 
kernel_order; x++) { for ( y = 0; y < kernel_order; y++ ) { sum += image[w+x][h+y][c] * kernels[m][c][x][y]; sum2 += image[w+x][h+1+y][c] * kernels[m][c][x][y]; } } output[m][w][h] = sum; output[m][w][h+1]=sum2; } } } } } void superFastPart2(float *** image, float **** kernels, float *** output, int width, int height, int nchannels, int kernel_order, int m){ int h, w,x, y, c; float sum1, sum2, sum3, sum4; for ( w = 0; w < width; w++ ) { for ( h = 0; h < height; h+4 ) { //do the thing in the slides where you reduce number of memory accesses //gives diminishing returns though sum1 = 0.0; sum2 = 0.0; sum3 = 0.0; sum4 = 0.0; for ( c = 0; c < nchannels; c++ ) { for ( x = 0; x < kernel_order; x++) { for ( y = 0; y < kernel_order; y++ ) { sum1 += image[w+x][h+y][c] * kernels[m][c][x][y]; sum2 += image[w+x][h+y+1][c] * kernels[m][c][x][y]; sum3 += image[w+x][h+y+2][c] * kernels[m][c][x][y]; sum4 += image[w+x][h+y+3][c] * kernels[m][c][x][y]; } } output[m][w][h] = sum1; output[m][w][h+1] = sum2; output[m][w][h+2] = sum3; output[m][w][h+3] = sum4; } } } } void superFast(float *** image, float **** kernels, float *** output, int width, int height, int nchannels, int nkernels, int kernel_order){ int m; #pragma omp parallel for for(m = 0; m < nkernels; m++){ superFastPart2(image, kernels, output, width, height, nchannels, kernel_order, m); } } /* the fast version of matmul written by the team */ void team_conv(float *** image, float **** kernels, float *** output, int width, int height, int nchannels, int nkernels, int kernel_order) { /*Our version*/ if(width < 32 || (nchannels < 4 && nkernels < 4 && kernel_order < 4)) team_conv_not_parallel(image, kernels, output, width, height, nchannels, nkernels, kernel_order); else if (height % 4 == 0){ printf("Parallel vectors\n"); int h, w,x, y, c, m; __m128 sumVector, imageVector, kernelsVector, product; float iv[4]; #pragma omp parallel for private(sumVector, imageVector, kernelsVector, product, iv, h, w,x, y, c, m) for ( m = 0; m < 
nkernels; m++ ) { for ( w = 0; w < width; w++ ) { for ( h = 0; h < height; h+=4 ) { //do the thing in the slides where you reduce number of memory accesses //gives diminishing returns though sumVector = _mm_setzero_ps(); for ( c = 0; c < nchannels; c++ ) { for ( x = 0; x < kernel_order; x++) { for ( y = 0; y < kernel_order; y++ ) { //create a vector from the image matrix iv[0] = image[w+x][h+y][c]; iv[1] = image[w+x][h+y+1][c]; iv[2] = image[w+x][h+y+2][c]; iv[3] = image[w+x][h+y+3][c]; imageVector = _mm_loadu_ps(iv); //create a vector from the kernel matrix kernelsVector = _mm_set1_ps(kernels[m][c][x][y]); //multiply the two vectors (8 floats being multiplied together at once) product = _mm_mul_ps(imageVector, kernelsVector); //add the product to a running total sumVector = _mm_add_ps(sumVector, product); } } float sum[4]; _mm_storeu_ps(sum, sumVector); int i; for(i = 0; i < 4; i++) output[m][w][h+i] = sum[i]; } } } } //superFast(image, kernels, output, width, height, nchannels, nkernels, kernel_order); } else if(height % 2 == 0){ int h, w,x, y, c, m; register float sum; register float sum2; #pragma omp parallel for private(h, w, x, y, c, m, sum, sum2) for ( m = 0; m < nkernels; m++ ) { for ( w = 0; w < width; w++ ) { for ( h = 0; h < height; h+=2 ) { //do the thing in the slides where you reduce number of memory accesses //gives diminishing returns though sum = 0.0; sum2 = 0.0; for ( c = 0; c < nchannels; c++ ) { for ( x = 0; x < kernel_order; x++) { for ( y = 0; y < kernel_order; y++ ) { sum += image[w+x][h+y][c] * kernels[m][c][x][y]; sum2 += image[w+x][h+1+y][c] * kernels[m][c][x][y]; } } output[m][w][h] = sum; output[m][w][h+1]=sum2; } } } } }//else else{ int h, w,x, y, c, m; register float sum; #pragma omp parallel for private(h, w, x, y, c, m, sum) for ( m = 0; m < nkernels; m++ ) { for ( w = 0; w < width; w++ ) { for ( h = 0; h < height; h++ ) { //do the thing in the slides where you reduce number of memory accesses //gives diminishing returns though sum = 
0.0; for ( c = 0; c < nchannels; c++ ) { for ( x = 0; x < kernel_order; x++) { for ( y = 0; y < kernel_order; y++ ) { sum += image[w+x][h+y][c] * kernels[m][c][x][y]; } } output[m][w][h] = sum; } } } } } } int main(int argc, char ** argv) { //float image[W][H][C]; //float kernels[M][C][K][K]; //float output[M][W][H]; float *** image, **** kernels, *** output; float *** control_output; long long mul_time; int width, height, kernel_order, nchannels, nkernels; struct timeval start_time; struct timeval stop_time; if ( argc != 6 ) { fprintf(stderr, "Usage: conv-harness <image_width> <image_height> <kernel_order> <number of channels> <number of kernels>\n"); exit(1); } else { width = atoi(argv[1]); height = atoi(argv[2]); kernel_order = atoi(argv[3]); nchannels = atoi(argv[4]); nkernels = atoi(argv[5]); } switch ( kernel_order ) { case 1: case 3: case 5: case 7: break; default: fprintf(stderr, "FATAL: kernel_order must be 1, 3, 5 or 7, not %d\n", kernel_order); exit(1); } /* allocate the matrices */ image = gen_random_3d_matrix(width+kernel_order, height + kernel_order, nchannels); kernels = gen_random_4d_matrix(nkernels, nchannels, kernel_order, kernel_order); output = new_empty_3d_matrix(nkernels, width, height); control_output = new_empty_3d_matrix(nkernels, width, height); //DEBUGGING(write_out(A, a_dim1, a_dim2)); /* record starting time of David's code*/ gettimeofday(&start_time, NULL); /* use a simple multichannel convolution routine to produce control result */ multichannel_conv(image, kernels, control_output, width, height, nchannels, nkernels, kernel_order); /* record finishing time */ gettimeofday(&stop_time, NULL); mul_time = (stop_time.tv_sec - start_time.tv_sec) * 1000000L + (stop_time.tv_usec - start_time.tv_usec); printf("David's conv time: %lld microseconds\n", mul_time); long long davidsTime = mul_time; /* record starting time of team's code*/ gettimeofday(&start_time, NULL); /* perform student team's multichannel convolution */ team_conv(image, 
kernels, output, width, height, nchannels, nkernels, kernel_order); /* record finishing time */ gettimeofday(&stop_time, NULL); mul_time = (stop_time.tv_sec - start_time.tv_sec) * 1000000L + (stop_time.tv_usec - start_time.tv_usec); printf("Team conv time: %lld microseconds\n", mul_time); long long teamsTime = mul_time; long long factor = davidsTime / teamsTime; if(factor!=0) printf("Your implementation is between %lld and %lld times faster\n", factor, factor+1); DEBUGGING(write_out(output, nkernels, width, height)); /* now check that the team's multichannel convolution routine gives the same answer as the known working version */ check_result(output, control_output, nkernels, width, height); return 0; }
nested_serialized.c
// RUN: %libomp-compile-and-run | FileCheck %s // RUN: %libomp-compile-and-run | %sort-threads | FileCheck --check-prefix=THREADS %s // REQUIRES: ompt #include "callback.h" #include <omp.h> int main() { omp_set_nested(0); #pragma omp parallel num_threads(4) { print_ids(0); print_ids(1); #pragma omp parallel num_threads(4) { print_ids(0); print_ids(1); print_ids(2); } } // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=0x{{[0-f]+}}, invoker=[[PARALLEL_INVOKER:.+]] // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // Note that we cannot ensure that the worker threads have already called barrier_end and implicit_task_end before parallel_end! 
// CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]] // THREADS: 0: NULL_POINTER=[[NULL:.*$]] // THREADS: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=0x{{[0-f]+}}, invoker=[[PARALLEL_INVOKER:.+]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[MASTER_ID]]: level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=[[NESTED_PARALLEL_FUNCTION:0x[0-f]+]], invoker=[[PARALLEL_INVOKER]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], 
task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[MASTER_ID]]: level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]] // THREADS-NOT: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=[[NESTED_PARALLEL_FUNCTION]], invoker=[[PARALLEL_INVOKER]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id=[[PARALLEL_ID]], 
task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=[[NESTED_PARALLEL_FUNCTION]], invoker=[[PARALLEL_INVOKER]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_PARALLEL_ID]], 
task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=[[NESTED_PARALLEL_FUNCTION]], invoker=[[PARALLEL_INVOKER]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: level 2: parallel_id=0, task_id=[[PARENT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]] // THREADS-NOT: {{^}}[[THREAD_ID]]: 
ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] return 0; }
sparse-new.c
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /**********************************************************************************************/ #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <libgen.h> #include <omp.h> #define EPSILON 1.0E-6 unsigned int bots_arg_size = 50; unsigned int bots_arg_size_1 = 80; #define TRUE 1 #define FALSE 0 #define BOTS_RESULT_SUCCESSFUL 1 #define BOTS_RESULT_UNSUCCESSFUL 0 /*********************************************************************** * checkmat: **********************************************************************/ int checkmat (float *M, float *N) { int i, j; float r_err; int bad = 0; for (i = 0; i < bots_arg_size_1; i++) { for (j = 0; j < bots_arg_size_1; j++) { r_err = M[i*bots_arg_size_1+j] - N[i*bots_arg_size_1+j]; if (r_err < 0.0 ) r_err = -r_err; r_err = r_err / M[i*bots_arg_size_1+j]; if(r_err > EPSILON) { fprintf(stderr,"Checking failure: 
A[%d][%d]=%f B[%d][%d]=%f; Relative Error=%f\n", i,j, M[i*bots_arg_size_1+j], i,j, N[i*bots_arg_size_1+j], r_err); bad = 1; } } } return bad ? FALSE : TRUE; } /*********************************************************************** * genmat: **********************************************************************/ void genmat (float *M[]) { int null_entry, init_val, i, j, ii, jj; float *p; init_val = 1325; /* generating the structure */ for (ii=0; ii < bots_arg_size; ii++) { for (jj=0; jj < bots_arg_size; jj++) { /* computing null entries */ null_entry=FALSE; if ((ii<jj) && (ii%3 !=0)) null_entry = TRUE; if ((ii>jj) && (jj%3 !=0)) null_entry = TRUE; if (ii%2==1) null_entry = TRUE; if (jj%2==1) null_entry = TRUE; if (ii==jj) null_entry = FALSE; if (ii==jj-1) null_entry = FALSE; if (ii-1 == jj) null_entry = FALSE; /* allocating matrix */ if (null_entry == FALSE){ M[ii*bots_arg_size+jj] = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float)); if ((M[ii*bots_arg_size+jj] == NULL)) { fprintf(stderr,"Error: Out of memory\n"); exit(101); } /* initializing matrix */ p = M[ii*bots_arg_size+jj]; for (i = 0; i < bots_arg_size_1; i++) { for (j = 0; j < bots_arg_size_1; j++) { init_val = (3125 * init_val) % 65536; (*p) = (float)((init_val - 32768.0) / 16384.0); p++; } } } else { M[ii*bots_arg_size+jj] = NULL; } } } } /*********************************************************************** * print_structure: **********************************************************************/ void print_structure(char *name, float *M[]) { int ii, jj; fprintf(stderr,"Structure for matrix %s @ 0x%p\n",name, M); for (ii = 0; ii < bots_arg_size; ii++) { for (jj = 0; jj < bots_arg_size; jj++) { if (M[ii*bots_arg_size+jj]!=NULL) {fprintf(stderr,"x");} else fprintf(stderr," "); } fprintf(stderr,"\n"); } fprintf(stderr,"\n"); } /*********************************************************************** * allocate_clean_block: 
**********************************************************************/ float * allocate_clean_block() { int i,j; float *p, *q; p = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float)); q=p; if (p!=NULL){ for (i = 0; i < bots_arg_size_1; i++) for (j = 0; j < bots_arg_size_1; j++){(*p)=0.0; p++;} } else { fprintf(stderr,"Error: Out of memory\n"); exit (101); } return (q); } /*********************************************************************** * lu0: **********************************************************************/ void lu0(float *diag) { int i, j, k; for (k=0; k<bots_arg_size_1; k++) for (i=k+1; i<bots_arg_size_1; i++) { diag[i*bots_arg_size_1+k] = diag[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k]; for (j=k+1; j<bots_arg_size_1; j++) diag[i*bots_arg_size_1+j] = diag[i*bots_arg_size_1+j] - diag[i*bots_arg_size_1+k] * diag[k*bots_arg_size_1+j]; } } /*********************************************************************** * bdiv: **********************************************************************/ void bdiv(float *diag, float *row) { int i, j, k; for (i=0; i<bots_arg_size_1; i++) for (k=0; k<bots_arg_size_1; k++) { row[i*bots_arg_size_1+k] = row[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k]; for (j=k+1; j<bots_arg_size_1; j++) row[i*bots_arg_size_1+j] = row[i*bots_arg_size_1+j] - row[i*bots_arg_size_1+k]*diag[k*bots_arg_size_1+j]; } } /*********************************************************************** * bmod: **********************************************************************/ void bmod(float *row, float *col, float *inner) { int i, j, k; for (i=0; i<bots_arg_size_1; i++) for (j=0; j<bots_arg_size_1; j++) for (k=0; k<bots_arg_size_1; k++) inner[i*bots_arg_size_1+j] = inner[i*bots_arg_size_1+j] - row[i*bots_arg_size_1+k]*col[k*bots_arg_size_1+j]; } /*********************************************************************** * fwd: **********************************************************************/ void fwd(float *diag, float *col) { 
int i, j, k; for (j=0; j<bots_arg_size_1; j++) for (k=0; k<bots_arg_size_1; k++) for (i=k+1; i<bots_arg_size_1; i++) col[i*bots_arg_size_1+j] = col[i*bots_arg_size_1+j] - diag[i*bots_arg_size_1+k]*col[k*bots_arg_size_1+j]; } void sparselu_init (float ***pBENCH, char *pass) { *pBENCH = (float **) malloc(bots_arg_size*bots_arg_size*sizeof(float *)); genmat(*pBENCH); print_structure(pass, *pBENCH); } void sparselu_par_call(float **BENCH) { int ii, jj, kk; fprintf(stderr,"Computing SparseLU Factorization (%dx%d matrix with %dx%d blocks) ", bots_arg_size,bots_arg_size,bots_arg_size_1,bots_arg_size_1); #pragma omp parallel #pragma omp single { double d1 = omp_get_wtime(); for (kk=0; kk<bots_arg_size; kk++) { #pragma omp task firstprivate(kk) shared(BENCH) depend(inout:BENCH[kk*bots_arg_size+kk]) lu0(BENCH[kk*bots_arg_size+kk]); for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) #pragma omp task firstprivate(kk, jj) shared(BENCH) depend(in:BENCH[kk*bots_arg_size+jj]) depend(inout:BENCH[kk*bots_arg_size+kk]) fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]); for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) #pragma omp task firstprivate(kk, ii) shared(BENCH) depend(inout:BENCH[ii*bots_arg_size+kk]) depend(in:BENCH[kk*bots_arg_size+kk]) bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]); for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) { if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block(); #pragma omp task firstprivate(kk, jj, ii) shared(BENCH) depend(in:BENCH[kk*bots_arg_size+jj],BENCH[ii*bots_arg_size+kk]) depend(inout:BENCH[ii*bots_arg_size+jj]) bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]); } } #pragma omp taskwait double d2 = omp_get_wtime(); fprintf(stderr," Par Time: %f\n",d2-d1); } fprintf(stderr," 
completed!\n"); } void sparselu_seq_call(float **BENCH) { int ii, jj, kk; double d1 = omp_get_wtime(); for (kk=0; kk<bots_arg_size; kk++) { lu0(BENCH[kk*bots_arg_size+kk]); for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) { fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]); } for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) { bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]); } for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) { if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block(); bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]); } } double d2 = omp_get_wtime(); fprintf(stderr,"Serial Time: %f\n",d2-d1); } void sparselu_fini (float **BENCH, char *pass) { print_structure(pass, BENCH); } int sparselu_check(float **SEQ, float **BENCH) { int ii,jj,ok=1; for (ii=0; ((ii<bots_arg_size) && ok); ii++) { for (jj=0; ((jj<bots_arg_size) && ok); jj++) { if ((SEQ[ii*bots_arg_size+jj] == NULL) && (BENCH[ii*bots_arg_size+jj] != NULL)) ok = FALSE; if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] == NULL)) ok = FALSE; if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] != NULL)) ok = checkmat(SEQ[ii*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]); if(!ok)abort(); } } if (ok) fprintf(stderr,"stämmer\n"); if (ok) return BOTS_RESULT_SUCCESSFUL; else return BOTS_RESULT_UNSUCCESSFUL; } int main ( int argc, char *argv[]) { float **SEQ,**BENCH; sparselu_init(&BENCH,"benchmark"); sparselu_par_call(BENCH); sparselu_fini(BENCH,"benchmark"); sparselu_init(&SEQ,"serial"); sparselu_seq_call(SEQ); sparselu_fini(SEQ,"serial"); fprintf(stderr,"Testar om Parallel och Seriell version stämmer med varandra...\n"); return (sparselu_check(SEQ,BENCH) == BOTS_RESULT_SUCCESSFUL) ? 0 : 1; }
utils.h
/** * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef MINDQUANTUM_UTILS_H_ #define MINDQUANTUM_UTILS_H_ #ifdef ENABLE_OPENMP # include <omp.h> #endif // ENABLE_OPENMP // NOLINT #include <x86intrin.h> #include <complex> #include <ctime> #include <map> #include <numeric> #include <random> #include <set> #include <string> #include <utility> #include <vector> #include "core/type.h" namespace mindquantum { extern const VT<CT<MT>> POLAR; template <typename T, typename ST> CT<T> ComplexInnerProduct(const ST *v1, const ST *v2, Index len) { // len is (1UL>>n_qubits)*2 ST real_part = 0; ST imag_part = 0; auto size = len / 2; #pragma omp parallel for reduction(+ : real_part, imag_part) for (Index i = 0; i < size; i++) { real_part += v1[2 * i] * v2[2 * i] + v1[2 * i + 1] * v2[2 * i + 1]; imag_part += v1[2 * i] * v2[2 * i + 1] - v1[2 * i + 1] * v2[2 * i]; } CT<T> result = {static_cast<T>(real_part), static_cast<T>(imag_part)}; return result; } template <typename T, typename ST> CT<T> ComplexInnerProductWithControl(const ST *v1, const ST *v2, Index len, Index ctrlmask) { // len is (1UL>>n_qubits)*2 ST real_part = 0; ST imag_part = 0; auto size = len / 2; #pragma omp parallel for reduction(+ : real_part, imag_part) for (Index i = 0; i < size; i++) { if ((i & ctrlmask) == ctrlmask) { real_part += v1[2 * i] * v2[2 * i] + v1[2 * i + 1] * v2[2 * i + 1]; imag_part += v1[2 * i] * v2[2 * i + 1] - v1[2 * i + 1] * v2[2 * i]; } } CT<T> result = 
{static_cast<T>(real_part), static_cast<T>(imag_part)}; return result; } Index GetControlMask(const VT<Index> &ctrls); PauliMask GetPauliMask(const VT<PauliWord> &pws); // inline int CountOne(uint32_t n) { return __popcntd(n); } // inline int CountOne(uint64_t n) { return __popcntq(n);} inline int CountOne(uint32_t n) { int result; asm("popcnt %1,%0" : "=r"(result) : "r"(n)); return result; } inline int CountOne(int64_t n) { uint32_t *p = reinterpret_cast<uint32_t *>(&n); return CountOne(p[0]) + CountOne(p[1]); } // inline int CountOne(uint64_t n) { // uint8_t *p = reinterpret_cast<uint8_t *>(&n); // return POPCNTTABLE[p[0]] + POPCNTTABLE[p[1]] + POPCNTTABLE[p[2]] + // POPCNTTABLE[p[3]] + POPCNTTABLE[p[4]] + POPCNTTABLE[p[5]] + // POPCNTTABLE[p[6]] + POPCNTTABLE[p[7]]; // } // inline int CountOne(uint32_t n) { // uint8_t *p = reinterpret_cast<uint8_t *>(&n); // return POPCNTTABLE[p[0]] + POPCNTTABLE[p[1]] + POPCNTTABLE[p[2]] + // POPCNTTABLE[p[3]]; // } template <typename T> PauliTerm<T> GenerateRandomPauliTerm(Index n_qubits) { std::default_random_engine e(std::clock()); std::uniform_real_distribution<T> ut(-1.0, 1.0); auto coeff = ut(e); std::uniform_int_distribution<char> uit(0, 3); VT<PauliWord> pws; for (Index i = 0; i < n_qubits; i++) { auto p = uit(e); if (p != 3) { pws.push_back(std::make_pair(i, (p + 'X'))); } } return std::make_pair(pws, coeff); } template <typename T> void ShowPauliTerm(const PauliTerm<T> &pt) { std::cout << pt.second << " ["; for (Index i = 0; i < static_cast<Index>(pt.first.size()); i++) { auto &pw = pt.first[i]; std::cout << pw.second << pw.first; if (i != static_cast<Index>(pt.first.size()) - 1) { std::cout << " "; } } std::cout << "]" << std::endl; } TimePoint NOW(); int TimeDuration(TimePoint start, TimePoint end); template <typename T> void PrintVec(T *vec, size_t len) { auto cvec = reinterpret_cast<CTP<T>>(vec); for (size_t i = 0; i < len / 2; i++) { std::cout << cvec[i] << std::endl; } } } // namespace mindquantum #endif // 
MINDQUANTUM_UTILS_H_
GB_unaryop__lnot_fp64_bool.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): any change here must instead be made in the Generator/
// template and the file regenerated, or it will be lost on the next build.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__lnot_fp64_bool
// op(A') function: GB_tran__lnot_fp64_bool

// C type:   double
// A type:   bool
// cast:     double cij = (double) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: logical NOT of the (already boolean) input
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting: result is stored as double (0.0 or 1.0)
#define GB_CASTING(z, x) \
    double z = (double) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_FP64 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Dense apply: Cx [p] = (double) !(Ax [p]) for all anz entries, split
// statically across nthreads OpenMP threads.  Returns GrB_NO_VALUE when
// this kernel has been compiled out via GB_DISABLE.
GrB_Info GB_unop__lnot_fp64_bool
(
    double *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop lives in the shared template GB_unaryop_transpose.c,
// instantiated here with the macros defined above.
GrB_Info GB_tran__lnot_fp64_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
add.c
//-------------------------------------------------------------------------// // // // This benchmark is an OpenMP C version of the NPB BT code. This OpenMP // // C version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the OpenMP Fortran versions in // // "NPB3.3-OMP" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this OpenMP C version to // // cmp@aces.snu.ac.kr // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: cmp@aces.snu.ac.kr // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------// #include "header.h" #include "timers.h" //--------------------------------------------------------------------- // addition of update to the vector u //--------------------------------------------------------------------- void add() { int i, j, k, m; //kai //int k15; // consistent_data(&k15, "int", 1); if (timeron) timer_start(t_add); #pragma omp parallel for default(shared) private(i,j,k,m) for (k = 1; k <= grid_points[2]-2; k++) { for (j = 1; j <= grid_points[1]-2; j++) { for (i = 1; i <= grid_points[0]-2; i++) { for (m = 0; m < 5; m++) { u[k][j][i][m] = 
u[k][j][i][m] + rhs[k][j][i][m]; } } } //kai k15 = k; // printf("k15=%p\n",&k15); } if (timeron) timer_stop(t_add); }
GB_binop__first_fp32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): edit the Generator/ template and regenerate instead; the
// FIRST operator ignores its second operand, which is why many variants
// below are "(none)" and several statement slots are intentionally empty.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__first_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_01__first_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__first_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_03__first_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__first_fp32)
// A*D function (colscale):         GB (_AxD__first_fp32)
// D*A function (rowscale):         GB (_DxB__first_fp32)
// C+=B function (dense accum):     GB (_Cdense_accumB__first_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__first_fp32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__first_fp32)
// C=scalar+B                       GB ((none))
// C=scalar+B'                      GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                      GB ((none))

// C type:    float
// A type:    float
// B,b type:  float
// BinaryOp:  cij = aij

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
// (empty: FIRST never reads its second operand B)
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: FIRST(x,y) = x
#define GB_BINOP(z,x,y,i,j) \
    z = x ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FIRST || GxB_NO_FP32 || GxB_NO_FIRST_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (FIRST is none of these, so this variant is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__first_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// NOTE(review): the template body is "#if 0"-ed out for FIRST; the function
// is still emitted so it can be linked, but it is effectively a no-op.
GrB_Info GB (_Cdense_accumB__first_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__first_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__first_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__first_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__first_fp32)
(
    GrB_Matrix C, const int C_sparsity,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks, const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__first_fp32)
(
    GrB_Matrix C, const int C_sparsity, const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks, const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__first_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B, const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__first_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct,
    const GrB_Matrix A, const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__first_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// (compiled out: FIRST(scalar, B) is a constant and handled elsewhere)

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        ;
        ;
        Cx [p] = x ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = aij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    ;                      \
    ;                      \
    Cx [pC] = x ;          \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                  \
{                                          \
    float aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = aij ;                        \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 24; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,4);t1++) { lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8)); ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-5,6)),ceild(8*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(4*t1+Ny+5,24)),floord(8*t2+Ny+4,24)),floord(8*t1-8*t2+Nz+Ny+3,24));t3++) { for (t4=max(max(max(0,ceild(t1-255,256)),ceild(8*t2-Nz-1020,1024)),ceild(24*t3-Ny-1020,1024));t4<=min(min(min(min(floord(Nt+Nx-4,1024),floord(4*t1+Nx+5,1024)),floord(8*t2+Nx+4,1024)),floord(24*t3+Nx+20,1024)),floord(8*t1-8*t2+Nz+Nx+3,1024));t4++) { for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),24*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),24*t3+22),1024*t4+1022),8*t1-8*t2+Nz+5);t5++) { for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) { lbv=max(1024*t4,t5+1); ubv=min(1024*t4+1023,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } 
} } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
LU_omp.c
/********************************************************************** Modify the code-Add OpenMP directives to parallelize the LU kernel ***********************************************************************/ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <omp.h> #include "utils.h" int main(int argc, char * argv[]) { int X=atoi(argv[1]); int Y=X; double ** A=malloc2D(X,Y); init2D(A,X,Y); int i,j,k; double l; struct timeval ts,tf; double total_time; gettimeofday(&ts,NULL); for (k=0;k<X-1;k++) #pragma omp parallel for private(i,j,l) for (i=k+1;i<X;i++) { l=A[i][k]/A[k][k]; for (j=k;j<Y;j++) A[i][j]-=l*A[k][j]; } gettimeofday(&tf,NULL); total_time=(tf.tv_sec-ts.tv_sec)+(tf.tv_usec-ts.tv_usec)*0.000001; printf("LU-OpenMP\t%d\t%.3lf\n",X,total_time); char * filename="output_omp"; print2DFile(A,X,Y,filename); return 0; }
zpotri.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_potri
 *
 *  Computes the inverse of a Hermitian positive definite
 *  matrix A using the Cholesky factorization
 *  \f[ A = U^H \times U, \f]
 *  or
 *  \f[ A = L \times L^H. \f]
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          = PlasmaUpper: Upper triangle of A is stored;
 *          = PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] n
 *          The order of the matrix A. n >= 0.
 *
 * @param[in,out] pA
 *          On entry, the triangular factor U or L from the Cholesky
 *          factorization A = U^H*U or A = L*L^H, as computed by
 *          plasma_zpotrf.
 *          On exit, the upper or lower triangle of the (Hermitian)
 *          inverse of A, overwriting the input factor U or L.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,n).
 *
 *******************************************************************************
 *
 * @retval PLASMA_SUCCESS successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 * @retval > 0 if i, the (i,i) element of the factor U or L is
 *         zero, and the inverse could not be computed.
 *
 *******************************************************************************
 *
 * @sa plasma_cpotri
 * @sa plasma_dpotri
 * @sa plasma_spotri
 *
 ******************************************************************************/
int plasma_zpotri(plasma_enum_t uplo, int n, plasma_complex64_t *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -4;
    }

    // quick return
    if (imax(n, 0) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_trtri(plasma, PlasmaComplexDouble, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Initialize sequence.
    // FIX(review): the results of plasma_sequence_init() and
    // plasma_request_init() were previously assigned to retval but never
    // checked, unlike every other fallible call in this function.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, &sequence, &request);

        // Perform computation.
        plasma_omp_zpotri(uplo, A, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(A, pA, lda, &sequence, &request);
    }
    // implicit synchronization

    // Free matrix A in tile layout.
    plasma_desc_destroy(&A);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_potri
 *
 *  Computes the inverse of a complex Hermitian
 *  positive definite matrix A using the Cholesky factorization
 *  A = U^H*U or A = L*L^H computed by plasma_zpotrf.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] A
 *          On entry, the triangular factor U or L from the Cholesky
 *          factorization A = U^H*U or A = L*L^H, as computed by
 *          plasma_zpotrf.
 *          On exit, the upper or lower triangle of the (Hermitian)
 *          inverse of A, overwriting the input factor U or L.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).  Check
 *          the sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values.  The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_zpotri
 * @sa plasma_omp_zpotri
 * @sa plasma_omp_cpotri
 * @sa plasma_omp_dpotri
 * @sa plasma_omp_spotri
 *
 ******************************************************************************/
void plasma_omp_zpotri(plasma_enum_t uplo, plasma_desc_t A,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.n == 0) {
        return;
    }

    // Invert triangular part.
    plasma_pztrtri(uplo, PlasmaNonUnit, A, sequence, request);

    // Compute product of upper and lower triangle.
    plasma_pzlauum(uplo, A, sequence, request);
}
md2_fmt_plug.c
/* MD2 cracker patch for JtR. Hacked together during May of 2013 by Dhiru
 * Kholia <dhiru at openwall.com>.
 *
 * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
 * it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_md2_;
#elif FMT_REGISTERS_H
john_register_one(&fmt_md2_);
#else

#include <string.h>
#include "arch.h"
#include "sph_md2.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"

#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
// OMP_SCALE tuned on core i7 quad core HT
// 1 - 153k
// 64 - 433k
// 128 - 572k
// 256 - 612k
// 512 - 543k
// 1k - 680k ** chosen
// 2k - 660k
// 4k - 670k
// 8k - 680k
// 16k - 650k
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 32
#else
#define OMP_SCALE (1024)
#endif // __MIC__
#endif // OMP_SCALE
#endif // _OPENMP

#include "memdbg.h"

#define FORMAT_LABEL "MD2"
#define FORMAT_NAME ""
#define FORMAT_TAG "$md2$"
#define TAG_LENGTH 5
#define ALGORITHM_NAME "MD2 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 16
#define SALT_SIZE 0
#define BINARY_ALIGN 4
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1

// Self-test vectors: hashes accepted both with and without the "$md2$" tag.
static struct fmt_tests md2__tests[] = {
	{"$md2$ab4f496bfb2a530b219ff33031fe06b0", "message digest"},
	{"ab4f496bfb2a530b219ff33031fe06b0", "message digest"},
	{"921adc047dad311394d2b8553002042d","len=125_____________________________________________________________________________________________________________________x"},
	{NULL}
};

// Per-candidate plaintext and per-candidate computed digest buffers,
// sized in init() according to max_keys_per_crypt.
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];

// Allocate key/digest buffers; under OpenMP the key space is scaled by
// thread count times OMP_SCALE (see tuning table above).
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
}

// Release the buffers allocated in init().
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

// Accept a ciphertext iff (after an optional "$md2$" tag) it is exactly
// 32 hex digits with no trailing garbage.
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;
	int extra;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	if (hexlenl(p, &extra) != 32 || extra)
		return 0;
	return 1;
}

// Canonicalize a ciphertext to the tagged form "$md2$<32 hex>".
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TAG_LENGTH + BINARY_SIZE * 2 + 1];

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;

	memcpy(out, FORMAT_TAG, TAG_LENGTH);
	strnzcpy(out + TAG_LENGTH, ciphertext, BINARY_SIZE*2 + 1);
	return out;
}

// Decode the 32 hex digits into the 16 raw binary digest bytes.
// The union forces alignment suitable for word-sized compares.
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[32];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		p = strrchr(ciphertext, '$') + 1;
	else
		p = ciphertext;

	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

// Partial-hash accessors over the first computed digest word, used by the
// cracker's hash tables at increasing resolutions.
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

// Hash all queued candidates. Without OpenMP there is no for-loop: the
// braced body runs once with index 0 (MAX_KEYS_PER_CRYPT is 1 then).
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		sph_md2_context ctx;

		sph_md2_init(&ctx);
		sph_md2(&ctx, saved_key[index], strlen(saved_key[index]));
		sph_md2_close(&ctx, (unsigned char*)crypt_out[index]);
	}
	return count;
}

// Quick screen: compare only the first ARCH_SIZE bytes of each digest;
// cmp_one()/cmp_exact() confirm full matches.
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

// Full 16-byte digest comparison for one candidate.
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

// No salt and full binary already compared; nothing further to verify.
static int cmp_exact(char *source, int index)
{
	return 1;
}

// Store a candidate plaintext, truncated to PLAINTEXT_LENGTH.
static void md2_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

// Format registration table; field order follows struct fmt_main /
// struct fmt_methods in formats.h and must not be rearranged.
struct fmt_main fmt_md2_ = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		md2__tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		md2_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
GB_convert_full_to_sparse.c
//------------------------------------------------------------------------------ // GB_convert_full_to_sparse: convert a matrix from full to sparse //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB.h" GrB_Info GB_convert_full_to_sparse // convert matrix from full to sparse ( GrB_Matrix A, // matrix to convert from full to sparse GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT_MATRIX_OK (A, "A converting full to sparse", GB0) ; ASSERT (GB_IS_FULL (A) || A->nzmax == 0) ; ASSERT (!GB_IS_BITMAP (A)) ; ASSERT (!GB_IS_SPARSE (A)) ; ASSERT (!GB_IS_HYPERSPARSE (A)) ; ASSERT (!GB_ZOMBIES (A)) ; ASSERT (!GB_JUMBLED (A)) ; ASSERT (!GB_PENDING (A)) ; GBURBLE ("(full to sparse) ") ; //-------------------------------------------------------------------------- // allocate A->p and A->i //-------------------------------------------------------------------------- int64_t avdim = A->vdim ; int64_t avlen = A->vlen ; int64_t anz = avdim * avlen ; ASSERT (GB_Index_multiply (&anz, avdim, avlen) == true) ; int64_t *restrict Ap = NULL ; size_t Ap_size = 0 ; int64_t *restrict Ai = NULL ; size_t Ai_size = 0 ; Ap = GB_MALLOC (avdim+1, int64_t, &Ap_size) ; Ai = GB_MALLOC (anz, int64_t, &Ai_size) ; if (Ap == NULL || Ai == NULL) { // out of memory GB_FREE (&Ap, Ap_size) ; GB_FREE (&Ai, Ai_size) ; return (GrB_OUT_OF_MEMORY) ; } A->p = Ap ; A->p_size = Ap_size ; A->i = Ai ; A->i_size = Ai_size ; A->plen = avdim ; A->nvec = avdim ; A->nvec_nonempty = (avlen == 0) ? 
0 : avdim ; //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (anz, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // fill the A->p and A->i pattern //-------------------------------------------------------------------------- int64_t k ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (k = 0 ; k <= avdim ; k++) { Ap [k] = k * avlen ; } int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { Ai [p] = p % avlen ; } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- ASSERT_MATRIX_OK (A, "A converted from full to sparse", GB0) ; ASSERT (GB_IS_SPARSE (A)) ; ASSERT (!GB_ZOMBIES (A)) ; ASSERT (!GB_JUMBLED (A)) ; ASSERT (!GB_PENDING (A)) ; return (GrB_SUCCESS) ; }
mixed_tentusscher_myo_epi_2004_S2_15.c
// Scenario 1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
//
// Mixed cellular model: each cell is tagged via an extra_data mask
// (0 = myocardium, anything else = epicardium) and stepped with the
// matching RHS. State vector layout (NEQ = 17, see header):
//   sv[0]=V, sv[1..12]=gates (m,h,j,xr1,xr2,xs,s,r,d,f,fCa,g),
//   sv[13]=Cai, sv[14]=CaSR, sv[15]=Nai, sv[16]=Ki
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S2_15.h"

// Report static model metadata: resting potential and ODE system size.
GET_CELL_MODEL_DATA(init_cell_model_data)
{
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

// Load per-cell initial conditions. Cells mapped to 0 get the myocardium
// steady state, all others the epicardium steady state. The hard-coded
// vectors below are pre-computed ("Elnaz's") steady states; the commented
// block is the model's textbook default, kept for reference.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
    static bool first_call = true;

    if(first_call)
    {
        print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
        first_call = false;
    }

    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    // Initial conditions for TenTusscher myocardium
    if (mapping[sv_id] == 0)
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;           //M
        sv[2] = 0.75;          //H
        sv[3] = 0.75f;         //J
        sv[4] = 0.f;           //Xr1
        sv[5] = 1.f;           //Xr2
        sv[6] = 0.f;           //Xs
        sv[7] = 1.f;           //S
        sv[8] = 0.f;           //R
        sv[9] = 0.f;           //D
        sv[10] = 1.f;          //F
        sv[11] = 1.f;          //FCa
        sv[12] = 1.f;          //G
        sv[13] = 0.0002;       //Cai
        sv[14] = 0.2f;         //CaSR
        sv[15] = 11.6f;        //Nai
        sv[16] = 138.3f;       //Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
    // Initial conditions for TenTusscher epicardium
    else
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;           //M
        sv[2] = 0.75;          //H
        sv[3] = 0.75f;         //J
        sv[4] = 0.f;           //Xr1
        sv[5] = 1.f;           //Xr2
        sv[6] = 0.f;           //Xs
        sv[7] = 1.f;           //S
        sv[8] = 0.f;           //R
        sv[9] = 0.f;           //D
        sv[10] = 1.f;          //F
        sv[11] = 1.f;          //FCa
        sv[12] = 1.f;          //G
        sv[13] = 0.0002;       //Cai
        sv[14] = 0.2f;         //CaSR
        sv[15] = 11.6f;        //Nai
        sv[16] = 138.3f;       //Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.3587896185485,0.00134905676198612,0.774520199073544,0.774374680229778,0.000180109822242613,0.482910475890506,0.00298448938077068,0.999998277316546,2.00218048619546e-08,1.94462488964510e-05,0.999771668739369,1.00653067523994,0.999978756030136,5.82428650958820e-05,0.509754826080024,10.0682799523111,139.518219822205};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
}

// Advance all requested cells by num_steps explicit steps of size dt,
// one OpenMP iteration per cell.
// NOTE(review): the model mask is read as mapping[i] (loop position) while
// the state is addressed by sv_id (cells_to_solve[i] when present) —
// confirm these indices are intentionally different when cells_to_solve
// is non-NULL.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    uint32_t sv_id;
    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++)
    {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;

        for (int j = 0; j < num_steps; ++j)
        {
            if (mapping[i] == 0)
                solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
            else
                solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One explicit time step for a myocardium cell. Note: RHS_cpu_myo returns
// the *updated state* in rDY (not time derivatives), so it is copied back
// into sv directly.
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_myo(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// TenTusscher 2004 myocardium cell update. Despite the "RHS" name this
// computes the state at t+dt: gates use exponential (Rush-Larsen style)
// updates, concentrations use explicit Euler, and the voltage uses
// forward Euler on the total ionic + stimulus current.
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];
    real sm = sv[1];
    real sh = sv[2];
    real sj = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs = sv[6];
    real ss = sv[7];
    real sr = sv[8];
    real sd = sv[9];
    real sf = sv[10];
    real sfca = sv[11];
    real sg = sv[12];
    real Cai = sv[13];
    real CaSR = sv[14];
    real Nai = sv[15];
    real Ki = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Myocardium cell
    real Gks=0.062;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Myocardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Working variables: currents, fluxes, equilibrium potentials,
    // rate constants and gate targets/time constants.
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr +
              IKs +
              IK1 +
              Ito +
              INa +
              IbNa +
              ICaL +
              IbCa +
              INaK +
              INaCa +
              IpCa +
              IpK +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    Irel=A*sd*sg;
    Ileak=0.00008f*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    // analytic solution of the quadratic buffering equation
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // [!] Myocardium cell
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));    // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g gates may not increase while the membrane is depolarized
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}

// One explicit time step for an epicardium cell (same update scheme as
// solve_model_ode_cpu_myo: rDY receives the updated state).
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_epi(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// TenTusscher 2004 epicardium cell update. Identical scheme to
// RHS_cpu_myo, but the default conductances are immediately overwritten
// by the tuned `parameters` vector below (S2_15 scenario), and the SR
// release/leak terms use the fitted arel/crel/Vleak values.
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];
    real sm = sv[1];
    real sh = sv[2];
    real sj = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs = sv[6];
    real ss = sv[7];
    real sr = sv[8];
    real sd = sv[9];
    real sf = sv[10];
    real sfca = sv[11];
    real sg = sv[12];
    real Cai = sv[13];
    real CaSR = sv[14];
    real Nai = sv[15];
    real Ki = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Epicardium cell
    real Gks=0.245;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Epicardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Fitted S2_15 parameter set; overrides the defaults declared above.
    real parameters []={14.3174380565213,0.000348866478117368,0.000146417624096425,0.000608670241592454,0.271667442889255,0.133939514082262,0.188380543281873,4.94330134063706,0.0151351354091834,2.05397398481996,1086.72577633731,0.000456799361732942,0.364037161557358,0.0196983207210989,0.00114898938065171,6.40398010166762e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    // Working variables: currents, fluxes, equilibrium potentials,
    // rate constants and gate targets/time constants.
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr +
              IKs +
              IK1 +
              Ito +
              INa +
              IbNa +
              ICaL +
              IbCa +
              INaK +
              INaCa +
              IpCa +
              IpK +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    // SR release/leak use the fitted arel/crel/Vleak (cf. constants in myo)
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));    // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g gates may not increase while the membrane is depolarized
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
main.c
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <limits.h> #include <math.h> #include <time.h> #include <omp.h> #include "hashmap.h" #define BOARD_SIDE 3 // side of the board #define BOARD_LENGTH BOARD_SIDE * BOARD_SIDE // total length of an array representing a board #define MAX_DEPTH 20 // the default maximum depth for the DFS algo #define MAX_LEVEL 20 // the maximum number of OMP levels for recursion #define COLOR 0 // enable the colors in the terminal output #define DEBUG 0 // print the debug output int max_depth; // maximum dpeth for the dfs algorithm int best_depth; // the current depth where a solution has been found int *best_moves; // the array containing the current best path long iter = 0; // the iterations count /** * Enumeration describing the possible directions */ typedef enum direction direction; enum direction { UP, DOWN, LEFT, RIGHT }; /** * Type describing a move with its distance from the correct solution */ typedef struct { direction dir; int pos; int manhattan_distance; } move; /** * Print the square board * * @param board the board array * @param size the size of the board's side */ void print_board(const int *board, int size) { for (int i = 0; i < size; i++) { for (int j = 0; j < size; j++) { int b = board[j + i * size]; if (b == 0 && COLOR) printf("\033[1;31m"); printf("%d ", b); if (b == 0 && COLOR) printf("\033[0m"); } printf("\n"); } } /** * Check if the board is a solution. 
The correct solution is: * * 1 2 3 * 4 5 6 * 7 8 0 * * @param board the board array to check * @return 1 if the solution is correct, 0 otherwise */ int check_solved(const int *board, int size) { if (board[size - 1] != 0) return 0; for (int i = 0; i <= size - 2; ++i) if (board[i] != i + 1) return 0; return 1; } /** * Create a copy of an array and swap two of its elements * @param origin pointer to the original array * @param dest pointer to the destination array * @param size size of the array * @param from first element to swap * @param to second element to swap */ void swap(const int *origin, int *dest, int size, int from, int to) { memcpy(dest, origin, size * sizeof(int)); int temp = dest[to]; dest[to] = dest[from]; dest[from] = temp; } /** * Shuffle an array randomly with a 0 at (0,0) * @param array a pointer to the board to shuffle * @param n the size of the array */ void shuffle(int *array, size_t n) { srand(time(NULL)); if (n > 1) { size_t i; for (i = 1; i < n - 1; i++) { size_t j = rand() % (i + 1); if (j == 0) j = 1; int temp = array[j]; array[j] = array[i]; array[i] = temp; } } } /** * Sum the Manhattan distances for each cell of the board while swapping the cell 0 with the cell direction * @param board pointer to the original array * @param length size of the array * @param side size of one side of the 2d array * @param direction element to swap with the cell 0 * @return Manhattan score */ size_t manhattan_distance(const int board[], const size_t length, const size_t side, int direction) { size_t total = 0; size_t i; for (i = 0; i < length; i++) { // Compute where the element should be int x = (board[i] - 1) % side; int y = (board[i] - 1) / side; // Element 0 goes at the end if(board[i] == 0) { x = BOARD_SIDE - 1; y = BOARD_SIDE - 1; } // Compute where the element is int x2 = i % side; int y2 = i / side; total += abs(x - x2) + abs(y - y2); } return total; } /** * Compare the Manhattan scores of two moves * @param a move 1 * @param b move 2 * @return to 
comparison between the two scores */ int cmp_manhattan_distances(const void *a, const void *b) { return ((move *) a)->manhattan_distance - ((move *) b)->manhattan_distance; } /** * Solve the 8-puzzle using the recursive depth-first traversal * * @param board the board array * @param depth the current depth of the recursion * @return a boolean indicating if a solution is found (1 for found, 0 otherwise) */ void solve_dfs(int board[], hashmap *hm, int* path, int depth) { #pragma omp atomic update iter++; if (iter % 1000 == 0 && DEBUG) printf("current depth: %d, best depth: %d, iterations: %ld, thread: %d\n", depth, best_depth, iter, omp_get_thread_num()); if (depth >= max_depth || depth >= best_depth) return; if (check_solved(board, BOARD_LENGTH)) { #pragma omp critical { if(best_depth > depth - 1){ best_depth = depth - 1; memcpy(best_moves, path, best_depth * sizeof(int)); } } return; if(DEBUG)printf("solved! Best depth: %d, iterations: %ld, thread: %d\n", best_depth, iter, omp_get_thread_num()); //print_board(board, BOARD_SIDE); } // calculate position of the 0 (empty cell) int pos; for (pos = 0; pos < BOARD_LENGTH; pos++) if (board[pos] == 0) break; // compute the different moves, -1 if not possible move directions[4]; directions[0].pos = pos % BOARD_SIDE == 0 ? -1 : pos - 1; directions[0].dir = LEFT; directions[1].pos = pos % BOARD_SIDE == BOARD_SIDE - 1 ? -1 : pos + 1; directions[1].dir = RIGHT; directions[2].pos = pos - BOARD_SIDE; directions[2].dir = UP; directions[3].pos = pos + BOARD_SIDE; directions[3].dir = DOWN; // compute Manhattan distances for (int i = 0; i < 4; i++) { board[pos] = board[directions[i].pos]; board[directions[i].pos] = 0; directions[i].manhattan_distance = directions[i].pos > 0 && directions[i].pos < BOARD_LENGTH ? 
manhattan_distance(board, BOARD_LENGTH, BOARD_SIDE, directions[i].pos) : INT_MAX; board[directions[i].pos] = board[pos]; board[pos] = 0; } // sort by manhattan distance qsort(directions, 4, sizeof(move), cmp_manhattan_distances); #pragma omp taskloop shared(hm) firstprivate(board) if(omp_get_level() < MAX_LEVEL) for (int i = 0; i < 4; i++) { int direction = directions[i].pos; if (direction >= 0 && direction < BOARD_LENGTH) { // Create new board and swap the 0 with a possible index int *new_board = malloc(BOARD_LENGTH * sizeof(int)); swap(board, new_board, BOARD_LENGTH, pos, direction); // Create a new path with the new index int* new_path = malloc((depth + 1) * sizeof(int)); memcpy(new_path, path, depth *sizeof(int)); new_path[depth] = directions[i].dir; if(hashmap_insert(hm, new_board, depth)) { solve_dfs(new_board, hm, new_path, depth + 1); } free(new_board); free(new_path); } } } /** * Parse a string of numbers into a usable grid. * @param board the array to fill * @param string the string to convertinto a board */ void parse_board(int *board, const char *string) { int length = strlen(string); int i; for (i = 0; i < length; i++) { board[i] = string[i] - '0'; } } /** * Print a .csv formatted string of the results of the algorithm * * @param time_taken the execution time * @param thread_count the number of threads */ void print_csv_report(double time_taken, int thread_count) { printf("%d;%d;%d;%ld;%f;%d\n", thread_count, best_depth, max_depth, iter, time_taken, best_depth == INT_MAX); } /** * Print a nice output of the results of the algorithm * * @param time_taken the execution time * @param thread_count the number of threads */ void print_text_report(double time_taken, int thread_count) { printf("==================================\n"); if (best_depth != INT_MAX) { printf("Solution found!\nthreads:\t%d\nbest depth:\t%d\nmax depth:\t%d\niterations:\t%ld\ntime(sec):\t%f\npath:\n\n", thread_count, best_depth, max_depth, iter, time_taken); char *dirs[] = {"UP", 
"DOWN", "LEFT", "RIGHT"}; for(int i = 0; i < best_depth + 1; i++){ printf(" %3d. %s\n",i, dirs[best_moves[i]]); } } else { printf("No solution found.\nthreads:\t%d\nmax depth:\t%d\niterations:\t%ld\ntime(sec):\t%f", thread_count, max_depth, iter, time_taken); } } /** * The program's entry point * @param argc arguments count * @param argv arguments value * @return the return code of the program */ int main(int argc, char const *argv[]) { best_depth = INT_MAX; // parsing the max depth argument if (argc >= 2) max_depth = atoi(argv[1]); else max_depth = MAX_DEPTH; // parsing the board argument int board[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; if (argc >= 3) { if (strlen(argv[2]) != 9) { printf("The given board is incorrect!\n"); exit(-1); } parse_board(board, argv[2]); } else { shuffle(board, (size_t) BOARD_LENGTH); } int thread_count = omp_get_num_procs(); // num of thread if (argc >= 4) thread_count = atoi(argv[3]); int output = 0; // type of output: 0 for text, 1 for csv if (argc >= 5) output = atoi(argv[4]); hashmap *h = hashmap_create(); best_moves = malloc(max_depth * sizeof(int)); if(!output) print_board(board, BOARD_SIDE); long t = clock(); #pragma omp parallel num_threads(thread_count) firstprivate(board) shared(h) { #pragma omp single nowait solve_dfs(board, h, NULL, 0); // start the search } t = clock() - t; double time_taken = ((double) t) / CLOCKS_PER_SEC; // in seconds if (output) { print_csv_report(time_taken, thread_count); } else { print_text_report(time_taken, thread_count); } free(best_moves); hashmap_free(h); return 0; }
GB_dense_subassign_21.c
//------------------------------------------------------------------------------
// GB_dense_subassign_21: C(:,:) = x where x is a scalar
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// C(:,:) = x where C is a matrix and x is a scalar

// The method runs in two phases: (1) if C is not already a dense non-hypersparse
// matrix with its own (non-shallow) content and no zombies, its pattern is
// rebuilt as a completely dense cvlen-by-cvdim matrix; (2) every entry of C->x
// is overwritten with the scalar, typecast to the type of C.

#include "GB_dense.h"
#include "GB_select.h"
#include "GB_Pending.h"

GrB_Info GB_dense_subassign_21      // C(:,:) = x; C is a matrix and x a scalar
(
    GrB_Matrix C,                   // input/output matrix
    const void *scalar,             // input scalar
    const GrB_Type atype,           // type of the input scalar
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT_MATRIX_OK (C, "C for C(:,:)=x", GB0) ;
    ASSERT (scalar != NULL) ;

    // any prior pending tuples are discarded, and all zombies will be killed
    ASSERT (GB_PENDING_OK (C)) ;
    ASSERT (GB_ZOMBIES_OK (C)) ;
    ASSERT_TYPE_OK (atype, "atype for C(:,:)=x", GB0) ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    int64_t cvdim = C->vdim ;
    int64_t cvlen = C->vlen ;
    GrB_Index cnzmax ;      // number of entries in the dense result: cvlen*cvdim
    bool ok = GB_Index_multiply (&cnzmax, cvlen, cvdim) ;
    if (!ok)
    {
        // problem too large: cvlen*cvdim overflows
        return (GB_OUT_OF_MEMORY) ;
    }
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (cnzmax, chunk, nthreads_max) ;

    //--------------------------------------------------------------------------
    // typecast the scalar into the same type as C
    //--------------------------------------------------------------------------

    int64_t csize = C->type->size ;
    GB_cast_function cast_A_to_C = GB_cast_factory (C->type->code, atype->code) ;
    // cwork holds one value of type C->type: the typecast scalar
    GB_void cwork [GB_VLA(csize)] ;
    cast_A_to_C (cwork, scalar, atype->size) ;

    //--------------------------------------------------------------------------
    // create the pattern, and allocate space for values, if needed
    //--------------------------------------------------------------------------

    // discard any prior pending tuples
    GB_Pending_free (&(C->Pending)) ;

    int64_t pC ;

    if (GB_NNZ (C) < cnzmax || C->x_shallow || C->i_shallow || C->is_hyper
        || GB_ZOMBIES (C))
    {

        //----------------------------------------------------------------------
        // C is not yet dense: create pattern and allocate values
        //----------------------------------------------------------------------

        // clear prior content and recreate it; use exising header for C.
        // do not malloc C->x if the scalar is zero; calloc it later.
        bool scalar_is_nonzero = GB_is_nonzero (cwork, csize) ;
        // NOTE(review): GB_PHIX_FREE frees C's content but keeps the header, so
        // fields such as C->type and C->hyper_ratio remain valid below — confirm
        GB_PHIX_FREE (C) ;
        info = GB_create (&C, C->type, cvlen, cvdim, GB_Ap_malloc, C->is_csc,
            GB_FORCE_NONHYPER, C->hyper_ratio, C->vdim, cnzmax,
            scalar_is_nonzero, Context) ;
        if (info != GrB_SUCCESS)
        {
            // out of memory
            return (GB_OUT_OF_MEMORY) ;
        }

        int64_t *GB_RESTRICT Cp = C->p ;
        int64_t *GB_RESTRICT Ci = C->i ;
        int nth = GB_nthreads (cvdim, chunk, nthreads_max) ;

        // FUTURE:: dense data structure, where Cp and Ci will be implicit

        // each vector k holds exactly cvlen entries: Cp [k] = k*cvlen
        int64_t k ;
        #pragma omp parallel for num_threads(nth) schedule(static)
        for (k = 0 ; k <= cvdim ; k++)
        {
            Cp [k] = k * cvlen ;
        }
        C->magic = GB_MAGIC ;
        C->nvec_nonempty = (cvlen == 0) ? 0 : cvdim ;

        // row indices cycle 0..cvlen-1 within each vector
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (pC = 0 ; pC < cnzmax ; pC++)
        {
            Ci [pC] = pC % cvlen ;
        }

        if (!scalar_is_nonzero)
        {
            // calloc yields all-zero values, so no further work is needed
            GBBURBLE ("calloc ") ;
            C->x = GB_CALLOC (cnzmax * csize, GB_void) ;
        }
        if (C->x == NULL)
        {
            // out of memory
            GB_PHIX_FREE (C) ;
            return (GB_OUT_OF_MEMORY) ;
        }
        if (!scalar_is_nonzero)
        {
            // quick return if the scalar is zero
            ASSERT_MATRIX_OK (C, "C(:,:)=0 output", GB0) ;
            return (GrB_SUCCESS) ;
        }
    }

    //--------------------------------------------------------------------------
    // define the worker for the switch factory
    //--------------------------------------------------------------------------

    // worker for built-in types: fill C->x with the scalar in parallel
    #define GB_WORKER(ctype)                                                \
    {                                                                       \
        ctype *GB_RESTRICT Cx = (ctype *) C->x ;                            \
        ctype x = (*(ctype *) cwork) ;                                      \
        GB_PRAGMA (omp parallel for num_threads(nthreads) schedule(static)) \
        for (pC = 0 ; pC < cnzmax ; pC++)                                   \
        {                                                                   \
            Cx [pC] = x ;                                                   \
        }                                                                   \
    }                                                                       \
    break ;

    //--------------------------------------------------------------------------
    // launch the switch factory
    //--------------------------------------------------------------------------

    switch (C->type->code)
    {
        case GB_BOOL_code   : GB_WORKER (bool) ;
        case GB_INT8_code   : GB_WORKER (int8_t) ;
        case GB_INT16_code  : GB_WORKER (int16_t) ;
        case GB_INT32_code  : GB_WORKER (int32_t) ;
        case GB_INT64_code  : GB_WORKER (int64_t) ;
        case GB_UINT8_code  : GB_WORKER (uint8_t) ;
        case GB_UINT16_code : GB_WORKER (uint16_t) ;
        case GB_UINT32_code : GB_WORKER (uint32_t) ;
        case GB_UINT64_code : GB_WORKER (uint64_t) ;
        case GB_FP32_code   : GB_WORKER (float) ;
        case GB_FP64_code   : GB_WORKER (double) ;
        case GB_FC32_code   : GB_WORKER (GxB_FC32_t) ;
        case GB_FC64_code   : GB_WORKER (GxB_FC64_t) ;
        default:
            {
                // worker for all user-defined types: byte-wise memcpy per entry
                GB_BURBLE_N (cnzmax, "generic ") ;
                GB_void *GB_RESTRICT Cx = (GB_void *) C->x ;
                #pragma omp parallel for num_threads(nthreads) schedule(static)
                for (pC = 0 ; pC < cnzmax ; pC++)
                {
                    memcpy (Cx +((pC)*csize), cwork, csize) ;
                }
            }
            break ;
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    ASSERT_MATRIX_OK (C, "C(:,:)=x output", GB0) ;
    return (GrB_SUCCESS) ;
}
dropout-inl.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * Copyright (c) 2015 by Contributors
 * \file dropout-inl.h
 * \brief Dropout operator: randomly zeroes inputs with probability p during
 *        training and rescales the survivors by 1/(1-p) ("inverted dropout").
 * \author Bing Xu, Da Zheng
*/

#ifndef MXNET_OPERATOR_NN_DROPOUT_INL_H_
#define MXNET_OPERATOR_NN_DROPOUT_INL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../random/sampler.h"

#if defined(USE_MKL) && defined(_OPENMP)
#include <omp.h>
#include <mkl_vml_functions.h>
#include <mkl_vsl.h>
#endif  // USE_MKL && _OPENMP

namespace dropout {
// index constants for the operator's input/output TBlob vectors
enum DropoutOpInputs {kData};
enum DropoutOpOutputs {kOut, kMask};
enum DropoutOpForwardResource {kRandom};
// kTraining: dropout only while training; kAlways: also at inference time
enum DropoutOpMode {kTraining, kAlways};
}  // namespace dropout

namespace mxnet {
namespace op {

// Operator parameters: drop probability p and the mode (training/always).
struct DropoutParam : public dmlc::Parameter<DropoutParam> {
  float p;    // fraction of inputs dropped (0 <= p <= 1)
  int mode;   // dropout::kTraining or dropout::kAlways
  DMLC_DECLARE_PARAMETER(DropoutParam) {
    DMLC_DECLARE_FIELD(p).set_default(0.5)
    .set_range(0, 1)
    .describe("Fraction of the input that gets dropped out during training time.");
    DMLC_DECLARE_FIELD(mode)
    .add_enum("training", dropout::kTraining)
    .add_enum("always", dropout::kAlways)
    .set_default(dropout::kTraining)
    .describe("Whether to only turn on dropout during training or to also turn on for inference.");
  }
};  // struct DropoutParam

template<typename xpu, typename DType>
class DropoutOp {
#if defined(USE_MKL) && defined(_OPENMP)
  // Fill r[0..n-1] with Bernoulli(p) samples using MKL's VSL streams,
  // one stream per OpenMP thread (each skipped ahead by its offset).
  static void BernoulliGenerate(common::random::RandGenerator<cpu, DType> gen,
                                int n, double p, int* r) {
    typename RandGenerator<xpu, DType>::Impl genImpl(&gen, 1);
    const int seed = 17 + genImpl.rand() % 4096;  // NOLINT(runtime/threadsafe_fn)
    const int nthr = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel num_threads(nthr)
    {
      const int ithr = omp_get_thread_num();
      const int avg_amount = (n + nthr - 1) / nthr;
      const int my_offset = ithr * avg_amount;
      const int my_amount = std::min(my_offset + avg_amount, n) - my_offset;
      if (my_amount > 0) {
        VSLStreamStatePtr stream;
        vslNewStream(&stream, VSL_BRNG_MCG31, seed + my_offset);
        vslSkipAheadStream(stream, my_offset);
        viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, r + my_offset, p);
        vslDeleteStream(&stream);
      }
    }
  }

  // MKL forward pass; returns false when MKL cannot be used so the caller
  // falls back to the generic kernel.
  static bool MSHADOW_CINLINE MKLForward(mshadow::Stream<cpu> *s,
                                         RandGenerator<cpu, DType> *pgen,
                                         const double pkeep,
                                         const std::vector<TBlob> &in_data,
                                         const std::vector<TBlob> &out_data) {
    // BernoulliGenerate expects an array int, so for types smaller than int, the mask buffer
    // will be too small, so we can't use MKL in those cases
    if (sizeof(DType) >= sizeof(int)) {
      Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
      Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s);
      Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s);
      DType *outptr = out.dptr_;
      DType *dataptr = data.dptr_;
      // the mask buffer is reinterpreted as int storage for the 0/1 samples
      auto maskptr = reinterpret_cast<int *>(mask.dptr_);
      int count = mask.shape_[0] * mask.shape_[1];
      BernoulliGenerate(*pgen, count, pkeep, maskptr);
      const float pk_1 = 1.0f / pkeep;  // inverted-dropout rescale factor
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
      for (int i = 0; i < count; ++i) {
        outptr[i] = dataptr[i] * maskptr[i] * pk_1;
      }
      return true;
    }
    return false;
  }

  // MKL backward pass: grad_in = grad_out * mask * (1/pkeep).
  static bool MSHADOW_CINLINE MKLBackward(mshadow::Stream<cpu> *s,
                                          const double pkeep,
                                          const std::vector<TBlob> &in_grad,
                                          const std::vector<TBlob> &out_data,
                                          const std::vector<TBlob> &out_grad) {
    if (sizeof(DType) >= sizeof(int)) {
      Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s);
      Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
      Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s);
      DType *ingradptr = gdata.dptr_;
      const DType *outgradptr = grad.dptr_;
      auto maskptr = reinterpret_cast<int *>(mask.dptr_);
      int count = mask.shape_[0] * mask.shape_[1];
      const float pk_1 = 1.0f / pkeep;
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
      for (int i = 0; i < count; ++i) {
        ingradptr[i] = outgradptr[i] * maskptr[i] * pk_1;
      }
      return true;
    }
    return false;
  }

#ifdef __CUDACC__
  // GPU never uses MKL
  static bool MSHADOW_CINLINE MKLForward(mshadow::Stream<gpu> *s,
                                         RandGenerator<gpu, DType> *pgen,
                                         const double pkeep,
                                         const std::vector<TBlob> &in_data,
                                         const std::vector<TBlob> &out_data) {
    return false;
  }
  static bool MSHADOW_CINLINE MKLBackward(mshadow::Stream<gpu> *s,
                                          const double pkeep,
                                          const std::vector<TBlob> &in_grad,
                                          const std::vector<TBlob> &out_data,
                                          const std::vector<TBlob> &out_grad) {
    return false;
  }
#endif  // __CUDACC__

#else  // #if defined(USE_MKL) && defined(_OPENMP)
  // No MKL available: the stubs always report "not handled".
  static bool MSHADOW_CINLINE MKLForward(mshadow::Stream<xpu> *s,
                                         RandGenerator<xpu, DType> *pgen,
                                         const double pkeep,
                                         const std::vector<TBlob> &in_data,
                                         const std::vector<TBlob> &out_data) {
    return false;
  }
  static bool MSHADOW_CINLINE MKLBackward(mshadow::Stream<xpu> *s,
                                          const double pkeep,
                                          const std::vector<TBlob> &in_grad,
                                          const std::vector<TBlob> &out_data,
                                          const std::vector<TBlob> &out_grad) {
    return false;
  }
#endif  // #if defined(USE_MKL) && defined(_OPENMP)

 public:
  /*!
   * \brief Dropout kernel, compute dropout tensor
   */
  struct DropoutKernel {
    /*!
     * \brief Dropout kernel function
     * \param id Thread number (0-based representing count)
     * \param gen Random number generator
     * \param N Total number of items in the output
     * \param step Step between items, related to parallelism
     * \param dropout_out Output dropout values
     * \param mask_out Output mask (is multiplied to create dropout output, may be 0)
     * \param input_data Input data to perform the dropout on
     * \param pkeep Dropout rate (keep when the generated random number is less than this value)
     */
    MSHADOW_XINLINE static void Map(int id,
                                    RandGenerator<xpu, DType> gen,
                                    const int N,
                                    const int step,
                                    DType *dropout_out,
                                    DType *mask_out,
                                    const DType *input_data,
                                    const real_t pkeep) {
      RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
        const real_t rand_num = static_cast<real_t>(genImpl.uniform());
        // mask is 0 (dropped) or 1/pkeep (kept, pre-scaled)
        mask_out[i] = mshadow_op::threshold::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
        dropout_out[i] = input_data[i] * mask_out[i];
      });
    }
  };

  // Cache the keep-probability (1 - p) and the operating mode.
  void Init(const DropoutParam &param) {
    this->pkeep_ = 1.0f - param.p;
    this->mode_ = static_cast<dropout::DropoutOpMode>(param.mode);
  }

  // Forward: apply dropout when training (or mode == kAlways); otherwise
  // copy the input through unchanged.
  void Forward(const OpContext &ctx, const std::vector<TBlob> &in_data,
               const std::vector<OpReqType> &req, const std::vector<TBlob> &out_data) {
    if (req[dropout::kOut] != kNullOp) {
      CHECK_EQ(in_data.size(), 1U);
      if (ctx.is_train) {
        CHECK_EQ(out_data.size(), 2U);  // training also produces the mask
      }
      Stream<xpu> *s = ctx.get_stream<xpu>();
      const TBlob &out = out_data[dropout::kOut];
      if (ctx.is_train || this->mode_ == dropout::kAlways) {
        RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
        CHECK_NOTNULL(pgen);
        // try the MKL fast path first, fall back to the generic RNG kernel
        if (!MKLForward(s, pgen, this->pkeep_, in_data, out_data)) {
          const TBlob &mask = out_data[dropout::kMask];
          CHECK(req[dropout::kOut] != kAddTo);
          LaunchRNG<DropoutKernel, xpu>(s, pgen, out.Size(),
                                        out.dptr<DType>(),
                                        mask.dptr<DType>(),
                                        in_data[dropout::kData].dptr<DType>(),
                                        this->pkeep_);
        }
      } else {
        // inference with mode == kTraining: dropout is the identity
        const TBlob& data = in_data[dropout::kData];
        if (req[dropout::kOut] == kWriteTo) {
          mxnet_op::copy(s, out, data);
        } else {
          MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
            mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
              s, out.Size(), out.dptr<DType>(), data.dptr<DType>());
          });
        }
      }
    }
  }

  // Backward: multiply the output gradient by the saved mask (which already
  // includes the 1/pkeep scaling); identity when dropout was not applied.
  void Backward(const OpContext &ctx, const std::vector<TBlob> &out_grad,
                const std::vector<TBlob> &out_data,
                const std::vector<OpReqType> &req, const std::vector<TBlob> &in_grad) {
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    if (ctx.is_train || mode_ == dropout::kAlways) {
      if (!MKLBackward(s, this->pkeep_, in_grad, out_data, out_grad)) {
        const TBlob &gdata = in_grad[dropout::kData];
        const TBlob &grad = out_grad[dropout::kOut];
        const TBlob &mask = out_data[dropout::kMask];
        CHECK_EQ(grad.Size(), mask.Size());
        MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
            s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
        });
      }
    } else {
      const TBlob& gdata = in_grad[dropout::kData];
      const TBlob& grad = out_grad[dropout::kOut];
      if (req[dropout::kData] == kWriteTo) {
        mxnet_op::copy(s, gdata, grad);
      } else {
        MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
            s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>());
        });
      }
    }
  }

 private:
  /*! \brief Dropout rate (keep when the generated random number is less than this value) */
  real_t pkeep_;
  /*! \brief Dropout mode */
  dropout::DropoutOpMode mode_;
};  // class DropoutOp

// nnvm-facing forward entry point; dispatches on the input's dtype.
// NOTE(review): the thread_local op instance is re-Init'd on every call, so
// param changes are picked up, but the op itself holds no other state.
template<typename xpu>
void DropoutCompute(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  const DropoutParam& param = nnvm::get<DropoutParam>(attrs.parsed);
  MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    static thread_local DropoutOp<xpu, DType> op;
    op.Init(param);
    op.Forward(ctx, inputs, req, outputs);
  });
}

//template<typename xpu>
//void DropoutGradCompute(const nnvm::NodeAttrs& attrs,
//                        const OpContext& ctx,
//                        const std::vector<TBlob>& inputs,
//                        const std::vector<OpReqType>& req,
//                        const std::vector<TBlob>& outputs) {
//  const DropoutParam& param = nnvm::get<DropoutParam>(attrs.parsed);
//  CHECK_EQ(inputs.size(), 2U);
//  CHECK_EQ(outputs.size(), 1);
//  CHECK_EQ(req.size(), 1);
//  std::vector<TBlob> out_grads(2);
//  std::vector<TBlob> out_data(2);
//  out_grads[dropout::kOut] = inputs[0];
//  out_data[dropout::kMask] = inputs[1];
//
//  MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
//    static thread_local DropoutOp<xpu, DType> op;
//    op.Init(param);
//    op.Backward(ctx, out_grads, out_data, req, outputs);
//  });
//}

}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_NN_DROPOUT_INL_H_
GB_unop__expm1_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__expm1_fc64_fc64)
// op(A') function:  GB (_unop_tran__expm1_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = GB_cexpm1 (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_cexpm1 (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC64_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ;            \
    Cx [pC] = GB_cexpm1 (z) ;       \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EXPM1 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator element-wise over anz entries; when Ab is non-NULL the
// matrix is bitmap and entries with Ab [p] == 0 are skipped.

GrB_Info GB (_unop_apply__expm1_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_cexpm1 (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_cexpm1 (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__expm1_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template does the work, using the macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
psocpp.h
/** psocpp.h
  *
  * Author: Fabian Meyer
  * Created on: 08 Jan 2019
  * License: MIT
  */

#ifndef PSOCPP_PSOCPP_H_
#define PSOCPP_PSOCPP_H_

#include <Eigen/Geometry>
#include <stdexcept>
#include <limits>
#include <functional>
#include <iostream>
#include <ctime>
#include <iomanip>
#include <random>

namespace pso
{
    /** Integer type for indexing arrays, vectors and matrices. */
    typedef long int Index;

    /** @brief Dummy callback functor, which always and only returns true. */
    template<typename Scalar>
    class NoCallback
    {
    public:
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;

        // Always continue the optimization, regardless of state.
        bool operator()(const Index, const Matrix&, const Vector &, const Index) const
        {
            return true;
        }
    };

    /** @brief Inertia weight functor, which returns a constant weight. */
    template<typename Scalar>
    class ConstantWeight
    {
    public:
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;

    private:
        Scalar weight_;

    public:
        ConstantWeight()
            : ConstantWeight(1.0)
        { }

        /** Constructor, which accepts the weight that is returned by the functor.
          * @param weight constant which will be returned as inertia weight */
        ConstantWeight(const Scalar weight)
            : weight_(weight)
        { }

        // Iteration counters are ignored: the weight never changes.
        Scalar operator()(const Index, const Index) const
        {
            return weight_;
        }
    };

    /** @brief Inertia weight functor, which decreases linearly over time.
      *
      * The inertia weight is calculated by the following formula:
      *
      * w = wMin + (wMax - wMin) * (t / tMax)
      */
    template<typename Scalar>
    class LinearDecrease
    {
    public:
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;

    private:
        Scalar weightMin_;
        Scalar weightMax_;

    public:
        LinearDecrease()
            : LinearDecrease(0.4, 0.9)
        { }

        /** @brief Constructor, which accepts the minimum and maximum weight of
          * the linear decrease.
          *
          * The returned inertia weight always lies in the interval [minval, maxval].
          * @param minval lower bound of the inertia weight
          * @param maxval upper bound of the inertia weight */
        LinearDecrease(const Scalar minval, const Scalar maxval)
            : weightMin_(minval), weightMax_(maxval)
        { }

        Scalar operator()(const Index iteration, const Index maxIt) const
        {
            // fraction of the optimization run that has elapsed, in [0, 1]
            Scalar factor = static_cast<Scalar>(iteration) /
                static_cast<Scalar>(maxIt);
            return weightMin_ + (weightMax_ - weightMin_) * factor;
        }
    };

    /** @brief Inertia weight functor, which decreases exponentially over time.
      *
      * The inertia weight is calculated by the following formula:
      *
      * w = wMin + (wMax - wMin) * exp(-t / (tMax / 10))
      */
    template<typename Scalar>
    class ExponentialDecrease1
    {
    public:
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;

    private:
        Scalar weightMin_;
        Scalar weightMax_;

    public:
        ExponentialDecrease1()
            : ExponentialDecrease1(0.4, 0.9)
        { }

        /** Constructor, which accepts the minimum and maximum weight of the
          * exponential decrease.
          * The returned inertia weight always lies in the interval [minval, maxval].
          * @param minval lower bound of the inertia weight
          * @param maxval upper bound of the inertia weight */
        ExponentialDecrease1(const Scalar minval, const Scalar maxval)
            : weightMin_(minval), weightMax_(maxval)
        { }

        Scalar operator()(const Index iteration, const Index maxIt) const
        {
            Scalar exponent = static_cast<Scalar>(iteration) /
                (static_cast<Scalar>(maxIt) / 10.0);
            return weightMin_ + (weightMax_ - weightMin_) * std::exp(-exponent);
        }
    };

    /** @brief Inertia weight functor, which decreases exponentially over time.
      *
      * The inertia weight is calculated by the following formula:
      *
      * w = wMin + (wMax - wMin) * exp(-(t / (tMax / 4))^2)
      */
    template<typename Scalar>
    class ExponentialDecrease2
    {
    public:
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;

    private:
        Scalar weightMin_;
        Scalar weightMax_;

    public:
        ExponentialDecrease2()
            : ExponentialDecrease2(0.4, 0.9)
        { }

        /** Constructor, which accepts the minimum and maximum weight of the
          * exponential decrease.
          * The returned inertia weight always lies in the interval [minval, maxval].
          * @param minval lower bound of the inertia weight
          * @param maxval upper bound of the inertia weight */
        ExponentialDecrease2(const Scalar minval, const Scalar maxval)
            : weightMin_(minval), weightMax_(maxval)
        { }

        Scalar operator()(const Index iteration, const Index maxIt) const
        {
            Scalar exponent = static_cast<Scalar>(iteration) /
                (static_cast<Scalar>(maxIt) / 4.0);
            exponent *= exponent;  // squared decay term
            return weightMin_ + (weightMax_ - weightMin_) * std::exp(-exponent);
        }
    };

    /** @brief Inertia weight functor, which decreases exponentially over time.
      *
      * The inertia weight is calculated by the following formula:
      *
      * w = (wMax - wMin - d1) * exp(1 / (1 + d2 t / tMax))
      */
    template<typename Scalar>
    class ExponentialDecrease3
    {
    public:
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
        typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;

    private:
        Scalar weightMin_;
        Scalar weightMax_;
        /** Control factors */
        Scalar d1_;
        Scalar d2_;

    public:
        ExponentialDecrease3()
            : ExponentialDecrease3(0.4, 0.95, 0.2, 7.0)
        { }

        /** Constructor, which accepts the minimum and maximum weight and two
          * control factors of the exponential decrease.
          * The returned inertia weight always lies in the interval [minval, maxval].
          * @param minval lower bound of the inertia weight
          * @param maxval upper bound of the inertia weight
          * @param d1 first control factor
          * @param d2 second control factor */
        ExponentialDecrease3(const Scalar minval, const Scalar maxval,
            const Scalar d1, const Scalar d2)
            : weightMin_(minval), weightMax_(maxval), d1_(d1), d2_(d2)
        { }

        Scalar operator()(const Index iteration, const Index maxIt) const
        {
            // NOTE(review): weightMin_ only enters via (wMax - wMin - d1);
            // it is not applied as a hard lower bound here — confirm intended.
            Scalar itFac = static_cast<Scalar>(iteration) /
                static_cast<Scalar>(maxIt);
            Scalar exponent = 1.0 / (1.0 + d2_ * itFac);
            return (weightMax_ - weightMin_ - d1_) * std::exp(exponent);
        }
    };

    /** @brief Implements the particle swarm optimization algorithm.
      *
      * The optimization process can be configured by providing an inertia
      * weight strategy functor and a callback.
      *
      * The inertia weight functor determines the amount of velocity, which is
      * maintained from the previous iterations. It has a huge effect on
      * convergence speed and stability of the optimization.
      *
      * The callback functor is called after each iteration and returns a boolean.
      * If it returns false the optimization process is stopped. As such, the
      * callback allows to implement additional stop criteria.
*/ template<typename Scalar, typename Objective, typename InertiaWeightStrategy = ConstantWeight<Scalar>, typename Callback = NoCallback<Scalar> > class ParticleSwarmOptimization { public: typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix; typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector; struct Result { Index iterations; bool converged; Scalar fval; Vector xval; }; private: Objective objective_; Callback callback_; InertiaWeightStrategy weightStrategy_; Index threads_; Index maxIt_; Scalar xeps_; Scalar feps_; Scalar phip_; Scalar phig_; Scalar maxVel_; Index verbosity_; std::function<Scalar()> dice_; template<typename Derived> std::string vector2str(const Eigen::MatrixBase<Derived> &vec) const { std::stringstream ss1; ss1 << std::fixed << std::showpoint << std::setprecision(6); std::stringstream ss2; ss2 << '['; for(Index i = 0; i < vec.size(); ++i) { ss1 << vec(i); ss2 << std::setfill(' ') << std::setw(10) << ss1.str(); if(i != vec.size() - 1) ss2 << ' '; ss1.str(""); } ss2 << ']'; return ss2.str(); } void randomizeParticles(const Matrix &bounds, Matrix &particles) { for(Index i = 0; i < particles.cols(); ++i) { for(Index j = 0; j < particles.rows(); ++j) { Scalar minval = bounds(0, j); Scalar maxval = bounds(1, j); Scalar diff = maxval - minval; particles(j, i) = minval + (dice_() * diff); } } } void randomizeVelocities(const Matrix &bounds, Matrix &velocities) { for(Index i = 0; i < velocities.cols(); ++i) { for(Index j = 0; j < velocities.rows(); ++j) { Scalar minval = bounds(0, j); Scalar maxval = bounds(1, j); Scalar diff = maxval - minval; Scalar vel = -diff + (dice_() * 2 * diff); velocities(j, i) = std::min(maxVel_, std::max(-maxVel_, vel)); } } } void evaluateObjective(const Matrix &particles, Vector &fvals) { #pragma omp parallel for num_threads(threads_) for(Index i = 0; i < particles.cols(); ++i) fvals(i) = objective_(particles.col(i)); } void maintainBounds(const Matrix &bounds, Matrix &particles) const { for(Index i = 0; i 
< particles.cols(); ++i) { for(Index j = 0; j < particles.rows(); ++j) { Scalar minval = bounds(0, j); Scalar maxval = bounds(1, j); Scalar val = particles(j, i); particles(j, i) = std::min(maxval, std::max(minval, val)); } } } void calculateVelocities(const Matrix &particles, const Matrix &bestParticles, const Index gbest, const Index iteration, Matrix &velocities) { assert(velocities.rows() == particles.rows()); assert(velocities.cols() == particles.cols()); assert(velocities.rows() == bestParticles.rows()); assert(velocities.cols() == bestParticles.cols()); assert(gbest < bestParticles.cols()); Scalar weight = weightStrategy_(iteration, maxIt_); for(Index i = 0; i < velocities.cols(); ++i) { for(Index j = 0; j < velocities.rows(); ++j) { Scalar velp = dice_() * (bestParticles(j, i) - particles(j, i)); Scalar velg = dice_() * (bestParticles(j, gbest) - particles(j, i)); Scalar vel = weight * velocities(j, i) + phip_ * velp + phig_ * velg; if(maxVel_ > 0) vel = std::min(maxVel_, std::max(-maxVel_, vel)); velocities(j, i) = vel; } } } Result _minimize(const Matrix &bounds, Matrix &particles) { Matrix velocities(particles.rows(), particles.cols()); Vector fvals(particles.cols()); Matrix bestParticles = particles; Vector bestFvals(particles.cols()); Matrix prevParticles(particles.rows(), particles.cols()); Vector prevFvals(particles.cols()); Vector diff(particles.rows()); Index gbest = 0; // initialize velocities randomly randomizeVelocities(bounds, velocities); // evaluate objective function for the initial particles evaluateObjective(particles, fvals); bestFvals = fvals; bestFvals.minCoeff(&gbest); // init stop conditions Index iterations = 0; Scalar fchange = feps_ + 1; Scalar xchange = xeps_ + 1; while((maxIt_ == 0 || iterations < maxIt_) && fchange > feps_ && xchange > xeps_) { // calculate new velocities calculateVelocities(particles, bestParticles, gbest, iterations, velocities); // move particles by velocity and stay within bounds particles += velocities; 
maintainBounds(bounds, particles); // evaluate objective for moved particles evaluateObjective(particles, fvals); prevParticles = bestParticles; prevFvals = bestFvals; for(Index i = 0; i < fvals.size(); ++i) { // check if there was an improvement and update best vals if(fvals(i) < bestFvals(i)) { bestFvals(i) = fvals(i); bestParticles.col(i) = particles.col(i); } } bestFvals.minCoeff(&gbest); // calculate new diffs xchange = (bestParticles - prevParticles).colwise().norm().sum(); fchange = (bestFvals - prevFvals).array().abs().sum(); xchange /= bestParticles.cols(); fchange /= bestFvals.size(); // evaluate callback and save its result bool callbackResult = callback_(iterations, bestParticles, bestFvals, gbest); if(verbosity_ > 0) { std::stringstream ss; ss << "it=" << std::setfill('0') << std::setw(4) << iterations << std::fixed << std::showpoint << std::setprecision(6) << " fchange=" << fchange << " xchange=" << xchange; if(verbosity_ > 2) ss << " callback=" << (callbackResult ? "true" : "false"); ss << " fval=" << bestFvals(gbest); if(verbosity_ > 1) ss << " xval=" << vector2str(bestParticles.col(gbest)); std::cout << ss.str() << std::endl;; } ++iterations; } Result result; result.iterations = iterations; result.converged = fchange <= feps_ || xchange <= xeps_; result.fval = bestFvals(gbest); result.xval = bestParticles.col(gbest); return result; } public: ParticleSwarmOptimization() : objective_(), callback_(), weightStrategy_(), threads_(1), maxIt_(0), xeps_(static_cast<Scalar>(1e-6)), feps_(static_cast<Scalar>(1e-6)), phip_(static_cast<Scalar>(2.0)), phig_(static_cast<Scalar>(2.0)), maxVel_(static_cast<Scalar>(0.0)), verbosity_(0), dice_() { std::default_random_engine gen(std::time(0)); std::uniform_real_distribution<Scalar> distrib(0.0, 1.0); dice_ = std::bind(distrib, gen); } /** Set the amount of threads, which are used for evaluating the * individual particles (OMP only). * Set to 0 or negative to allow auto detection. 
* @param threads maximum number of threads for evaluation */ void setThreads(const Index threads) { threads_ = threads; } /** Set the maximum number of iterations. * Set to 0 or negative for infinite iterations. * @param iterations maximum number of iterations */ void setMaxIterations(const Index iterations) { maxIt_ = iterations; } /** Set the minimum average change of particles per iteration. * If the average change of particles (input parameters) falls below * this value, the optimization terminates. * @param change minimum change of input paramaters */ void setMinParticleChange(const Scalar change) { xeps_ = change; } /** Set the minimum average change of function values per iteration. * If the average change of functions values falls below * this value, the optimization terminates. * @param change minimum change of function values */ void setMinFunctionChange(const Scalar change) { feps_ = change; } /** Set the tendency of particles to move towards their local optimum * found so far. * Each particle individually maintains a memory of where it has * visited the lowest function value so far. * Increasing this value increases the particles' tendency to move * towards that point. * @param phip tendency to move towards individual optimum */ void setPhiParticles(const Scalar phip) { phip_ = phip; } /** Set the tendency of particles to move towards the global optimum * found so far. * The swarm maintains a collective memory of where it has visited the * lowest function value so far. * Increasing this value increases the particles' tendency to move * towards that point. * @param phig tendency to move towards collective optimum */ void setPhiGlobal(const Scalar phig) { phig_ = phig; } /** Set an upper bound for the velocity of particles. * A particle cannot move faster than this value, which may prevent * divergence. 
* @param maxvel maximum velocity of a particle */ void setMaxVelocity(const Scalar maxvel) { maxVel_ = maxvel; } /** Set the level of verbosity during optimization. * Verbosity increases with increasing value. * 0 means no output and it can be raised up to level 3. * @param verbosity level of verbosity */ void setVerbosity(const Index verbosity) { verbosity_ = verbosity; } void setObjective(const Objective &objective) { objective_ = objective; } void setCallback(const Callback &callback) { callback_ = callback; } void setInertiaWeightStrategy(const InertiaWeightStrategy &weightStrategy) { weightStrategy_ = weightStrategy; } /** Perform minimization with the given bounds and number of particels. * * The swarm of particles will be drawn uniform randomly within the * given bounds. * * The bounds matrix has to have 2 rows and one column per dimension * of particle. The first row holds the minimum value of the respective * dimension and the second row holds the maximum value. * * @param bounds 2xM matrix for bounds of M-dimensional particles * @param cnt number of particles used for optimization */ Result minimize(const Matrix &bounds, const Index cnt) { if(cnt == 0) throw std::runtime_error("particle count cannot be 0"); if(bounds.rows() != 2) throw std::runtime_error("bounds has not exactly 2 rows (min, max)"); for(Index i = 0; i < bounds.cols(); ++i) { if(bounds(0, i) >= bounds(1, i)) throw std::runtime_error("bounds min is greater than max"); } Matrix particles(bounds.cols(), cnt); randomizeParticles(bounds, particles); return _minimize(bounds, particles); } /** Perform minimization with the given bounds, number of particels and * initial guess. * * The swarm of particles will be drawn uniform randomly within the * given bounds. * * The bounds matrix has to have 2 rows and one column per dimension * of particle. The first row holds the minimum value of the respective * dimension and the second row holds the maximum value. 
* * The initial guess vector has to have the same length as the number * of columns of the bounds. It will be included as one particle of * the swarm. * * @param bounds 2xM matrix for bounds of M-dimensional particles * @param cnt number of particles used for optimization * @param initGuess initial guess for a particle */ Result minimize(const Matrix &bounds, const Index cnt, const Vector &initGuess) { if(cnt == 0) throw std::runtime_error("particle count cannot be 0"); if(bounds.rows() != 2) throw std::runtime_error("bounds has not exactly 2 rows (min, max)"); for(Index i = 0; i < bounds.cols(); ++i) { if(bounds(0, i) >= bounds(1, i)) throw std::runtime_error("bounds min is greater than max"); } if(bounds.cols() != initGuess.size()) throw std::runtime_error("init guess and bounds have different dimensions"); Matrix particles(bounds.cols(), cnt); randomizeParticles(bounds, particles); particles.col(0) = initGuess; maintainBounds(bounds, particles); return _minimize(bounds, particles); } /** Perform minimization with the given bounds and a pre-computed * swarm of particles. * * The bounds matrix has to have 2 rows and one column per dimension * of particle. The first row holds the minimum value of the respective * dimension and the second row holds the maximum value. 
* * @param bounds 2xM matrix for bounds of M-dimensional particles * @param particles initial swarm used for optimization */ Result minimize(const Matrix &bounds, Matrix &particles) { if(bounds.rows() != 2) throw std::runtime_error("bounds has not exactly 2 rows (min, max)"); if(bounds.cols() != particles.rows()) throw std::runtime_error("columns of bounds and rows of " "particles do not match"); for(Index i = 0; i < bounds.cols(); ++i) { if(bounds(0, i) >= bounds(1, i)) throw std::runtime_error("bounds min is greater than max"); } maintainBounds(bounds, particles); return _minimize(bounds, particles); } void getRandomParticles(const Matrix &bounds, const Index cnt, Matrix &particles) { particles.resize(bounds.cols(), cnt); randomizeParticles(bounds, particles); } }; } #endif
GB_unaryop__abs_int32_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_int32_fp64
// op(A') function:  GB_tran__abs_int32_fp64

// C type:   int32_t
// A type:   double
// cast:     int32_t cij ; GB_CAST_SIGNED(cij,aij,32)
// unaryop:  cij = GB_IABS (aij)

// type of the A matrix entries
#define GB_ATYPE \
    double

// type of the C matrix entries
#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x)   \
    z = GB_IABS (x) ;

// casting (GB_CAST_SIGNED is defined by the GraphBLAS headers; presumably it
// handles out-of-range/NaN doubles when narrowing to int32_t -- see GB.h)
#define GB_CASTING(z, aij)   \
    int32_t z ; GB_CAST_SIGNED(z,aij,32) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies abs (with fp64 -> int32 typecast) to each of the anz entries of Ax,
// writing the result into Cx.  Parallelized over nthreads with OpenMP.
GrB_Info GB_unop__abs_int32_fp64
(
    int32_t *Cx,       // Cx and Ax may be aliased
    double *Ax,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose kernel body lives in GB_unaryop_transpose.c, which is
// textually included below and driven by the GB_* macros defined above.
GrB_Info GB_tran__abs_int32_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
segment.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS EEEEE GGGG M M EEEEE N N TTTTT % % SS E G MM MM E NN N T % % SSS EEE G GGG M M M EEE N N N T % % SS E G G M M E N NN T % % SSSSS EEEEE GGGG M M EEEEE N N T % % % % % % MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means % % % % Software Design % % Cristy % % April 1993 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Segment segments an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % c-means technique. The scale-space filter analyzes the histograms of % the three color components of the image and identifies a set of % classes. The extents of each class is used to coarsely segment the % image with thresholding. The color associated with each class is % determined by the mean color of all pixels within the extents of a % particular class. Finally, any unclassified pixels are assigned to % the closest class with the fuzzy c-means technique. % % The fuzzy c-Means algorithm can be summarized as follows: % % o Build a histogram, one for each color component of the image. 
%
%    o For each histogram, successively apply the scale-space filter and
%      build an interval tree of zero crossings in the second derivative
%      at each scale.  Analyze this scale-space ''fingerprint'' to
%      determine which peaks and valleys in the histogram are most
%      predominant.
%
%    o The fingerprint defines intervals on the axis of the histogram.
%      Each interval contains either a minimum or a maximum in the original
%      signal.  If each color component lies within the maxima interval,
%      that pixel is considered ''classified'' and is assigned a unique
%      class number.
%
%    o Any pixel that fails to be classified in the above thresholding
%      pass is classified using the fuzzy c-Means technique.  It is
%      assigned to one of the classes discovered in the histogram analysis
%      phase.
%
%  The fuzzy c-Means technique attempts to cluster a pixel by finding
%  the local minimum of the generalized within group sum of squared error
%  objective function.  A pixel is assigned to the closest class of
%  which the fuzzy membership has a maximum value.
%
%  Segment is strongly based on software written by Andy Gallo,
%  University of Delaware.
%
%  The following reference was used in creating this program:
%
%    Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation
%    Algorithm Based on the Thresholding and the Fuzzy c-Means
%    Techniques", Pattern Recognition, Volume 23, Number 9, pages
%    935-952, 1990.
% % */ #include "MagickCore/studio.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" /* Define declarations. */ #define MaxDimension 3 #define DeltaTau 0.5f #if defined(FastClassify) #define WeightingExponent 2.0 #define SegmentPower(ratio) (ratio) #else #define WeightingExponent 2.5 #define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0))); #endif #define Tau 5.2f /* Typedef declarations. */ typedef struct _ExtentPacket { double center; ssize_t index, left, right; } ExtentPacket; typedef struct _Cluster { struct _Cluster *next; ExtentPacket red, green, blue; ssize_t count, id; } Cluster; typedef struct _IntervalTree { double tau; ssize_t left, right; double mean_stability, stability; struct _IntervalTree *sibling, *child; } IntervalTree; typedef struct _ZeroCrossing { double tau, histogram[256]; short crossings[256]; } ZeroCrossing; /* Constant declarations. */ static const int Blue = 2, Green = 1, Red = 0, SafeMargin = 3, TreeLength = 600; /* Method prototypes. 
*/ static double OptimalTau(const ssize_t *,const double,const double,const double, const double,short *); static ssize_t DefineRegion(const short *,ExtentPacket *); static void FreeNodes(IntervalTree *), InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *), ScaleSpace(const ssize_t *,const double,double *), ZeroCrossHistogram(double *,const double,short *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l a s s i f y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Classify() defines one or more classes. Each pixel is thresholded to % determine which class it belongs to. If the class is not identified it is % assigned to the closest class based on the fuzzy c-Means technique. % % The format of the Classify method is: % % MagickBooleanType Classify(Image *image,short **extrema, % const double cluster_threshold, % const double weighting_exponent, % const MagickBooleanType verbose,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % % o cluster_threshold: This double represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o weighting_exponent: Specifies the membership weighting exponent. % % o verbose: A value greater than zero prints detailed information about % the identified classes. % % o exception: return any errors or warnings in this structure. 
% */ static MagickBooleanType Classify(Image *image,short **extrema, const double cluster_threshold, const double weighting_exponent,const MagickBooleanType verbose, ExceptionInfo *exception) { #define SegmentImageTag "Segment/Image" #define ThrowClassifyException(severity,tag,label) \ {\ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) \ { \ next_cluster=cluster->next; \ cluster=(Cluster *) RelinquishMagickMemory(cluster); \ } \ if (squares != (double *) NULL) \ { \ squares-=255; \ free_squares=squares; \ free_squares=(double *) RelinquishMagickMemory(free_squares); \ } \ ThrowBinaryException(severity,tag,label); \ } CacheView *image_view; Cluster *cluster, *head, *last_cluster, *next_cluster; ExtentPacket blue, green, red; MagickOffsetType progress; double *free_squares; MagickStatusType status; register ssize_t i; register double *squares; size_t number_clusters; ssize_t count, y; /* Form clusters. */ cluster=(Cluster *) NULL; head=(Cluster *) NULL; squares=(double *) NULL; (void) memset(&red,0,sizeof(red)); (void) memset(&green,0,sizeof(green)); (void) memset(&blue,0,sizeof(blue)); while (DefineRegion(extrema[Red],&red) != 0) { green.index=0; while (DefineRegion(extrema[Green],&green) != 0) { blue.index=0; while (DefineRegion(extrema[Blue],&blue) != 0) { /* Allocate a new class. */ if (head != (Cluster *) NULL) { cluster->next=(Cluster *) AcquireMagickMemory( sizeof(*cluster->next)); cluster=cluster->next; } else { cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); head=cluster; } if (cluster == (Cluster *) NULL) ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; } } } if (head == (Cluster *) NULL) { /* No classes were identified-- create one. 
*/ cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); if (cluster == (Cluster *) NULL) ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; head=cluster; } /* Count the pixels for each cluster. */ status=MagickTrue; count=0; progress=0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >= (cluster->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <= (cluster->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >= (cluster->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <= (cluster->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >= (cluster->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <= (cluster->blue.right+SafeMargin))) { /* Count this pixel. 
*/ count++; cluster->red.center+=(double) ScaleQuantumToChar( GetPixelRed(image,p)); cluster->green.center+=(double) ScaleQuantumToChar( GetPixelGreen(image,p)); cluster->blue.center+=(double) ScaleQuantumToChar( GetPixelBlue(image,p)); cluster->count++; break; } p+=GetPixelChannels(image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); /* Remove clusters that do not meet minimum cluster threshold. */ count=0; last_cluster=head; next_cluster=head; for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; if ((cluster->count > 0) && (cluster->count >= (count*cluster_threshold/100.0))) { /* Initialize cluster. */ cluster->id=count; cluster->red.center/=cluster->count; cluster->green.center/=cluster->count; cluster->blue.center/=cluster->count; count++; last_cluster=cluster; continue; } /* Delete cluster. */ if (cluster == head) head=next_cluster; else last_cluster->next=next_cluster; cluster=(Cluster *) RelinquishMagickMemory(cluster); } number_clusters=(size_t) count; if (verbose != MagickFalse) { /* Print cluster statistics. */ (void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n"); (void) FormatLocaleFile(stdout,"===================\n\n"); (void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double) cluster_threshold); (void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double) weighting_exponent); (void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n", (double) number_clusters); /* Print the total number of points per cluster. 
*/ (void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n"); (void) FormatLocaleFile(stdout,"=============================\n\n"); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) (void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double) cluster->id,(double) cluster->count); /* Print the cluster extents. */ (void) FormatLocaleFile(stdout, "\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension); (void) FormatLocaleFile(stdout,"================"); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double) cluster->id); (void) FormatLocaleFile(stdout, "%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double) cluster->red.left,(double) cluster->red.right,(double) cluster->green.left,(double) cluster->green.right,(double) cluster->blue.left,(double) cluster->blue.right); } /* Print the cluster center values. */ (void) FormatLocaleFile(stdout, "\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension); (void) FormatLocaleFile(stdout,"====================="); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double) cluster->id); (void) FormatLocaleFile(stdout,"%g %g %g\n",(double) cluster->red.center,(double) cluster->green.center,(double) cluster->blue.center); } (void) FormatLocaleFile(stdout,"\n"); } if (number_clusters > 256) ThrowClassifyException(ImageError,"TooManyClusters",image->filename); /* Speed up distance calculations. */ squares=(double *) AcquireQuantumMemory(513UL,sizeof(*squares)); if (squares == (double *) NULL) ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed", image->filename); squares+=255; for (i=(-255); i <= 255; i++) squares[i]=(double) i*(double) i; /* Allocate image colormap. 
*/ if (AcquireImageColormap(image,number_clusters,exception) == MagickFalse) ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed", image->filename); i=0; for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { image->colormap[i].red=(double) ScaleCharToQuantum((unsigned char) (cluster->red.center+0.5)); image->colormap[i].green=(double) ScaleCharToQuantum((unsigned char) (cluster->green.center+0.5)); image->colormap[i].blue=(double) ScaleCharToQuantum((unsigned char) (cluster->blue.center+0.5)); i++; } /* Do course grain classes. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Cluster *clust; register const PixelInfo *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(image,0,q); for (clust=head; clust != (Cluster *) NULL; clust=clust->next) { if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) >= (clust->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) <= (clust->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) >= (clust->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) <= (clust->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) >= (clust->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) <= (clust->blue.right+SafeMargin))) { /* Classify this pixel. 
*/ SetPixelIndex(image,(Quantum) clust->id,q); break; } } if (clust == (Cluster *) NULL) { double distance_squared, local_minima, numerator, ratio, sum; register ssize_t j, k; /* Compute fuzzy membership. */ local_minima=0.0; for (j=0; j < (ssize_t) image->colors; j++) { sum=0.0; p=image->colormap+j; distance_squared=squares[(ssize_t) ScaleQuantumToChar( GetPixelRed(image,q))-(ssize_t) ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t) ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t) ScaleQuantumToChar(ClampToQuantum(p->blue))]; numerator=distance_squared; for (k=0; k < (ssize_t) image->colors; k++) { p=image->colormap+k; distance_squared=squares[(ssize_t) ScaleQuantumToChar( GetPixelRed(image,q))-(ssize_t) ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[ (ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t) ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[ (ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t) ScaleQuantumToChar(ClampToQuantum(p->blue))]; ratio=numerator/distance_squared; sum+=SegmentPower(ratio); } if ((sum != 0.0) && ((1.0/sum) > local_minima)) { /* Classify this pixel. */ local_minima=1.0/sum; SetPixelIndex(image,(Quantum) j,q); } } } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); status&=SyncImage(image,exception); /* Relinquish resources. 
*/
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  squares-=255;  /* undo the +255 bias so the base pointer is freed */
  free_squares=squares;
  free_squares=(double *) RelinquishMagickMemory(free_squares);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   C o n s o l i d a t e   C r o s s i n g s                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCrossings() guarantees that an even number of zero crossings
%  always lie between two crossings.
%
%  The format of the ConsolidateCrossings method is:
%
%      ConsolidateCrossings(ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
%
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  register ssize_t
    i,
    j,
    k,
    l;

  ssize_t
    center,
    correct,
    count,
    left,
    right;

  /*
    Consolidate zero crossings: walk from the coarsest scale down,
    re-anchoring each crossing at scale i onto a crossing of scale i+1.
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the
        property that there are an even number of crossings between
        intervals.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);
      center=j;
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      if (k < 0)
        k=0;
      /*
        Check center for an even number of crossings between k and j.
*/ for ( ; extents->index <= 255; extents->index++) if (extrema[extents->index] < 0) break; extents->right=extents->index-1; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e r i v a t i v e H i s t o g r a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DerivativeHistogram() determines the derivative of the histogram using % central differencing. % % The format of the DerivativeHistogram method is: % % DerivativeHistogram(const double *histogram, % double *derivative) % % A description of each parameter follows. % % o histogram: Specifies an array of doubles representing the number % of pixels for each intensity of a particular color component. % % o derivative: This array of doubles is initialized by % DerivativeHistogram to the derivative of the histogram using central % differencing. % */ static void DerivativeHistogram(const double *histogram, double *derivative) { register ssize_t i, n; /* Compute endpoints using second order polynomial interpolation. */ n=255; derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]); derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]); /* Compute derivative using central differencing. */ for (i=1; i < n; i++) derivative[i]=(histogram[i+1]-histogram[i-1])/2.0; return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e D y n a m i c T h r e s h o l d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDynamicThreshold() returns the dynamic threshold for an image. % % The format of the GetImageDynamicThreshold method is: % % MagickBooleanType GetImageDynamicThreshold(const Image *image, % const double cluster_threshold,const double smooth_threshold, % PixelInfo *pixel,ExceptionInfo *exception) % % A description of each parameter follows. 
% % o image: the image. % % o cluster_threshold: This double represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o smooth_threshold: the smoothing threshold eliminates noise in the second % derivative of the histogram. As the value is increased, you can expect a % smoother second derivative. % % o pixel: return the dynamic threshold here. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image, const double cluster_threshold,const double smooth_threshold, PixelInfo *pixel,ExceptionInfo *exception) { Cluster *background, *cluster, *object, *head, *last_cluster, *next_cluster; ExtentPacket blue, green, red; MagickBooleanType proceed; double threshold; register const Quantum *p; register ssize_t i, x; short *extrema[MaxDimension]; ssize_t count, *histogram[MaxDimension], y; /* Allocate histogram and extrema. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); GetPixelInfo(image,pixel); for (i=0; i < MaxDimension; i++) { histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram)); extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**histogram)); if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL)) { for (i-- ; i >= 0; i--) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } } /* Initialize histogram. */ InitializeHistogram(image,histogram,exception); (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 
1.0f : smooth_threshold),extrema[Red]); (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]); (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]); /* Form clusters. */ cluster=(Cluster *) NULL; head=(Cluster *) NULL; (void) memset(&red,0,sizeof(red)); (void) memset(&green,0,sizeof(green)); (void) memset(&blue,0,sizeof(blue)); while (DefineRegion(extrema[Red],&red) != 0) { green.index=0; while (DefineRegion(extrema[Green],&green) != 0) { blue.index=0; while (DefineRegion(extrema[Blue],&blue) != 0) { /* Allocate a new class. */ if (head != (Cluster *) NULL) { cluster->next=(Cluster *) AcquireMagickMemory( sizeof(*cluster->next)); cluster=cluster->next; } else { cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); head=cluster; } if (cluster == (Cluster *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); return(MagickFalse); } /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; } } } if (head == (Cluster *) NULL) { /* No classes were identified-- create one. */ cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); if (cluster == (Cluster *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; head=cluster; } /* Count the pixels for each cluster. 
*/ count=0; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >= (cluster->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <= (cluster->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >= (cluster->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <= (cluster->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >= (cluster->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <= (cluster->blue.right+SafeMargin))) { /* Count this pixel. */ count++; cluster->red.center+=(double) ScaleQuantumToChar( GetPixelRed(image,p)); cluster->green.center+=(double) ScaleQuantumToChar( GetPixelGreen(image,p)); cluster->blue.center+=(double) ScaleQuantumToChar( GetPixelBlue(image,p)); cluster->count++; break; } p+=GetPixelChannels(image); } proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y, 2*image->rows); if (proceed == MagickFalse) break; } /* Remove clusters that do not meet minimum cluster threshold. */ count=0; last_cluster=head; next_cluster=head; for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; if ((cluster->count > 0) && (cluster->count >= (count*cluster_threshold/100.0))) { /* Initialize cluster. */ cluster->id=count; cluster->red.center/=cluster->count; cluster->green.center/=cluster->count; cluster->blue.center/=cluster->count; count++; last_cluster=cluster; continue; } /* Delete cluster. 
*/ if (cluster == head) head=next_cluster; else last_cluster->next=next_cluster; cluster=(Cluster *) RelinquishMagickMemory(cluster); } object=head; background=head; if (count > 1) { object=head->next; for (cluster=object; cluster->next != (Cluster *) NULL; ) { if (cluster->count < object->count) object=cluster; cluster=cluster->next; } background=head->next; for (cluster=background; cluster->next != (Cluster *) NULL; ) { if (cluster->count > background->count) background=cluster; cluster=cluster->next; } } if (background != (Cluster *) NULL) { threshold=(background->red.center+object->red.center)/2.0; pixel->red=(double) ScaleCharToQuantum((unsigned char) (threshold+0.5)); threshold=(background->green.center+object->green.center)/2.0; pixel->green=(double) ScaleCharToQuantum((unsigned char) (threshold+0.5)); threshold=(background->blue.center+object->blue.center)/2.0; pixel->blue=(double) ScaleCharToQuantum((unsigned char) (threshold+0.5)); } /* Relinquish resources. */ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; cluster=(Cluster *) RelinquishMagickMemory(cluster); } for (i=0; i < MaxDimension; i++) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + I n i t i a l i z e H i s t o g r a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InitializeHistogram() computes the histogram for an image. % % The format of the InitializeHistogram method is: % % InitializeHistogram(const Image *image,ssize_t **histogram) % % A description of each parameter follows. % % o image: Specifies a pointer to an Image structure; returned from % ReadImage. % % o histogram: Specifies an array of integers representing the number % of pixels for each intensity of a particular color component. 
% */ static void InitializeHistogram(const Image *image,ssize_t **histogram, ExceptionInfo *exception) { register const Quantum *p; register ssize_t i, x; ssize_t y; /* Initialize histogram. */ for (i=0; i <= 255; i++) { histogram[Red][i]=0; histogram[Green][i]=0; histogram[Blue][i]=0; } for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(image,p))]++; histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p))]++; histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p))]++; p+=GetPixelChannels(image); } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + I n i t i a l i z e I n t e r v a l T r e e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InitializeIntervalTree() initializes an interval tree from the lists of % zero crossings. % % The format of the InitializeIntervalTree method is: % % InitializeIntervalTree(IntervalTree **list,ssize_t *number_nodes, % IntervalTree *node) % % A description of each parameter follows. % % o zero_crossing: Specifies an array of structures of type ZeroCrossing. % % o number_crossings: This size_t specifies the number of elements % in the zero_crossing array. 
% */ static void InitializeList(IntervalTree **list,ssize_t *number_nodes, IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->child == (IntervalTree *) NULL) list[(*number_nodes)++]=node; InitializeList(list,number_nodes,node->sibling); InitializeList(list,number_nodes,node->child); } static void MeanStability(IntervalTree *node) { register IntervalTree *child; if (node == (IntervalTree *) NULL) return; node->mean_stability=0.0; child=node->child; if (child != (IntervalTree *) NULL) { register ssize_t count; register double sum; sum=0.0; count=0; for ( ; child != (IntervalTree *) NULL; child=child->sibling) { sum+=child->stability; count++; } node->mean_stability=sum/(double) count; } MeanStability(node->sibling); MeanStability(node->child); } static void Stability(IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->child == (IntervalTree *) NULL) node->stability=0.0; else node->stability=node->tau-(node->child)->tau; Stability(node->sibling); Stability(node->child); } static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing, const size_t number_crossings) { IntervalTree *head, **list, *node, *root; register ssize_t i; ssize_t j, k, left, number_nodes; /* Allocate interval tree. */ list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength, sizeof(*list)); if (list == (IntervalTree **) NULL) return((IntervalTree *) NULL); /* The root is the entire histogram. */ root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root)); root->child=(IntervalTree *) NULL; root->sibling=(IntervalTree *) NULL; root->tau=0.0; root->left=0; root->right=255; root->mean_stability=0.0; root->stability=0.0; (void) memset(list,0,TreeLength*sizeof(*list)); for (i=(-1); i < (ssize_t) number_crossings; i++) { /* Initialize list with all nodes with no children. */ number_nodes=0; InitializeList(list,&number_nodes,root); /* Split list. 
*/ for (j=0; j < number_nodes; j++) { head=list[j]; left=head->left; node=head; for (k=head->left+1; k < head->right; k++) { if (zero_crossing[i+1].crossings[k] != 0) { if (node == head) { node->child=(IntervalTree *) AcquireMagickMemory( sizeof(*node->child)); node=node->child; } else { node->sibling=(IntervalTree *) AcquireMagickMemory( sizeof(*node->sibling)); node=node->sibling; } if (node == (IntervalTree *) NULL) { list=(IntervalTree **) RelinquishMagickMemory(list); FreeNodes(root); return((IntervalTree *) NULL); } node->tau=zero_crossing[i+1].tau; node->child=(IntervalTree *) NULL; node->sibling=(IntervalTree *) NULL; node->left=left; node->right=k; left=k; } } if (left != head->left) { node->sibling=(IntervalTree *) AcquireMagickMemory( sizeof(*node->sibling)); node=node->sibling; if (node == (IntervalTree *) NULL) { list=(IntervalTree **) RelinquishMagickMemory(list); FreeNodes(root); return((IntervalTree *) NULL); } node->tau=zero_crossing[i+1].tau; node->child=(IntervalTree *) NULL; node->sibling=(IntervalTree *) NULL; node->left=left; node->right=head->right; } } } /* Determine the stability: difference between a nodes tau and its child. */ Stability(root->child); MeanStability(root->child); list=(IntervalTree **) RelinquishMagickMemory(list); return(root); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + O p t i m a l T a u % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OptimalTau() finds the optimal tau for each band of the histogram. % % The format of the OptimalTau method is: % % double OptimalTau(const ssize_t *histogram,const double max_tau, % const double min_tau,const double delta_tau, % const double smooth_threshold,short *extrema) % % A description of each parameter follows. % % o histogram: Specifies an array of integers representing the number % of pixels for each intensity of a particular color component. 
% % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % */ static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes, IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->stability >= node->mean_stability) { list[(*number_nodes)++]=node; ActiveNodes(list,number_nodes,node->sibling); } else { ActiveNodes(list,number_nodes,node->sibling); ActiveNodes(list,number_nodes,node->child); } } static void FreeNodes(IntervalTree *node) { if (node == (IntervalTree *) NULL) return; FreeNodes(node->sibling); FreeNodes(node->child); node=(IntervalTree *) RelinquishMagickMemory(node); } static double OptimalTau(const ssize_t *histogram,const double max_tau, const double min_tau,const double delta_tau,const double smooth_threshold, short *extrema) { IntervalTree **list, *node, *root; MagickBooleanType peak; double average_tau, *derivative, *second_derivative, tau, value; register ssize_t i, x; size_t count, number_crossings; ssize_t index, j, k, number_nodes; ZeroCrossing *zero_crossing; /* Allocate interval tree. */ list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength, sizeof(*list)); if (list == (IntervalTree **) NULL) return(0.0); /* Allocate zero crossing list. */ count=(size_t) ((max_tau-min_tau)/delta_tau)+2; zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count, sizeof(*zero_crossing)); if (zero_crossing == (ZeroCrossing *) NULL) { list=(IntervalTree **) RelinquishMagickMemory(list); return(0.0); } for (i=0; i < (ssize_t) count; i++) zero_crossing[i].tau=(-1.0); /* Initialize zero crossing list. 
*/ derivative=(double *) AcquireCriticalMemory(256*sizeof(*derivative)); second_derivative=(double *) AcquireCriticalMemory(256* sizeof(*second_derivative)); i=0; for (tau=max_tau; tau >= min_tau; tau-=delta_tau) { zero_crossing[i].tau=tau; ScaleSpace(histogram,tau,zero_crossing[i].histogram); DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); i++; } /* Add an entry for the original histogram. */ zero_crossing[i].tau=0.0; for (j=0; j <= 255; j++) zero_crossing[i].histogram[j]=(double) histogram[j]; DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); number_crossings=(size_t) i; derivative=(double *) RelinquishMagickMemory(derivative); second_derivative=(double *) RelinquishMagickMemory(second_derivative); /* Ensure the scale-space fingerprints form lines in scale-space, not loops. */ ConsolidateCrossings(zero_crossing,number_crossings); /* Force endpoints to be included in the interval. */ for (i=0; i <= (ssize_t) number_crossings; i++) { for (j=0; j < 255; j++) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]); for (j=255; j > 0; j--) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]); } /* Initialize interval tree. */ root=InitializeIntervalTree(zero_crossing,number_crossings); if (root == (IntervalTree *) NULL) { zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing); list=(IntervalTree **) RelinquishMagickMemory(list); return(0.0); } /* Find active nodes: stability is greater (or equal) to the mean stability of its children. */ number_nodes=0; ActiveNodes(list,&number_nodes,root->child); /* Initialize extrema. 
*/ for (i=0; i <= 255; i++) extrema[i]=0; for (i=0; i < number_nodes; i++) { /* Find this tau in zero crossings list. */ k=0; node=list[i]; for (j=0; j <= (ssize_t) number_crossings; j++) if (zero_crossing[j].tau == node->tau) k=j; /* Find the value of the peak. */ peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue : MagickFalse; index=node->left; value=zero_crossing[k].histogram[index]; for (x=node->left; x <= node->right; x++) { if (peak != MagickFalse) { if (zero_crossing[k].histogram[x] > value) { value=zero_crossing[k].histogram[x]; index=x; } } else if (zero_crossing[k].histogram[x] < value) { value=zero_crossing[k].histogram[x]; index=x; } } for (x=node->left; x <= node->right; x++) { if (index == 0) index=256; if (peak != MagickFalse) extrema[x]=(short) index; else extrema[x]=(short) (-index); } } /* Determine the average tau. */ average_tau=0.0; for (i=0; i < number_nodes; i++) average_tau+=list[i]->tau; average_tau/=(double) number_nodes; /* Relinquish resources. */ FreeNodes(root); zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing); list=(IntervalTree **) RelinquishMagickMemory(list); return(average_tau); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S c a l e S p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleSpace() performs a scale-space filter on the 1D histogram. % % The format of the ScaleSpace method is: % % ScaleSpace(const ssize_t *histogram,const double tau, % double *scale_histogram) % % A description of each parameter follows. % % o histogram: Specifies an array of doubles representing the number % of pixels for each intensity of a particular color component. 
% */ static void ScaleSpace(const ssize_t *histogram,const double tau, double *scale_histogram) { double alpha, beta, *gamma, sum; register ssize_t u, x; gamma=(double *) AcquireQuantumMemory(256,sizeof(*gamma)); if (gamma == (double *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateGammaMap"); alpha=PerceptibleReciprocal(tau*sqrt(2.0*MagickPI)); beta=(-1.0*PerceptibleReciprocal(2.0*tau*tau)); for (x=0; x <= 255; x++) gamma[x]=0.0; for (x=0; x <= 255; x++) { gamma[x]=exp((double) beta*x*x); if (gamma[x] < MagickEpsilon) break; } for (x=0; x <= 255; x++) { sum=0.0; for (u=0; u <= 255; u++) sum+=(double) histogram[u]*gamma[MagickAbsoluteValue(x-u)]; scale_histogram[x]=alpha*sum; } gamma=(double *) RelinquishMagickMemory(gamma); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e g m e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SegmentImage() segment an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % C-means technique. % % The format of the SegmentImage method is: % % MagickBooleanType SegmentImage(Image *image, % const ColorspaceType colorspace,const MagickBooleanType verbose, % const double cluster_threshold,const double smooth_threshold, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o colorspace: Indicate the colorspace. % % o verbose: Set to MagickTrue to print detailed information about the % identified classes. % % o cluster_threshold: This represents the minimum number of pixels % contained in a hexahedra before it can be considered valid (expressed % as a percentage). % % o smooth_threshold: the smoothing threshold eliminates noise in the second % derivative of the histogram. As the value is increased, you can expect a % smoother second derivative. 
% % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SegmentImage(Image *image, const ColorspaceType colorspace,const MagickBooleanType verbose, const double cluster_threshold,const double smooth_threshold, ExceptionInfo *exception) { ColorspaceType previous_colorspace; MagickBooleanType status; register ssize_t i; short *extrema[MaxDimension]; ssize_t *histogram[MaxDimension]; /* Allocate histogram and extrema. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); for (i=0; i < MaxDimension; i++) { histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram)); extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema)); if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL)) { for (i-- ; i >= 0; i--) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename) } } /* Initialize histogram. */ previous_colorspace=image->colorspace; (void) TransformImageColorspace(image,colorspace,exception); InitializeHistogram(image,histogram,exception); (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau, smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]); (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau, smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]); (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau, smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]); /* Classify using the fuzzy c-Means technique. */ status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose, exception); (void) TransformImageColorspace(image,previous_colorspace,exception); /* Relinquish resources. 
*/ for (i=0; i < MaxDimension; i++) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Z e r o C r o s s H i s t o g r a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ZeroCrossHistogram() find the zero crossings in a histogram and marks % directions as: 1 is negative to positive; 0 is zero crossing; and -1 % is positive to negative. % % The format of the ZeroCrossHistogram method is: % % ZeroCrossHistogram(double *second_derivative, % const double smooth_threshold,short *crossings) % % A description of each parameter follows. % % o second_derivative: Specifies an array of doubles representing the % second derivative of the histogram of a particular color component. % % o crossings: This array of integers is initialized with % -1, 0, or 1 representing the slope of the first derivative of the % of a particular color component. % */ static void ZeroCrossHistogram(double *second_derivative, const double smooth_threshold,short *crossings) { register ssize_t i; ssize_t parity; /* Merge low numbers to zero to help prevent noise. */ for (i=0; i <= 255; i++) if ((second_derivative[i] < smooth_threshold) && (second_derivative[i] >= -smooth_threshold)) second_derivative[i]=0.0; /* Mark zero crossings. */ parity=0; for (i=0; i <= 255; i++) { crossings[i]=0; if (second_derivative[i] < 0.0) { if (parity > 0) crossings[i]=(-1); parity=1; } else if (second_derivative[i] > 0.0) { if (parity < 0) crossings[i]=1; parity=(-1); } } }
/* ==== concatenated-file boundary: BBLAS omp_ssymm_batch.c follows ==== */
/** * @file omp_ssymm_batch.c * * @brief BBLAS omp_ssymm_batch float routine. * * BBLAS is a software package provided by Univ. of Manchester, * Univ. of Tennessee. * * @version 1.0.0 * @author Samuel D. Relton * @author Pedro V. Lara * @author Mawussi Zounon * @date 2016-02-20 * **/ #ifndef DOXYGEN_SHOULD_SKIP_THIS /** * Code generation * @generated from ./bblas_omp/omp_zsymm_batch.c normal z -> s, Mon Jun 6 09:44:14 2016 **/ #endif #include<cblas.h> #include "bblas_omp.h" #include "bblas.h" #include <omp.h> #define REAL /** Purpose ------- <b>ssymm_batch</b> is an OpenMP version of ssymm_batch. It performs one of the matrix-matrix operations arrayC[i] = alpha[i]*arrayA[i]*arrayB[i] + beta[i]*arrayC[i], or arrayC[i] = alpha[i]*arrayB[i]*arrayA[i] + beta[i]*arrayC[i], where alpha[i] and beta[i] are scalars, arrayA[i] is a symmetric matrix and arrayB[i] and arrayC[i] are M[i] by N[i] matrices. Fixed and Variable Batch Operations ----------------------------------- Two types of batch operation are supported depending upon the value of batch_opts. When <tt>batch_opts = BBLAS_VARIABLE</tt> - all parameters that are arrays must have length at least batch_count. - all parameters that are arrays must have all values set. When <tt>batch_opts = BBLAS_FIXED</tt> - all parameters that are arrays (except for arrayA, arrayB, arrayC, and info) must have length at least one. - all parameters that are arrays (except for arrayA, arrayB, arrayC, and info) need only to have their first value set. This means that for a <tt>BBLAS_FIXED</tt> batch, the values of side[0], uplo[0], M[0], N[0], alpha[0], beta[0], lda[0], ldb[0], and ldc[0] are used for all computations. Parameters ---------- @param[in] side Array of <tt>enum BBLAS_SIDE</tt>. Each element side[i] specifies whether the symmetric matrix arrayA[i] appears on the left or right side of the operation as follows: - = 'BblasLeft' arrayC[i] = alpha[i]*arrayA[i]*arrayB[i] + beta[i]*arrayC[i]. 
- = 'BblasRight' arrayC[i] = alpha[i]*arrayB[i]*arrayA[i] + beta[i]*arrayC[i]. @param[in] uplo Array of <tt>enum BBLAS_UPLO</tt>. On entry, uplo[i] specifies whether the upper or lower triangular part of the symmetric matrix arrayA[i] is to be referenced as follows: - = 'BblasUpper' Only the upper triangular part of arrayA[i] is to be referenced. - = 'BblasLower' Only the lower triangular part of arrayA[i] is to be referenced. @param[in] M Array of <tt>int</tt>. Each element M[i] specifies the number of rows of the matrix arrayC[i]. M[i] must be greater than zero. @param[in] N Array of <tt>int</tt>. Each element N[i] specifies the number of columns of the matrix arrayC[i]. N[i] must be greater than zero. @param[in] alpha Array of <tt>real_16</tt>. @param[in] arrayA Array of pointers. Each element arrayA[i] is a pointer to a REAL matrix of dimension lda[i] by Ka[i], where Ka[i] = M[i] when side[i] = BblasLeft and is N[i] otherwise. When using side[i] = BblasLeft the M[i] by M[i] part of arrayA[i] must contain the symmetric matrix: when uplo[i] = BblasUpper, the upper triangular part of arrayA[i] must contain the upper triangular part of the symmetric matrix whilst the strictly lower triangular part is not used; similarly when uplo[i] = BblasLower, the lower triangular part of arrayA[i] must contain the lower triangular part of the symmetric matrix whilst the strictly upper triangular part is not used. When using side[i] = BblasRight the N[i] by N[i] part of arrayA[i] must contain the symmetric matrix: when uplo[i] = BblasUpper, the upper triangular part of arrayA[i] must contain the upper triangular part of the symmetric matrix whilst the strictly lower triangular part is not used; similarly when uplo[i] = BblasLower, the lower triangular part of arrayA[i] must contain the lower triangular part of the symmetric matrix whilst the strictly upper triangular part is not used. @param[in] lda Array of <tt>int</tt>. 
On entry, lda[i] specifies the first dimension of arrayA[i] as declared in the calling (sub) program. When side[i] = BblasLeft then lda[i] must be at least max( 1, M[i] ), otherwise lda[i] must be at least max( 1, N[i] ). @param[in] arrayB Array of pointers. Each element arrayB[i] is a pointer to a REAL matrix of dimension ldb[i] by N[i]. The leading M[i] by N[i] part of arrayB[i] must contain the matrix elements. @param[in] ldb Array of <tt>int</tt>. Each element ldb[i] specifies the first dimension of arrayB[i] as declared in the calling (sub) program. Each element ldb[i] must be at least max( 1, M[i] ). @param[in] beta Array of <tt>float</tt>. When beta[i] is set to zero arrayC[i] need not be set on input. @param[in,out] arrayC Array of pointers. Each element arrayC[i] is a pointer to a REAL matrix of dimension ldc[i] by N[i]. Before entry, the leading M[i] by N[i] part of the arrayC[i] must contain a matrix C, except when beta is zero, in which case C need not be set on entry. On exit, the matrix arrayC[i] is overwritten by the M[i] by N[i] matrix output. @param[in] ldc Array of <tt>int</tt>. Each element ldc[i] specifies the first dimension of arrayC[i] as declared in the calling (sub) program. The value ldc[i] must be at least max( 1, M[i] ). @param[in] batch_count <tt>int</tt> The number of matrices to operate on. @param[in] batch_opts <tt>enum BBLAS_OPTS</tt> One of BBLAS_FIXED or BBLAS_VARIABLE depending upon the type of batch operation required. @param[out] info Array of <tt>int</tt>. Each element info[i] is the error return code of the ith ssymm in the batch, these need not be set on entry. The error codes can be found in bblas_macros.h.
**/
void omp_ssymm_batch(
    const enum BBLAS_SIDE *side, const enum BBLAS_UPLO *uplo,
    const int *M, const int *N,
    const float *alpha, const float **arrayA, const int *lda,
    const float **arrayB, const int *ldb,
    const float *beta, float **arrayC, const int *ldc,
    const int batch_count, const enum BBLAS_OPTS batch_opts, int *info)
{
    /*Local variables */
    int first_index = 0;   /* FIXED batches use the parameters of problem 0 only */
    int batch_iter;        /* iterates over the problems in the batch */
    int LDA;               /* minimum legal leading dimension for arrayA[i] */
    char func_name[15] = "ssymm_batch";

    /* Check input arguments */
    /* NOTE(review): a negative batch_count is reported via xerbla_batch but
       execution falls through; the loops below then execute zero times. */
    if (batch_count < 0)
    {
        xerbla_batch(func_name, BBLAS_ERR_BATCH_COUNT, -1);
    }
    if (batch_opts == BBLAS_FIXED)
    {
        /*
         * FIXED batch: only the parameters at index 0 are validated and then
         * applied to every problem.  Any validation failure flags the whole
         * batch with the same error code and aborts the call.
         */
        if ((side[first_index] != BblasLeft) &&
            (side[first_index] != BblasRight))
        {
            xerbla_batch(func_name, BBLAS_ERR_SIDE, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_SIDE;
            }
            return;
        }
        if ((uplo[first_index] != BblasUpper) &&
            (uplo[first_index] != BblasLower))
        {
            xerbla_batch(func_name, BBLAS_ERR_UPLO, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_UPLO;
            }
            return;
        }
        if (M[first_index] < 0)
        {
            xerbla_batch(func_name, BBLAS_ERR_M, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_M;
            }
            return;
        }
        if (N[first_index] < 0)
        {
            xerbla_batch(func_name, BBLAS_ERR_N, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_N;
            }
            return;
        }
        /* The symmetric matrix A is M-by-M when it multiplies from the left
           and N-by-N when it multiplies from the right. */
        if (side[first_index] == BblasLeft)
        {
            LDA = M[first_index];
        } else
        {
            LDA = N[first_index];
        }
        if (lda[first_index] < LDA)
        {
            xerbla_batch(func_name, BBLAS_ERR_LDA, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDA;
            }
            return;
        }
        if (ldb[first_index] < max(1, M[first_index]))
        {
            xerbla_batch(func_name, BBLAS_ERR_LDB, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDB;
            }
            return;
        }
        if (ldc[first_index] < max(1, M[first_index]))
        {
            xerbla_batch(func_name, BBLAS_ERR_LDC, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDC;
            }
            return;
        }
        /* particular case */
        /* Quick return: empty matrices, or alpha==0 with beta==1, leave
           every arrayC[i] unchanged. */
        if (M[first_index] == 0 || N[first_index] == 0 ||
            (alpha[first_index] == (float)0.0 &&
             beta[first_index] == (float)1.0))
        {
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_SUCCESS;
            }
            return;
        }
        /* One independent cblas_ssymm per problem; iterations share no
           writable state other than distinct info[] slots. */
#pragma omp parallel for private( batch_iter)
        for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
        {
            /*Call to cblas_ssymm */
            cblas_ssymm(
                BblasColMajor,
                side[first_index], uplo[first_index],
                M[first_index], N[first_index],
                (alpha[first_index]),
                arrayA[batch_iter], lda[first_index],
                arrayB[batch_iter], ldb[first_index],
                (beta[first_index]),
                arrayC[batch_iter], ldc[first_index]);
            /* Successful */
            info[batch_iter] = BBLAS_SUCCESS;
        } /*END FIXED SIZE FOR LOOP */
    }else if (batch_opts == BBLAS_VARIABLE)
    {
        /*
         * VARIABLE batch: each problem carries its own parameters, so the
         * checks are repeated per problem inside the parallel loop and a
         * failure only skips that problem (info[i] records the cause).
         */
#pragma omp parallel for private( batch_iter, LDA)
        for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
        {
            /* Check input arguments */
            if ((side[batch_iter] != BblasLeft) &&
                (side[batch_iter] != BblasRight))
            {
                xerbla_batch(func_name, BBLAS_ERR_SIDE, batch_iter);
                info[batch_iter] = BBLAS_ERR_SIDE;
                continue;
            }
            if ((uplo[batch_iter] != BblasUpper) &&
                (uplo[batch_iter] != BblasLower))
            {
                xerbla_batch(func_name, BBLAS_ERR_UPLO, batch_iter);
                info[batch_iter] = BBLAS_ERR_UPLO;
                continue;
            }
            if (M[batch_iter] < 0)
            {
                xerbla_batch(func_name, BBLAS_ERR_M, batch_iter);
                info[batch_iter] = BBLAS_ERR_M;
                continue;
            }
            if (N[batch_iter] < 0)
            {
                xerbla_batch(func_name, BBLAS_ERR_N, batch_iter);
                info[batch_iter] = BBLAS_ERR_N;
                continue;
            }
            /* Required leading dimension of A depends on the side. */
            if (side[batch_iter] == BblasLeft)
            {
                LDA = M[batch_iter];
            } else
            {
                LDA = N[batch_iter];
            }
            if (lda[batch_iter] < LDA)
            {
                xerbla_batch(func_name, BBLAS_ERR_LDA, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDA;
                continue;
            }
            if (ldb[batch_iter] < max(1, M[batch_iter]))
            {
                xerbla_batch(func_name, BBLAS_ERR_LDB, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDB;
                continue;
            }
            if (ldc[batch_iter] < max(1, M[batch_iter]))
            {
                xerbla_batch(func_name, BBLAS_ERR_LDC, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDC;
                continue;
            }
            /* particular case */
            if (M[batch_iter] == 0 || N[batch_iter] == 0 ||
                (alpha[batch_iter] == (float)0.0 &&
                 beta[batch_iter] == (float)1.0))
            {
                info[batch_iter] = BBLAS_SUCCESS;
                continue;
            }
            cblas_ssymm(
                BblasColMajor,
                side[batch_iter], uplo[batch_iter],
                M[batch_iter], N[batch_iter],
                (alpha[batch_iter]),
                arrayA[batch_iter], lda[batch_iter],
                arrayB[batch_iter], ldb[batch_iter],
                (beta[batch_iter]),
                arrayC[batch_iter], ldc[batch_iter]);
            /* Successful */
            info[batch_iter] = BBLAS_SUCCESS;
        }
    }else
    {
        /* Unknown batch_opts value. */
        xerbla_batch(func_name, BBLAS_ERR_BATCH_OPTS, -1);
    }
}
#undef REAL
rt_dgeam.c
#include "runtime.h"

/*
 * Runtime dispatch layer for the dgeam tile kernel:
 *     B := alpha*op(A) + beta*op(B)
 * (per the in-place cublasDgeam call below, which writes its result over B).
 * One task implementation is compiled per backend, selected by the
 * PLASMA_WITH_* configuration macros; all share the same OmpSs task
 * annotation reading [lda*n]A and updating [ldb*n]B in place.
 */

#ifdef PLASMA_WITH_SMP
/* CPU-only build: the task simply forwards to the sequential core kernel. */
#pragma omp target device (smp) copy_deps
#pragma omp task in([lda*n]A) inout([ldb*n]B) label(dgeam)
void CORE_dgeam_ompss(
    PLASMA_enum transA, PLASMA_enum transB,
    int m, int n, int nb,
    double alpha, double *A, int lda,
    double beta, double *B, int ldb)
{
    CORE_dgeam(transA, transB, m, n, nb, alpha, A, lda, beta, B, ldb);
}
#endif

// CUDA support (hybrid)
#ifdef PLASMA_WITH_CUDA_HYBRID
/* Hybrid build: an SMP implementation plus a CUDA alternative; the
   OmpSs runtime may schedule either one for a given task instance. */
#pragma omp target device (smp) copy_deps
#pragma omp task in([lda*n]A) inout([ldb*n]B) label(dgeam)
void CORE_dgeam_ompss(
    PLASMA_enum transA, PLASMA_enum transB,
    int m, int n, int nb,
    double alpha, double *A, int lda,
    double beta, double *B, int ldb)
{
    CORE_dgeam(transA, transB, m, n, nb, alpha, A, lda, beta, B, ldb);
}

//Alternative implementations
#pragma omp target device (cuda) copy_deps
#pragma omp task in([lda*n]A) inout([ldb*n]B) label(dgeam)
void CORE_dgeam_cuda(
    PLASMA_enum transA, PLASMA_enum transB,
    int m, int n, int nb,
    double alpha, double *A, int lda,
    double beta, double *B, int ldb)
{
    cublasOperation_t trans0, trans1;

    /* Translate PLASMA transpose flags to cuBLAS operations.
       NOTE(review): any non-PlasmaNoTrans value (including PlasmaConjTrans)
       maps to CUBLAS_OP_T here; fine for real data, but worth confirming
       callers never pass a conjugate flag. */
    if ( transA == PlasmaNoTrans)
        trans0 = CUBLAS_OP_N;
    else
        trans0 = CUBLAS_OP_T;
    if ( transB == PlasmaNoTrans)
        trans1 = CUBLAS_OP_N;
    else
        trans1 = CUBLAS_OP_T;

    /* Run on the CUDA stream the Nanos++ runtime assigned to this task. */
    cublasHandle_t handle = nanos_get_cublas_handle();
    cudaStream_t stream = nanos_get_kernel_execution_stream();
    cublasSetStream(handle, stream);
    /* In-place geam: B is both the second operand and the output matrix.
       Note that nb is not used on this path. */
    cublasDgeam(handle, trans0, trans1, m, n,
        &alpha, A, lda, &beta, B, ldb, B, ldb);
}
#endif

// CUDA support (pure)
#ifdef PLASMA_WITH_CUDA_PURE
/* Pure CUDA build: the only task implementation targets the GPU. */
#pragma omp target device (cuda) copy_deps
#pragma omp task in([lda*n]A) inout([ldb*n]B) label(dgeam)
void CORE_dgeam_ompss(
    PLASMA_enum transA, PLASMA_enum transB,
    int m, int n, int nb,
    double alpha, double *A, int lda,
    double beta, double *B, int ldb)
{
    cublasOperation_t trans0, trans1;

    if ( transA == PlasmaNoTrans)
        trans0 = CUBLAS_OP_N;
    else
        trans0 = CUBLAS_OP_T;
    if ( transB == PlasmaNoTrans)
        trans1 = CUBLAS_OP_N;
    else
        trans1 = CUBLAS_OP_T;

    cublasHandle_t handle = nanos_get_cublas_handle();
    cudaStream_t stream = nanos_get_kernel_execution_stream();
    cublasSetStream(handle, stream);
    cublasDgeam(handle, trans0, trans1, m, n,
        &alpha, A, lda, &beta, B, ldb, B, ldb);
}
#endif

/*
 * RT_CORE_dgeam: schedule one dgeam task on the active runtime.
 * QUARK builds enqueue the task via QUARK_CORE_dgeam; OmpSs builds invoke
 * the annotated task function directly (the pragma makes it asynchronous).
 * Any other runtime value is silently ignored.
 */
void RT_CORE_dgeam(Quark *quark, Quark_Task_Flags *task_flags,
                   PLASMA_enum transA, PLASMA_enum transB,
                   int m, int n, int nb,
                   double alpha, double *A, int lda,
                   double beta, double *B, int ldb)
{
    plasma_context_t *plasma;
    plasma = plasma_context_self();

    if (plasma->runtime == PLASMA_QUARK)
    {
        QUARK_CORE_dgeam(quark, task_flags,
            transA, transB, m, n, nb,
            alpha, A, lda, beta, B, ldb);
    }
    else if (plasma->runtime == PLASMA_OMPSS)
    {
        CORE_dgeam_ompss(transA, transB, m, n, nb,
            alpha, A, lda, beta, B, ldb);
    }
}
GB_transpose.c
//------------------------------------------------------------------------------
// GB_transpose: C=A' or C=op(A'), with typecasting
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// CALLS: GB_builder

// Transpose a matrix, C=A', and optionally apply a unary operator and/or
// typecast the values.  The transpose may be done in place, in which case C or
// A are modified in place.  If the matrix to be transposed has more than one
// vector, it may have jumbled indices in its vectors, which must be sorted.
// If the input matrix has a single vector, it must be already sorted on input.

// The input matrix may have shallow components (even if in place), and the
// output may also have shallow components (even if the input matrix is not
// shallow).

// This function is CSR/CSC agnostic; it sets the output matrix format from
// C_is_csc but otherwise ignores the CSR/CSC type of A and C.

// If A_in is NULL, then C = (*Chandle) is transposed in place.  If out of
// memory, (*Chandle) is always returned as NULL, which frees the input matrix
// C if the transpose is done in place.

// If A_in is not NULL and Chandle is NULL, then A is modified in place, and
// the A_in matrix is not freed when done.

// The bucket sort is parallel, but not highly scalable.  If e=nnz(A) and A is
// m-by-n, then at most O(e/n) threads are used.  For many matrices, e is O(n),
// although the constant can be high.  The qsort method is more scalable, but
// not as fast with a modest number of threads.
#include "GB_transpose.h" #include "GB_build.h" #include "GB_apply.h" GrB_Info GB_transpose // C=A', C=(ctype)A or C=op(A') ( GrB_Matrix *Chandle, // output matrix C, possibly modified in place GrB_Type ctype, // desired type of C; if NULL use A->type. // ignored if op is present (cast to op->ztype) const bool C_is_csc, // desired CSR/CSC format of C const GrB_Matrix A_in, // input matrix const GrB_UnaryOp op_in, // optional operator to apply to the values GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs and determine if transpose is done in place //-------------------------------------------------------------------------- bool in_place_C, in_place_A ; GrB_Matrix A, C ; if (A_in == NULL) { //---------------------------------------------------------------------- // C = C' ; &C is transposed in place //---------------------------------------------------------------------- // GB_transpose (&C, ctype, csc, NULL, op) ; // C=A' is transposed in place, in the matrix C. // The matrix C is freed if an error occurs and C is set to NULL. ASSERT (Chandle != NULL) ; // at least &C or A must be non-NULL A = (*Chandle) ; C = A ; // C must be freed if an error occurs in_place_C = true ; // C is modified in place in_place_A = false ; ASSERT (A == C && A == (*Chandle)) ; } else if (Chandle == NULL || (*Chandle) == A_in) { //---------------------------------------------------------------------- // A = A' ; A is transposed in place; reuse the header of A //---------------------------------------------------------------------- // GB_transpose (NULL, ctype, csc, A, op) ; // GB_transpose (&A, ctype, csc, A, op) ; // C=A' is transposed in place, in the matrix A. // The matrix A_in is not freed if an error occurs. 
A = A_in ; Chandle = &A ; // C must not be freed if an error occurs C = A ; in_place_C = false ; in_place_A = true ; // A is modified in place ASSERT (A == C && A == (*Chandle)) ; } else { //---------------------------------------------------------------------- // C = A' ; C and A are different //---------------------------------------------------------------------- // GB_transpose (&C, ctype, csc, A, op) ; // C and A are both non-NULL, and not aliased. // C=A' where C is a new matrix constructed here. // The matrix C is freed if an error occurs, and C is set to NULL. A = A_in ; C = NULL ; (*Chandle) = NULL ; // C must be allocated; freed on error in_place_C = false ; // C and A are different matrices in_place_A = false ; ASSERT (A != C && A != (*Chandle)) ; } bool in_place = (in_place_A || in_place_C) ; ASSERT_OK_OR_JUMBLED (GB_check (A, "A input for GB_transpose", GB0)) ; ASSERT_OK_OR_NULL (GB_check (ctype, "ctype for GB_transpose", GB0)) ; ASSERT_OK_OR_NULL (GB_check (op_in, "op for GB_transpose", GB0)) ; ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ; //-------------------------------------------------------------------------- // determine the number of threads to use here //-------------------------------------------------------------------------- int64_t anz = GB_NNZ (A) ; int64_t anvec = A->nvec ; GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (anz + anvec, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // get A //-------------------------------------------------------------------------- GrB_Info info ; GrB_Type atype = A->type ; size_t asize = atype->size ; GB_Type_code acode = atype->code ; int64_t avlen = A->vlen ; int64_t avdim = A->vdim ; int64_t aplen = A->plen ; bool A_is_hyper = A->is_hyper ; double A_hyper_ratio = A->hyper_ratio ; int64_t anzmax = A->nzmax ; // if in place, these must be freed when done, whether successful or not int64_t *restrict Ap = A->p ; 
int64_t *restrict Ah = A->h ; int64_t *restrict Ai = A->i ; GB_void *restrict Ax = A->x ; bool Ap_shallow = A->p_shallow ; bool Ah_shallow = A->h_shallow ; bool Ai_shallow = A->i_shallow ; bool Ax_shallow = A->x_shallow ; // free prior content of A, if transpose is done in place #define GB_FREE_IN_PLACE_A \ { \ if (in_place) \ { \ /* A is being transposed in placed */ \ /* free prior content of A but not &A itself */ \ if (!Ap_shallow) GB_FREE_MEMORY (Ap, aplen+1, sizeof (int64_t)) ;\ if (!Ah_shallow) GB_FREE_MEMORY (Ah, aplen , sizeof (int64_t)) ;\ if (!Ai_shallow) GB_FREE_MEMORY (Ai, anzmax , sizeof (int64_t)) ;\ if (!Ax_shallow) GB_FREE_MEMORY (Ax, anzmax , asize) ; \ } \ else \ { \ /* A is not modified; it is purely an input matrix */ \ ; \ } \ } // free the new C matrix, unless C=A' is being done in place of A #define GB_FREE_C \ { \ if (!in_place_A) \ { \ /* free all of C and all its contents &C */ \ GB_MATRIX_FREE (Chandle) ; \ } \ } // free both A (if in place) and C (if not in place of A) #define GB_FREE_A_AND_C \ { \ GB_FREE_IN_PLACE_A ; \ GB_FREE_C ; \ } //-------------------------------------------------------------------------- // determine the type of C and get the unary operator //-------------------------------------------------------------------------- GrB_UnaryOp op ; if (op_in == NULL) { // no operator op = NULL ; if (ctype == NULL) { // no typecasting if ctype is NULL ctype = atype ; } } else { // If a unary operator z=op(x) is present, C is always returned as // op->ztype. The input ctype is ignored. if (op_in->opcode == GB_IDENTITY_opcode && atype == op_in->xtype) { // op is a built-in identity operator, with the same type as A, so // do not apply the operator and do not typecast. 
ASSERT (op_in->ztype == op_in->xtype) ; op = NULL ; ctype = atype ; } else { // apply the operator, z=op(x) op = op_in ; ctype = op->ztype ; } } GB_Type_code ccode = ctype->code ; size_t csize = ctype->size ; //-------------------------------------------------------------------------- // C = A' //-------------------------------------------------------------------------- ASSERT (GB_IMPLIES (avlen == 0 || avdim == 0, anz == 0)) ; bool allocate_new_Cx = (ctype != atype) || (op != NULL) ; if (anz == 0) { //====================================================================== // quick return if A is empty //====================================================================== GB_FREE_IN_PLACE_A ; // A is empty; create a new empty matrix C, with the new type and // dimensions. C is hypersparse for now but may convert when // returned. GB_CREATE (Chandle, ctype, avdim, avlen, GB_Ap_calloc, C_is_csc, GB_FORCE_HYPER, A_hyper_ratio, 1, 1, true, Context) ; if (info != GrB_SUCCESS) { // out of memory GB_FREE_C ; return (info) ; } ASSERT_OK (GB_check (*Chandle, "C transpose empty", GB0)) ; } else if (avdim == 1) { //====================================================================== // transpose a "column" vector into a "row" //====================================================================== // transpose a vector (avlen-by-1) into a "row" matrix (1-by-avlen). // A must be already sorted on input ASSERT_OK (GB_check (A, "the vector A must already be sorted", GB0)) ; //---------------------------------------------------------------------- // allocate space //---------------------------------------------------------------------- // Allocate the header of C, with no C->p, C->h, C->i, or C->x // content, and initialize the type and dimension of C. If in // place, A->p, A->h, A->i, and A->x are all NULL. The new matrix // is hypersparse, but can be CSR or CSC. This step does not // allocate anything if in place. 
// if *Chandle == NULL, allocate a new header; otherwise reuse existing GB_NEW (Chandle, ctype, 1, avlen, GB_Ap_null, C_is_csc, GB_FORCE_HYPER, A_hyper_ratio, 0, Context) ; if (info != GrB_SUCCESS) { // out of memory ASSERT (!in_place) ; // cannot fail if in place GB_FREE_C ; return (info) ; } if (!in_place) { C = (*Chandle) ; } else { ASSERT (A == C && A == (*Chandle)) ; } // allocate new space for the values and pattern GB_void *restrict Cx = NULL ; int64_t *restrict Cp ; int64_t *restrict Ci ; GB_MALLOC_MEMORY (Cp, anz+1, sizeof (int64_t)) ; GB_CALLOC_MEMORY (Ci, anz , sizeof (int64_t)) ; if (allocate_new_Cx) { // allocate new space for the new typecasted numerical values of C GB_MALLOC_MEMORY (Cx, anz, ctype->size) ; } if (Cp == NULL || Ci == NULL || (allocate_new_Cx && (Cx == NULL))) { // out of memory GB_FREE_MEMORY (Cp, anz+1, sizeof (int64_t)) ; GB_FREE_MEMORY (Ci, anz , sizeof (int64_t)) ; GB_FREE_MEMORY (Cx, anz , csize) ; GB_FREE_A_AND_C ; return (GB_OUT_OF_MEMORY) ; } //---------------------------------------------------------------------- // the transpose will now succeed; fill the content of C //---------------------------------------------------------------------- // numerical values: apply the operator, typecast, or make shallow copy if (op != NULL) { // Cx = op ((op->xtype) Ax) C->x = Cx ; C->x_shallow = false ; GB_apply_op (Cx, op, Ax, atype, anz, Context) ; // prior Ax will be freed } else if (ctype != atype) { // copy the values from A into C and cast from atype to ctype C->x = Cx ; C->x_shallow = false ; GB_cast_array (Cx, ccode, Ax, acode, anz, Context) ; // prior Ax will be freed } else // ctype == atype { // no type change; numerical values of C are a shallow copy of A. C->x = Ax ; C->x_shallow = (in_place) ? Ax_shallow : true ; Ax = NULL ; // do not free prior Ax } // each entry in A becomes a non-empty vector in C C->h = Ai ; C->h_shallow = (in_place) ? 
Ai_shallow : true ; Ai = NULL ; // do not free prior Ai C->nzmax = anz ; // C->p = 0:anz and C->i = zeros (1,anz), newly allocated C->plen = anz ; C->nvec = anz ; C->nvec_nonempty = anz ; C->i = Ci ; C->i_shallow = false ; C->p = Cp ; C->p_shallow = false ; // fill the vector pointers C->p #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t k = 0 ; k <= anz ; k++) { Cp [k] = k ; } C->magic = GB_MAGIC ; //---------------------------------------------------------------------- // free prior space //---------------------------------------------------------------------- GB_FREE_IN_PLACE_A ; } else if (avlen == 1) { //====================================================================== // transpose a "row" into a "column" vector //====================================================================== // transpose a "row" matrix (1-by-avdim) into a vector (avdim-by-1). // if A->vlen is 1, all vectors of A are implicitly sorted ASSERT_OK (GB_check (A, "1-by-n input A already sorted", GB0)) ; //---------------------------------------------------------------------- // allocate space //---------------------------------------------------------------------- // Allocate the header of C, with no C->p, C->h, C->i, or C->x // content, and initialize the type and dimension of C. If in // place, A->p, A->h, A->i, and A->x are all NULL. The new matrix // is NON-hypersparse, but can be CSR or CSC. This step does not // allocate anything if in place. 
// if *Chandle == NULL, allocate a new header; otherwise reuse existing GB_NEW (Chandle, ctype, avdim, 1, GB_Ap_null, C_is_csc, GB_FORCE_NONHYPER, A_hyper_ratio, 0, Context) ; if (info != GrB_SUCCESS) { // out of memory ASSERT (!in_place) ; // cannot fail if in place GB_FREE_C ; return (info) ; } if (!in_place) { C = (*Chandle) ; } else { ASSERT (A == C && A == (*Chandle)) ; } // allocate new space for the values and pattern GB_void *restrict Cx = NULL ; int64_t *restrict Cp ; int64_t *restrict Ci = NULL ; GB_CALLOC_MEMORY (Cp, 2, sizeof (int64_t)) ; bool allocate_new_Ci = (!A_is_hyper) ; if (allocate_new_Ci) { // A is not hypersparse, so new space is needed for Ci GB_MALLOC_MEMORY (Ci, anz, sizeof (int64_t)) ; } if (allocate_new_Cx) { // allocate new space for the new typecasted numerical values of C GB_MALLOC_MEMORY (Cx, anz, ctype->size) ; } if (Cp == NULL || (allocate_new_Cx && (Cx == NULL)) || (allocate_new_Ci && (Ci == NULL))) { // out of memory GB_FREE_MEMORY (Cp, 2 , sizeof (int64_t)) ; GB_FREE_MEMORY (Ci, anz , sizeof (int64_t)) ; GB_FREE_MEMORY (Cx, anz , csize) ; GB_FREE_A_AND_C ; return (GB_OUT_OF_MEMORY) ; } //---------------------------------------------------------------------- // numerical values of C: apply the op, typecast, or make shallow copy //---------------------------------------------------------------------- if (op != NULL) { // Cx = op ((op->xtype) Ax) C->x = Cx ; C->x_shallow = false ; GB_apply_op (Cx, op, Ax, atype, anz, Context) ; // prior Ax will be freed } else if (ctype != atype) { // copy the values from A into C and cast from atype to ctype C->x = Cx ; C->x_shallow = false ; GB_cast_array (Cx, ccode, Ax, acode, anz, Context) ; // prior Ax will be freed } else // ctype == atype { // no type change; numerical values of C are a shallow copy of A C->x = Ax ; C->x_shallow = (in_place) ? 
Ax_shallow : true ; Ax = NULL ; // do not free prior Ax } //---------------------------------------------------------------------- // pattern of C //---------------------------------------------------------------------- if (A_is_hyper) { //------------------------------------------------------------------ // each non-empty vector in A becomes an entry in C //------------------------------------------------------------------ ASSERT (!allocate_new_Ci) ; C->i = Ah ; C->i_shallow = (in_place) ? Ah_shallow : true ; ASSERT (anvec == anz) ; Ah = NULL ; // do not free prior Ah } else { //------------------------------------------------------------------ // find the non-empty vectors of A, which become entries in C //------------------------------------------------------------------ ASSERT (allocate_new_Ci) ; ASSERT (Ah == NULL) ; int nth = GB_nthreads (avdim, chunk, nthreads_max) ; if (nth == 1) { //-------------------------------------------------------------- // construct Ci with a single thread //-------------------------------------------------------------- int64_t k = 0 ; for (int64_t j = 0 ; j < avdim ; j++) { if (Ap [j] < Ap [j+1]) { Ci [k++] = j ; } } ASSERT (k == anz) ; } else { //-------------------------------------------------------------- // construct Ci in parallel //-------------------------------------------------------------- int ntasks = (nth == 1) ? 
1 : (8 * nth) ; ntasks = GB_IMIN (ntasks, avdim) ; ntasks = GB_IMAX (ntasks, 1) ; int64_t Count [ntasks+1] ; #pragma omp parallel for num_threads(nth) schedule(dynamic,1) for (int tid = 0 ; tid < ntasks ; tid++) { int64_t jstart, jend, k = 0 ; GB_PARTITION (jstart, jend, avdim, tid, ntasks) ; for (int64_t j = jstart ; j < jend ; j++) { if (Ap [j] < Ap [j+1]) { k++ ; } } Count [tid] = k ; } GB_cumsum (Count, ntasks, NULL, 1) ; ASSERT (Count [ntasks] == anz) ; #pragma omp parallel for num_threads(nth) schedule(dynamic,1) for (int tid = 0 ; tid < ntasks ; tid++) { int64_t jstart, jend, k = Count [tid] ; GB_PARTITION (jstart, jend, avdim, tid, ntasks) ; for (int64_t j = jstart ; j < jend ; j++) { if (Ap [j] < Ap [j+1]) { Ci [k++] = j ; } } } } #ifdef GB_DEBUG int64_t k = 0 ; for (int64_t j = 0 ; j < avdim ; j++) { if (Ap [j] < Ap [j+1]) { ASSERT (Ci [k] == j) ; k++ ; } } ASSERT (k == anz) ; #endif C->i = Ci ; C->i_shallow = false ; } //---------------------------------------------------------------------- // vector pointers of C //---------------------------------------------------------------------- C->nzmax = anz ; // C->p = [0 anz] and C->h = NULL ASSERT (C->plen == 1) ; ASSERT (C->nvec == 1) ; ASSERT (C->h == NULL) ; C->p = Cp ; C->p_shallow = false ; C->nvec_nonempty = (anz == 0) ? 
0 : 1 ; // fill the vector pointers C->p Cp [0] = 0 ; Cp [1] = anz ; C->magic = GB_MAGIC ; //---------------------------------------------------------------------- // free prior space //---------------------------------------------------------------------- GB_FREE_IN_PLACE_A ; } else { //====================================================================== // transpose a general matrix //====================================================================== ASSERT_OK_OR_JUMBLED (GB_check (A, "A GB_transpose jumbled ok", GB0)) ; ASSERT (avdim > 1 && avlen > 1) ; // T=A' with optional typecasting, or T=op(A') //---------------------------------------------------------------------- // select the method //---------------------------------------------------------------------- // for the qsort method, if the transpose is done in place and A->i is // not shallow, A->i can be used and then freed. Otherwise, A->i is // not modified at all. bool recycle_Ai = (in_place && !Ai_shallow) ; bool use_qsort ; if (A_is_hyper) { //------------------------------------------------------------------ // always use qsort for hypersparse matrices //------------------------------------------------------------------ use_qsort = true ; } else { //------------------------------------------------------------------ // select qsort if the transpose will likely be hypersparse //------------------------------------------------------------------ use_qsort = GB_CHOOSE_QSORT_INSTEAD_OF_BUCKET (anz, avlen) ; } //---------------------------------------------------------------------- // transpose the matrix with the selected method //---------------------------------------------------------------------- if (use_qsort) { //================================================================== // transpose via quicksort //================================================================== //------------------------------------------------------------------ // allocate and create iwork 
//------------------------------------------------------------------ // allocate iwork of size anz int64_t *iwork ; GB_MALLOC_MEMORY (iwork, anz, sizeof (int64_t)) ; if (iwork == NULL) { // out of memory GB_FREE_C ; return (GB_OUT_OF_MEMORY) ; } // Construct the "row" indices of C, which are "column" indices of // A. This array becomes the permanent T->i on output. This phase // must be done before Chandle is created below, since that step // destroys A. GB_extract_vector_list (iwork, A, nthreads) ; //------------------------------------------------------------------ // allocate the output matrix and additional space (jwork and S) //------------------------------------------------------------------ // Allocate the header of C, with no C->p, C->h, C->i, or C->x // content, and initialize the type and dimension of C. If in // place, A->p, A->h, A->i, and A->x are all NULL. The new matrix // is hypersparse, but can be CSR or CSC. This step does not // allocate anything if in place. // if *Chandle == NULL, allocate a new header; otherwise reuse GB_NEW (Chandle, ctype, avdim, avlen, GB_Ap_null, C_is_csc, GB_FORCE_HYPER, A_hyper_ratio, 0, Context) ; if (info != GrB_SUCCESS) { // out of memory ASSERT (!in_place) ; // cannot fail if in place GB_FREE_MEMORY (iwork, anz, sizeof (int64_t)) ; GB_FREE_C ; return (info) ; } if (!in_place) { C = (*Chandle) ; } else { ASSERT (A == C && A == (*Chandle)) ; } // if in_place, the prior Ap and Ah can now be freed if (in_place) { if (!Ap_shallow) GB_FREE_MEMORY (Ap, aplen+1, sizeof (int64_t)); if (!Ah_shallow) GB_FREE_MEMORY (Ah, aplen , sizeof (int64_t)); } int64_t *jwork = NULL ; GB_Type_code scode ; GB_void *S = NULL ; GB_void *Swork = NULL ; if (!recycle_Ai) { // allocate jwork of size anz GB_MALLOC_MEMORY (jwork, anz, sizeof (int64_t)) ; } if (op != NULL) { // allocate Swork of size anz * csize GB_MALLOC_MEMORY (Swork, anz, csize) ; } if ((!recycle_Ai && (jwork == NULL)) || ((op != NULL) && (Swork == NULL))) { // out of memory 
GB_FREE_MEMORY (iwork, anz, sizeof (int64_t)) ; GB_FREE_MEMORY (jwork, anz, sizeof (int64_t)) ; GB_FREE_MEMORY (Swork, anz, csize) ; GB_FREE_A_AND_C ; return (GB_OUT_OF_MEMORY) ; } //------------------------------------------------------------------ // construct jwork and Swork //------------------------------------------------------------------ // "row" indices of A become "column" indices of C if (recycle_Ai) { // Ai is used as workspace for the "column" indices of C. // jwork is a shallow copy of Ai, and is freed by GB_builder. jwork = Ai ; ASSERT (in_place) ; // set Ai to NULL so it is not freed by GB_FREE_IN_PLACE_A Ai = NULL ; } else { // jwork = Ai, making a deep copy. jwork is freed by // GB_builder. A->i is not modified, even if out of memory. GB_memcpy (jwork, Ai, anz * sizeof (int64_t), nthreads) ; } // numerical values: apply the op, typecast, or make shallow copy if (op != NULL) { // Swork = op ((op->xtype) Ax) GB_apply_op (Swork, op, Ax, atype, anz, Context) ; // GB_builder will not need to typecast Swork to T->x, and it // may choose to transplant it into T->x scode = ccode ; #if 0 if (in_place && !Ax_shallow) { // A is being transposed in place so A->x is no longer // needed. If A->x is shallow this can be skipped. T->x // will not be shallow if the op is present. A->x should // be freed early to free up space for GB_builder. // However, in the current usage, when op is used, A is not // transposed in place, so this step is not needed. ASSERT (GB_DEAD_CODE) ; GB_FREE_MEMORY (Ax, anzmax , asize) ; } #endif } else { // GB_builder will typecast S from atype to ctype if needed. // S is a shallow copy of Ax, and must not be modified. 
S = Ax ; scode = acode ; } //------------------------------------------------------------------ // build the matrix: T = (ctype) A' or op ((xtype) A') //------------------------------------------------------------------ // internally, jwork is freed and then T->x is allocated, so the // total high-water memory usage is anz * max (csize, // sizeof(int64_t)). T is always hypersparse. // If op is not NULL, then Swork can be transplanted into T in // GB_builder, instead. However, this requires the tuples to be // sorted on input, which is possible but rare for GB_transpose. GrB_Matrix T ; info = GB_builder ( &T, // create T ctype, // T is of type ctype avdim, // T->vlen = A->vdim, always > 1 avlen, // T->vdim = A->vlen, always > 1 C_is_csc, // T has the same CSR/CSC format as C &iwork, // iwork_handle, becomes T->i on output &jwork, // jwork_handle, freed on output &Swork, // Swork_handle, freed on output false, // tuples are not sorted on input true, // tuples have no duplicates anz, // size of iwork, jwork, and Swork true, // is_matrix: unused false, // ijcheck: unused NULL, NULL, // original I,J indices: not used here S, // array of values of type scode, not modified anz, // number of tuples NULL, // no dup operator needed (input has no duplicates) scode, // type of S or Swork Context ) ; // GB_builder always frees jwork, and either frees iwork or // transplants it in to T->i and sets iwork to NULL. So iwork and // jwork are always NULL on output. GB_builder does not modify S. ASSERT (iwork == NULL && jwork == NULL && Swork == NULL) ; //------------------------------------------------------------------ // free prior space and transplant T into C //------------------------------------------------------------------ // Free the prior content of the input matrix, if done in place. // Ap, Ah, and Ai have already been freed, but Ax has not. 
GB_FREE_IN_PLACE_A ; if (info != GrB_SUCCESS) { // out of memory in GB_builder GB_FREE_A_AND_C ; return (info) ; } // Transplant T in to the result C. The matrix T is not shallow // and no typecasting is done, so this will always succeed. info = GB_transplant (*Chandle, ctype, &T, Context) ; ASSERT (info == GrB_SUCCESS) ; } else { //================================================================== // transpose via bucket sort //================================================================== // This method does not operate on the matrix in place, so it must // create a temporary matrix T. Then the input matrix is freed and // replaced with the new matrix T. ASSERT (!A_is_hyper) ; // T is also typecasted to ctype, if not NULL GrB_Matrix T ; info = GB_transpose_bucket (&T, ctype, C_is_csc, A, op, Context) ; // free prior content, if C=A' is being done in place if (in_place_A) { // free all content of A, but not the header, if in place of A GB_PHIX_FREE (A) ; // transpose in-place } else if (in_place_C) { // free all of C, including the header, if done in place of C GB_MATRIX_FREE (Chandle) ; } if (info != GrB_SUCCESS) { // out of memory in GB_transpose_bucket GB_FREE_C ; return (info) ; } ASSERT_OK (GB_check (T, "T from bucket", GB0)) ; if (in_place_A) { // The header of A has not been freed, since it is used for the // output. Transplant T back into A and free T. T is not // shallow and no typecast is done so this will always succeed. info = GB_transplant (A, ctype, &T, Context) ; ASSERT (info == GrB_SUCCESS) ; } else { // If C=A' is done in place of C, then the header and content // of the input C has been freed. The output T can now be // moved to the Chandle. 
ASSERT (*Chandle == NULL) ; (*Chandle) = T ; } } } //-------------------------------------------------------------------------- // conform the result to the desired hypersparsity of A //-------------------------------------------------------------------------- // get the output matrix C = (*Chandle) ; // transplant the hyper_ratio from A to C C->hyper_ratio = A_hyper_ratio ; ASSERT_OK (GB_check (C, "C to conform in GB_transpose", GB0)) ; info = GB_to_hyper_conform (C, Context) ; if (info != GrB_SUCCESS) { // out of memory GB_FREE_C ; return (info) ; } ASSERT_OK (GB_check (*Chandle, "Chandle conformed in GB_transpose", GB0)) ; return (GrB_SUCCESS) ; }
core_zlange.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> c d s
 *
 **/

#include "core_blas.h"
#include "plasma_types.h"
#include "core_lapack.h"

#include <math.h>

/***************************************************************************//**
 *
 * @ingroup core_lange
 *
 *  Calculates max, one, infinity or Frobenius norm of a given matrix.
 *
 *******************************************************************************
 *
 * @param[in] norm
 *          - PlasmaMaxNorm: Max norm
 *          - PlasmaOneNorm: One norm
 *          - PlasmaInfNorm: Infinity norm
 *          - PlasmaFrobeniusNorm: Frobenius norm
 *
 * @param[in] m
 *          The number of rows of the matrix A. m >= 0. When m = 0,
 *          the returned value is set to zero.
 *
 * @param[in] n
 *          The number of columns of the matrix A. n >= 0. When n = 0,
 *          the returned value is set to zero.
 *
 * @param[in] A
 *          The m-by-n matrix A.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,m).
 *
 * @param[in] work
 *          The auxiliary work array.
 *
 * @param[out] value
 *          The specified norm of the given matrix A
 *
 ******************************************************************************/
void core_zlange(plasma_enum_t norm, int m, int n,
                 const plasma_complex64_t *A, int lda,
                 double *work, double *value)
{
    // Delegate to the LAPACKE work variant; 'work' is only used for the
    // infinity norm (per LAPACK zlange semantics).
    *value = LAPACKE_zlange_work(LAPACK_COL_MAJOR, lapack_const(norm),
                                 m, n, A, lda, work);
}

/******************************************************************************/
// OpenMP-task wrapper: computes the norm of one tile as a task that depends
// on the whole tile A and produces the single scalar 'value'.
void core_omp_zlange(int norm, int m, int n,
                     const plasma_complex64_t *A, int lda,
                     double *work, double *value,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(in:A[0:lda*n]) \
                     depend(out:value[0:1])
    {
        // Skip the work if an earlier task in the sequence already failed.
        if (sequence->status == PlasmaSuccess)
            core_zlange(norm, m, n, A, lda, work, value);
    }
}

/******************************************************************************/
// Auxiliary task: computes per-column (one norm) or per-row (infinity norm)
// absolute sums of a tile, for later reduction across tiles.
void core_omp_zlange_aux(int norm, int m, int n,
                         const plasma_complex64_t *A, int lda,
                         double *value,
                         plasma_sequence_t *sequence, plasma_request_t *request)
{
    switch (norm) {
    case PlasmaOneNorm:
        // value[j] = sum_i |A(i,j)| for each of the n columns.
        #pragma omp task depend(in:A[0:lda*n]) \
                         depend(out:value[0:n])
        {
            if (sequence->status == PlasmaSuccess) {
                for (int j = 0; j < n; j++) {
                    value[j] = cabs(A[lda*j]);
                    for (int i = 1; i < m; i++) {
                        value[j] += cabs(A[lda*j+i]);
                    }
                }
            }
        }
        break;
    case PlasmaInfNorm:
        // value[i] = sum_j |A(i,j)| for each of the m rows.
        #pragma omp task depend(in:A[0:lda*n]) \
                         depend(out:value[0:m])
        {
            if (sequence->status == PlasmaSuccess) {
                for (int i = 0; i < m; i++)
                    value[i] = 0.0;
                for (int j = 0; j < n; j++) {
                    for (int i = 0; i < m; i++) {
                        value[i] += cabs(A[lda*j+i]);
                    }
                }
            }
        }
        break;
    default:
        // Other norms have no auxiliary reduction step; nothing to do.
        break;
    }
}
main.c
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <omp.h> int hamming(int l, u_int8_t *a, u_int8_t *b); int timedif(struct timespec *start, struct timespec *stop); int main(int argc, char **argv) { int m, n, l, t, i, j, k, distSumR = 0, distSumC = 0, distSumH = 0; // m = A strings, n = B strings , l = string length __uint8_t **a, **b; int **distSerial, **distR, **distC, **distCH; // Tables srand(time(NULL)); // init rand struct timespec startTime, endTime; int serialT, rowT, cellT, charT; omp_lock_t **locks; // Check arguments if (argc != 5) { printf("Invalid Arguments"); return -1; } // Assign Arguments m = atoi(argv[1]); n = atoi(argv[2]); l = atoi(argv[3]); t = atoi(argv[4]); // printf("\nArguments"); // printf("\n-------------------"); // printf("\n m: %d",m); // printf("\n n: %d",n); // printf("\n l: %d",l); // printf("\n t: %d\n",t); printf("**************************\n"); printf("********* OpenMP *********\n"); printf("**************************\n"); printf("- Initialization"); fflush(stdout); // OpenMP Thread Limit omp_set_num_threads(t); // Allocate Tables // distSerial = malloc(m * sizeof(int **)); distC = malloc(m * sizeof(int **)); distR = malloc(m * sizeof(int **)); distCH = malloc(m * sizeof(int **)); locks = malloc(m * sizeof(omp_lock_t **)); for (i = 0; i < m; i++) { // distSerial[i] = malloc(n * sizeof(int)); distR[i] = malloc(n * sizeof(int)); distC[i] = malloc(n * sizeof(int)); distCH[i] = malloc(n * sizeof(int)); locks[i] = malloc(n * sizeof(omp_lock_t *)); } for (i = 0; i < m; i++) { for (j = 0; j < n; j++) { distR[i][j] = 0; distC[i][j] = 0; distCH[i][j] = 0; } } printf("."); fflush(stdout); a = malloc(m * sizeof(int *)); for (i = 0; i < m; i++) { a[i] = malloc(l * sizeof(u_int8_t)); } b = malloc(n * sizeof(int *)); for (i = 0; i < n; i++) { b[i] = malloc(l * sizeof(u_int8_t)); } printf("."); fflush(stdout); // init A set for (int i = 0; i < m; i++) { for (int j = 0; j < l; j++) { a[i][j] = rand() % 2; // random number between 
0 and 1 } } // init B set for (int i = 0; i < n; i++) { for (int j = 0; j < l; j++) { b[i][j] = rand() % 2; // random number between 0 and 1 } } printf(".\n"); fflush(stdout); // init distCH and thread locks for (i = 0; i < m; i++) { for (j = 0; j < n; j++) { distCH[i][j] = 0; omp_init_lock(&locks[i][j]); } } // HAMMING // No Parallelization // printf("- Serial Execution: "); // fflush(stdout); // clock_gettime(CLOCK_REALTIME, &startTime); // for (i = 0; i < m; i++) { // for (j = 0; j < n; j++) { // distSerial[i][j] = hamming(l, a[i], b[j]); // } // } // clock_gettime(CLOCK_REALTIME, &endTime); // printf("%d ms\n", timedif(&startTime, &endTime)); // Parallelize each row // Each task take a string from array "a" and // processes it with every string from array "b". printf("- Parallel by Row Execution: "); fflush(stdout); clock_gettime(CLOCK_REALTIME, &startTime); #pragma omp parallel for schedule(dynamic) private(j) shared(a, b, distR) for (i = 0; i < m; i++) { for (j = 0; j < n; j++) { distR[i][j] = hamming(l, a[i], b[j]); distSumR += distR[i][j]; } } clock_gettime(CLOCK_REALTIME, &endTime); printf("%d ms\n", timedif(&startTime, &endTime)); // Parallelize each cell // Each task takes a string from array "a" and // processes it with a string from array "b". printf("- Parallel by Cell Execution: "); fflush(stdout); clock_gettime(CLOCK_REALTIME, &startTime); #pragma omp parallel for schedule(dynamic) collapse(2) private(i, j) shared(a, b, distC) for (i = 0; i < m; i++) { for (j = 0; j < n; j++) { distC[i][j] = hamming(l, a[i], b[j]); distSumC += distC[i][j]; } } clock_gettime(CLOCK_REALTIME, &endTime); printf("%d ms\n", timedif(&startTime, &endTime)); // Parallelize each string // Each task takes a character from a string from array "a" and processes it // with the corresponding character from a string in array "b". 
printf("- Parallel by Char Execution: "); fflush(stdout); clock_gettime(CLOCK_REALTIME, &startTime); #pragma omp parallel for private(i, j, k) shared(a, b, distCH, locks) for (i = 0; i < m; i++) { #pragma omp parallel for private(j, k) shared(a, b, distCH, locks) for (j = 0; j < n; j++) { #pragma omp parallel for private(k) shared(a, b, distCH, locks) for (k = 0; k < l; k++) { if (a[i][k] != b[j][k]) { //set lock for string omp_set_lock(&locks[i][j]); //increase strings hamming distance //#pragma omp atomic update distCH[i][j]++; //unset lock for string omp_unset_lock(&locks[i][j]); } } } } clock_gettime(CLOCK_REALTIME, &endTime); printf("%d ms\n", timedif(&startTime, &endTime)); printf("\n"); // for (i = 0; i < m; i++) { // for (j = 0; j < n; j++) { // distSumH += distCH[i][j]; // } // } // Check Hamming Distances // for (i = 0; i < m; i++) { // for (j = 0; j < n; j++) { // if (distSerial[i][j] != distR[i][j]) { // printf("Row Parallelization Data Mismatch"); // return (-1); // } // if (distSerial[i][j] != distC[i][j]) { // printf("Cell Parallelization Data Mismatch"); // return (-1); // } // if (distSerial[i][j] != distCH[i][j]) { // printf("Character Parallelization Data Mismatch"); // return (-1); // } // } // } return 0; } //Hamming Distance Calculator between two arrays of l size int hamming(int l, u_int8_t *a, u_int8_t *b) { int k = 0; for (int i = 0; i < l; i++) { if (a[i] != b[i]) { k++; } } return k; } int timedif(struct timespec *start, struct timespec *stop) { return ((int) ((stop->tv_sec - start->tv_sec) * 1000) + (int) ((stop->tv_nsec - start->tv_nsec) / 1000000)); }
eavlInfoTopologySparseMapOp.h
// Copyright 2010-2014 UT-Battelle, LLC. See LICENSE.txt for more information.
#ifndef EAVL_INFO_TOPOLOGY_SPARSE_MAP_OP_H
#define EAVL_INFO_TOPOLOGY_SPARSE_MAP_OP_H

#include "eavlCUDA.h"
#include "eavlCellSet.h"
#include "eavlCellSetExplicit.h"
#include "eavlCellSetAllStructured.h"
#include "eavlDataSet.h"
#include "eavlArray.h"
#include "eavlOpDispatch.h"
#include "eavlOperation.h"
#include "eavlTopology.h"
#include "eavlException.h"
#include <time.h>
#ifdef HAVE_OPENMP
#include <omp.h>
#endif

#ifndef DOXYGEN

// Host-side (CPU) executor: walks the dense index range in an OpenMP
// parallel loop; each dense index is translated through the sparse index
// array before reading inputs and writing outputs.
template <class CONN>
struct eavlInfoTopologySparseMapOp_CPU
{
    static inline eavlArray::Location location() { return eavlArray::HOST; }
    // nitems: number of dense (selected) elements to process.
    // conn:   connectivity object providing GetShapeType per sparse index.
    template <class F, class IN, class OUT, class INDEX>
    static void call(int nitems, CONN &conn,
                     const IN inputs, OUT outputs, INDEX indices, F &functor)
    {
        int *sparseindices = get<0>(indices).array;
        #pragma omp parallel for
        for (int denseindex = 0; denseindex < nitems; ++denseindex)
        {
            // Map the dense loop index to the sparse element index.
            int sparseindex = sparseindices[get<0>(indices).indexer.index(denseindex)];
            int shapeType = conn.GetShapeType(sparseindex);
            // Inputs and outputs are both addressed by the sparse index.
            collect(sparseindex, outputs) = functor(shapeType, collect(sparseindex, inputs));
        }
    }
};

#if defined __CUDACC__

// Device kernel: same dense-to-sparse mapping as the CPU path, with the
// dense range strided across all launched threads.
template <class CONN, class F, class IN, class OUT, class INDEX>
__global__ void
eavlInfoTopologySparseMapOp_kernel(int nitems, CONN conn,
                                   const IN inputs, OUT outputs, INDEX indices, F functor)
{
    int *sparseindices = get<0>(indices).array;
    const int numThreads = blockDim.x * gridDim.x;
    const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
    for (int denseindex = threadID; denseindex < nitems; denseindex += numThreads)
    {
        int sparseindex = sparseindices[get<0>(indices).indexer.index(denseindex)];
        int shapeType = conn.GetShapeType(sparseindex);
        collect(sparseindex, outputs) = functor(shapeType, collect(sparseindex, inputs));
    }
}

// Device-side (GPU) executor: launches the kernel with a fixed 32x256
// thread configuration and checks for launch errors.
template <class CONN>
struct eavlInfoTopologySparseMapOp_GPU
{
    static inline eavlArray::Location location() { return eavlArray::DEVICE; }
    template <class F, class IN, class OUT, class INDEX>
    static void call(int nitems, CONN &conn,
                     const IN inputs, OUT outputs, INDEX indices, F &functor)
    {
        int numThreads = 256;
        dim3 threads(numThreads,   1, 1);
        dim3 blocks (32,           1, 1);
        eavlInfoTopologySparseMapOp_kernel<<< blocks, threads >>>(nitems, conn,
                                                                  inputs, outputs, indices, functor);
        CUDA_CHECK_ERROR();
    }
};

#endif

#endif

// ****************************************************************************
// Class:  eavlInfoTopologySparseMapOp
//
// Purpose:
///   Map from one element in a mesh to the same element, with
///   topological information passed along to the functor.
///   In this sparse version of the operation, the inputs on the destination
///   topology and the outputs are all sparsely indexed by the index array.
//
// Programmer:  Jeremy Meredith
// Creation:    August  1, 2013
//
// Modifications:
// ****************************************************************************
template <class I, class O, class INDEX, class F>
class eavlInfoTopologySparseMapOp : public eavlOperation
{
  protected:
    eavlCellSet *cells;      // cell set supplying the connectivity
    eavlTopology topology;   // which topology (e.g. nodes-of-cells) to use
    I            inputs;
    O            outputs;
    INDEX        indices;    // sparse index array selecting elements
    F            functor;
  public:
    eavlInfoTopologySparseMapOp(eavlCellSet *c, eavlTopology t,
                                I i, O o, INDEX ind, F f)
        : cells(c), topology(t), inputs(i), outputs(o), indices(ind), functor(f)
    {
    }
    // Dispatch on the concrete cell-set type (explicit vs. all-structured)
    // and run the CPU executor with the matching connectivity.
    virtual void GoCPU()
    {
        eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells);
        eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells);
        int n = outputs.first.length();
        if (elExp)
        {
            eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology);
            eavlOpDispatch<eavlInfoTopologySparseMapOp_CPU<eavlExplicitConnectivity> >(n, conn, inputs, outputs, indices, functor);
        }
        else if (elStr)
        {
            eavlRegularConnectivity conn = eavlRegularConnectivity(elStr->GetRegularStructure(),topology);
            eavlOpDispatch<eavlInfoTopologySparseMapOp_CPU<eavlRegularConnectivity> >(n, conn, inputs, outputs, indices, functor);
        }
    }
    // GPU path: for explicit connectivity the index arrays are staged to the
    // device before the dispatch and brought back to the host afterwards.
    virtual void GoGPU()
    {
#ifdef HAVE_CUDA
        eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells);
        eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells);
        int n = outputs.first.length();
        if (elExp)
        {
            eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology);
            conn.shapetype.NeedOnDevice();
            conn.connectivity.NeedOnDevice();
            conn.mapCellToIndex.NeedOnDevice();

            eavlOpDispatch<eavlInfoTopologySparseMapOp_GPU<eavlExplicitConnectivity> >(n, conn, inputs, outputs, indices, functor);

            conn.shapetype.NeedOnHost();
            conn.connectivity.NeedOnHost();
            conn.mapCellToIndex.NeedOnHost();
        }
        else if (elStr)
        {
            eavlRegularConnectivity conn = eavlRegularConnectivity(elStr->GetRegularStructure(),topology);
            eavlOpDispatch<eavlInfoTopologySparseMapOp_GPU<eavlRegularConnectivity> >(n, conn, inputs, outputs, indices, functor);
        }
#else
        THROW(eavlException,"Executing GPU code without compiling under CUDA compiler.");
#endif
    }
};

// helper function for type deduction
template <class I, class O, class INDEX, class F>
eavlInfoTopologySparseMapOp<I,O,INDEX,F> *new_eavlInfoTopologySparseMapOp(eavlCellSet *c, eavlTopology t,
                                                                          I i, O o, INDEX indices, F f)
{
    return new eavlInfoTopologySparseMapOp<I,O,INDEX,F>(c,t,i,o,indices,f);
}

#endif
GB_unop__identity_int64_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_int64_fp32)
// op(A') function:  GB (_unop_tran__identity_int64_fp32)

// C type:   int64_t
// A type:   float
// cast:     int64_t cij = GB_cast_to_int64_t ((double) (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    float

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int64_t z = GB_cast_to_int64_t ((double) (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int64_t z = GB_cast_to_int64_t ((double) (aij)) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_int64_fp32)
(
    int64_t *Cx,        // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            int64_t z = GB_cast_to_int64_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        // (only entries with Ab [p] != 0 are present)
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            int64_t z = GB_cast_to_int64_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_int64_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose loop template uses the GB_CAST_OP macro defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
kmer_counter.h
#ifndef KMER_COUNTER_H__ #define KMER_COUNTER_H__ #include <cstdint> #include <vector> #include <cstdlib> #include <type_traits> #include "khash64.h" #include "./encoder.h" namespace kmerc { KHASH_MAP_INIT_INT64(i16, uint16_t) KHASH_SET_INIT_INT64(i16s) template<typename C, typename IT=uint64_t, typename ArgType> std::vector<khash_t(i16)> build_kmer_counts(const C &kmer_sizes, ArgType fp, bool canon=false, size_t presize=0) { static_assert(std::is_same<ArgType, gzFile>::value || std::is_same<ArgType, char *>::value || std::is_same<ArgType, const char *>::value, "Must be gzFile, char *, or const char *"); bns::RollingHasherSet<IT> rhs(kmer_sizes, canon); using T = khash_t(i16); std::vector<T> kmer_maps(kmer_sizes.size()); std::memset(&kmer_maps[0], 0, sizeof(kmer_maps[0]) * kmer_sizes.size()); if(presize) for(auto &x: kmer_maps) kh_resize(i16, &x, presize); rhs.for_each_hash([&kmer_maps](IT hashvalue, size_t idx){ auto map_ptr = &kmer_maps[idx]; if((idx = kh_get(i16, map_ptr, hashvalue)) != kh_end(map_ptr)) { auto &val = map_ptr->vals[idx]; val += (val != std::numeric_limits<std::decay_t<decltype(*map_ptr->vals)>>::max()); } else { int khr; idx = kh_put(i16, map_ptr, hashvalue, &khr); if(khr < 0) { std::fprintf(stderr, "Error: khr is %d\n", khr); throw std::runtime_error("Failed to insert."); } LOG_ASSERT(idx < map_ptr->n_buckets); map_ptr->vals[idx] = 1u; } }, fp); return kmer_maps; } template<typename C, typename IT=uint64_t, typename ArgType,typename Allocator=sketch::common::Allocator<IT>> std::vector<std::vector<IT, Allocator>> build_kmer_sets(const C &kmer_sizes, ArgType fp, bool canon=false, size_t presize=0) { static_assert(std::is_same<ArgType, gzFile>::value || std::is_same<ArgType, char *>::value || std::is_same<ArgType, const char *>::value, "Must be gzFile, char *, or const char *"); bns::RollingHasherSet<IT> rhs(kmer_sizes, canon); using T = std::vector<IT, Allocator>; std::vector<T> kmer_sets(kmer_sizes.size()); if(presize) for(auto &x: kmer_sets) 
x.reserve(presize); rhs.for_each_hash([&kmer_sets](IT hashvalue, size_t idx){kmer_sets[idx].push_back(hashvalue);}, fp); OMP_PRAGMA("omp parallel for") for(size_t i = 0; i < kmer_sizes.size(); ++i) { auto &v = kmer_sets[i]; std::sort(v.begin(), v.end()); // TODO: provide support for other sorting methods v.erase(std::unique(v.begin(), v.end()), v.end()); } return kmer_sets; } enum DumpFlags: int { WRITE_SHS = 1, WRITE_KVMAP = 2 }; struct WriteFail {}; struct OpenFail {}; template<typename C, typename IT, typename ArgType> void dump_shs(const char *prefix, const C &kmer_sizes, ArgType cfp, bool canon, size_t presize=0) { auto shsets = build_kmer_sets(kmer_sizes, cfp, canon, presize); std::atomic<int> ret; ret.store(0); //#pragma omp parallel for for(size_t i = 0; i < kmer_sizes.size(); ++i) { auto &vec = shsets[i]; auto k = kmer_sizes[i]; std::string fn = std::string(prefix) + "." + std::to_string(k) + ".shs"; gzFile fp = gzopen(fn.data(), "wb"); if(!fp) throw std::runtime_error(std::string("Could not open file at ") + fn + " for writing"); uint64_t nelem = vec.size(); if(gzwrite(fp, &nelem, sizeof(nelem)) != sizeof(nelem)) ret.store(1 << (i % 64)); ssize_t nb = sizeof(vec[0]) * vec.size(); if(gzwrite(fp, vec.data(), nb) != nb) ret.store(1 << (i % 64)); } if(ret) { throw WriteFail{};//std::runtime_error("Failed to write"); } } template<typename C, typename IT=uint64_t, typename ArgType> void dump_maps(const char *prefix, const C &kmer_sizes, ArgType fp, bool canon=false, size_t presize=0, int flag=WRITE_SHS | WRITE_KVMAP) { if(flag == WRITE_SHS) { dump_shs<C, IT, ArgType>(prefix, kmer_sizes, fp, canon); } auto maps = build_kmer_counts(kmer_sizes, fp, canon, presize); std::vector<IT> buf; std::vector<uint16_t> buf16; for(size_t kszidx = 0; kszidx < kmer_sizes.size(); ++kszidx) { auto k = kmer_sizes[kszidx]; auto &map = maps[kszidx]; buf16.resize(kh_size(&map)); buf.resize(kh_size(&map)); size_t used = 0; for(size_t i = 0; i < map.n_buckets; ++i) { if(kh_exist(&map, 
i)) buf[used] = map.keys[i], buf16[used] = map.vals[i], ++used; } const uint64_t count = used; gzFile fp; if(flag & WRITE_KVMAP) { fp = gzopen((std::string(prefix) + "." + std::to_string(k) + ".bin").data(), "wb"); if(!fp) throw std::runtime_error("Could not open file."); gzwrite(fp, &count, sizeof(count)); gzwrite(fp, buf.data(), buf.size() * sizeof(buf[0])); gzwrite(fp, buf16.data(), buf16.size() * sizeof(buf16[0])); gzclose(fp); } if(flag & WRITE_SHS) { std::sort(buf.data(), buf.data() + buf.size()); fp = gzopen((std::string(prefix) + "." + std::to_string(k) + ".shs").data(), "wb"); gzwrite(fp, &count, sizeof(count)); gzwrite(fp, buf.data(), buf.size() * sizeof(buf[0])); gzclose(fp); } std::free(map.keys); std::free(map.vals); std::free(map.flags); } } } // kmerc #endif
dahua_fmt_plug.c
/*
 * Format for cracking Dahua hashes.
 *
 * http://www.securityfocus.com/archive/1/529799
 * https://github.com/depthsecurity/dahua_dvr_auth_bypass
 *
 * This software is Copyright (c) 2014 Dhiru Kholia <dhiru at openwall.com>,
 * and it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_dahua;
#elif FMT_REGISTERS_H
john_register_one(&fmt_dahua);
#else

#include <string.h>

#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif

#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 512
#else
#define OMP_SCALE 32768 // tuned K8-dual HT
#endif // __MIC__
#endif // OMP_SCALE
#endif // _OPENMP

#include "arch.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#include <ctype.h>

#define FORMAT_LABEL "dahua"
#define FORMAT_NAME "\"MD5 based authentication\" Dahua"
#define FORMAT_TAG "$dahua$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 8
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_SIZE 0
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1

static struct fmt_tests tests[] = {
	{"$dahua$4WzwxXxM", "888888"}, // from hashcat.net
	{"$dahua$HRG6OLE6", "Do You Even Lift?"},
	{"$dahua$sh15yfFM", "666666"},
	{"$dahua$6QNMIQGe", "admin"},
	{"$dahua$g2UpKxOg", "passWOrd"},
	{"$dahua$tlJwpbo6", ""},
	{NULL}
};

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *saved_len;
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];

/* Allocate per-candidate buffers; with OpenMP, scale key count by threads. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_num_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_len));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
}

/* Accept only "$dahua$" followed by exactly 8 alphanumeric characters. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p = ciphertext;
	int i;

	if (strncmp(p, FORMAT_TAG, TAG_LENGTH) != 0)
		return 0;
	p = p + TAG_LENGTH;
	if (!p)
		return 0;
	if (strlen(p) != BINARY_SIZE)
		return 0;
	for (i = 0; i < BINARY_SIZE; i++)
		if (!isalnum((int)(unsigned char)p[i]))
			return 0;
	return 1;
}

/* The "binary" is the 8-character compressed hash itself (no decoding). */
static void *get_binary(char *ciphertext)
{
	static union {
		char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	char *p;
	char *out = buf.c;

	p = strrchr(ciphertext, '$') + 1;
	strncpy(out, p, BINARY_SIZE);
	return out;
}

static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

// from hashcat.net (alxchk)
/* Folds a 16-byte MD5 digest into 8 printable [0-9A-Za-z] characters:
 * each output byte is (in[i] + in[i+1]) mod 62, mapped to an
 * alphanumeric character. */
static void compressor(unsigned char *in, unsigned char *out)
{
	int i, j;

	for (i = 0, j = 0; i < 16; i += 2, j++) {
		out[j] = (in[i] + in[i+1]) % 62;
		if (out[j] < 10) {
			out[j] += 48;
		}
		else if (out[j] < 36) {
			out[j] += 55;
		}
		else {
			out[j] += 61;
		}
	}
}

static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		// hash is compressor(md5(password))
		MD5_CTX ctx;
		unsigned char *out = (unsigned char*)crypt_out[index];
		unsigned char hash[16];

		MD5_Init(&ctx);
		MD5_Update(&ctx, saved_key[index], saved_len[index]);
		MD5_Final(hash, &ctx);
		compressor(hash, out);
	}
	return count;
}

/* Without OpenMP only index 0 is checked (key count per crypt is 1). */
static int cmp_all(void *binary, int count)
{
	int index = 0;

#ifdef _OPENMP
	for (; index < count; index++)
#endif
	if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
		return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* Binary comparison is already exact; nothing further to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

static void dahua_set_key(char *key, int index)
{
	saved_len[index] = strlen(key);
	strncpy(saved_key[index], key, sizeof(saved_key[0]));
}

static char *get_key(int index)
{
	return saved_key[index];
}

struct fmt_main fmt_dahua = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT,
		{ NULL },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		dahua_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif
raytrace.h
/*
* @Author: Philippe Dales
* @Date: 2018-07-26 14:26:23
* @Last Modified by: Philippe Dales
* @Last Modified time: 2018-07-26 14:26:23
*/
/* Ray-tracer helper functions for estuary package */
#ifndef RAYTRACE_H
#define RAYTRACE_H

#include <assert.h>
// #include "xseis/process.h"
#include "xseis/structures.h"
#include "xseis/beamform.h"
// #include "xseis/npy.h"
#include "narray.hpp"
#include "nvect.hpp"
#include "solver.hpp"

namespace raytrace {

// Compute traveltimes from stalocs to gridlocs given 1d viscosity (slowness) model
// Uses correct depths but places gridloc cartesian(xy) from staloc
// Make sure visc/tag grids are big enough to account for longest xy cartesian dist
// (tag is modified in FMM, careful)
// Returns a (nsta x ngrid) table of traveltimes in sample units
// (seconds * sr, rounded to nearest uint16_t).
// NOTE(review): assumes stalocs/gridlocs rows are at least 3 floats with
// [2] holding depth (z) -- confirm against Array2D layout in structures.h.
Array2D<uint16_t> TTableFromVisc1D(Array2D<float>& stalocs, Array2D<float>& gridlocs,
			Array2D<double>& viscosity, Array2D<int>& tag,
			const float visc_spacing, float sr)
{
	// consts used to make grids
	const size_t ndim = 2;
	const size_t npad = 2;
	// const float zshift = 500;
	const float xseed = 100;

	// NOTE(review): shape swaps ncol_/nrow_ -- presumably (z, x) ordering;
	// verify against the solver's expected axis order.
	const auto shape = agsis::vect<size_t, ndim>(viscosity.ncol_, viscosity.nrow_);
	const size_t size = agsis::prod(shape);

	// array descriptor of viscosity
	auto viscosity_ad = ArrayDescriptor<double, ndim>(shape, viscosity.data_);
	// array descriptor of tag
	// NOTE(review): this `new MarchingTag[size]` is never deleted -- leak.
	auto tag_ad = ArrayDescriptor<MarchingTag, ndim>(shape, new MarchingTag[size]);
	// array descriptor of traveltime grid, fill with INFS
	auto tgrid = Array2D<double>(shape[0], shape[1]);
	auto tgrid_ad = OrthogonalGrid<double, ndim>(shape, tgrid.data_, visc_spacing);

	size_t nsta = stalocs.nrow_;
	auto ttable = Array2D<uint16_t>(stalocs.nrow_, gridlocs.nrow_);
	auto *ttablerow = ttable.row(0);

	float *sl = nullptr;
	float *gl = nullptr;
	float dxy;
	// NOTE(review): xdest/zdest are float yet hold size_t casts below and
	// are compared against ncol_/nrow_ -- works but relies on implicit
	// conversions; confirm intended.
	float xdest, zdest;
	float tt;
	float zseed;
	auto seed = agsis::vect<double, ndim>();

	// #pragma omp parallel for private(ttablerow, sl, gl, dxy, xdest, zdest, tt, zseed)
	for(size_t i = 0; i < nsta; ++i) {
		printf("%lu\n", i);
		ttablerow = ttable.row(i);
		sl = stalocs.row(i);
		// Seed the marching front at this station's depth, fixed x offset.
		zseed = sl[2];
		seed[0] = static_cast<int>(zseed / visc_spacing + npad + 0.5);
		seed[1] = static_cast<int>(xseed / visc_spacing + npad + 0.5);

		// Reset tag and traveltime grids before each march
		// (FMM mutates both).
		for(size_t j = 0; j < tag.size_; ++j) {
			tag_ad[j] = static_cast<MarchingTag>(tag[j]);
		}
		for(size_t j = 0; j < tgrid_ad.size(); ++j) {
			tgrid_ad[j] = INFINITY;
		}
		FMM_SecondOrder(seed, tag_ad, viscosity_ad, tgrid_ad);

		// Look up each grid point by its (depth, horizontal-distance) cell.
		for(size_t j = 0; j < gridlocs.nrow_; ++j) {
			gl = gridlocs.row(j);
			dxy = process::DistCartesian2D(gl, sl);
			xdest = static_cast<size_t>((xseed + dxy) / visc_spacing + npad + 0.5);
			zdest = static_cast<size_t>(gl[2] / visc_spacing + npad + 0.5);
			assert(xdest < tgrid.ncol_);
			assert(zdest < tgrid.nrow_);
			tt = tgrid(zdest, xdest);
			// convert seconds to samples, rounding to nearest
			ttablerow[j] = static_cast<uint16_t>(tt * sr + 0.5);
		}
	}
	return ttable;
}

// Ray trace for 1D velocity model (tag is modified in FMM, careful)
// Make sure visc/tag grids are big enough to account for longest xy cartesian dist
// Unlike TTableFromVisc1D, a single FMM march from a fixed seed depth (zpad)
// is reused for every station; station depth is folded into the horizontal
// distance instead.
Array2D<uint16_t> BuildTravelTime1D(Array2D<float>& stalocs, Array2D<float>& gridlocs,
			Array2D<double>& viscosity, Array2D<int>& tag, float sr)
{
	// consts I use to make grids
	const size_t ndim = 2;
	const size_t npad = 2;
	const size_t visc_spacing = 5;
	const size_t zpad = 100;
	const float xseed = 100;

	// const auto shape = agsis::vect<size_t, ndim>(ds.nrow_, ds.ncol_);
	const auto shape = agsis::vect<size_t, ndim>(viscosity.ncol_, viscosity.nrow_);
	const size_t size = agsis::prod(shape);
	// double *data = new double[size];
	// ds.load_full_buffer(data);
	auto viscosity_ad = ArrayDescriptor<double, ndim>(shape, viscosity.data_);

	// int *dtag = new int[size];
	// hf["tag"].load_full_buffer(dtag);
	// auto tag_ad = ArrayDescriptor<MarchingTag, ndim>(shape, (MarchingTag *)tag.data_);
	// NOTE(review): this `new MarchingTag[size]` is never deleted -- leak.
	auto tag_ad = ArrayDescriptor<MarchingTag, ndim>(shape, new MarchingTag[size]);

	auto tgrid = Array2D<double>(shape[0], shape[1]);
	auto tgrid_ad = OrthogonalGrid<double, ndim>(shape, tgrid.data_, visc_spacing);
	for(size_t j = 0; j < tgrid_ad.size(); ++j) {
		tgrid_ad[j] = INFINITY;
	}
	for(size_t j = 0; j < tag.size_; ++j) {
		tag_ad[j] = static_cast<MarchingTag>(tag[j]);
	}

	// Single march seeded at (zpad, xseed).
	auto seed = agsis::vect<double, ndim>();
	float zseed = zpad;
	seed[0] = static_cast<int>(zseed / visc_spacing + npad + 0.5);
	seed[1] = static_cast<int>(xseed / visc_spacing + npad + 0.5);

	FMM_SecondOrder(seed, tag_ad, viscosity_ad, tgrid_ad);

	size_t nsta = stalocs.nrow_;
	// auto ttable = Array2D<float>(nsta, gridlocs.nrow_);
	auto ttable = Array2D<uint16_t>(stalocs.nrow_, gridlocs.nrow_);
	auto *ttablerow = ttable.row(0);
	float *sl = nullptr;
	float *gl = nullptr;
	float dxy;
	float xdest, zdest;
	float tt;
	// auto ttable = Vector<float>(gridlocs.nrow_);
	// size_t nsta = 100;

	#pragma omp parallel for private(ttablerow, sl, gl, dxy, xdest, zdest, tt)
	for(size_t i = 0; i < nsta; ++i) {
		// printf("%lu\n", i);
		ttablerow = ttable.row(i);
		sl = stalocs.row(i);

		for(size_t j = 0; j < gridlocs.nrow_; ++j) {
			gl = gridlocs.row(j);
			// station depth |sl[2]| is added to the horizontal distance
			dxy = process::DistCartesian2D(gl, sl) + std::abs(sl[2]);
			xdest = (xseed + dxy) / visc_spacing + npad;
			zdest = (gl[2] + zpad) / visc_spacing + npad;
			assert(xdest < tgrid.ncol_);
			assert(zdest < tgrid.nrow_);
			tt = tgrid(static_cast<size_t>(zdest + 0.5), static_cast<size_t>(xdest + 0.5));
			ttablerow[j] = static_cast<uint16_t>(tt * sr + 0.5);
		}
	}
	return ttable;
}

// rows will be locs1 and cols locs2
// Same single-march scheme as BuildTravelTime1D, but the lookup uses the
// depth DIFFERENCE between the two location sets.
Array2D<uint16_t> BuildRow1D(Array2D<float>& locs1, Array2D<float>& locs2,
			Array2D<double>& viscosity, Array2D<int>& tag, float sr)
{
	// consts I use to make grids
	const size_t ndim = 2;
	const size_t npad = 2;
	const size_t visc_spacing = 5;
	const size_t zpad = 100;
	const float xseed = 100;

	// const auto shape = agsis::vect<size_t, ndim>(ds.nrow_, ds.ncol_);
	const auto shape = agsis::vect<size_t, ndim>(viscosity.ncol_, viscosity.nrow_);
	const size_t size = agsis::prod(shape);
	// double *data = new double[size];
	// ds.load_full_buffer(data);
	auto viscosity_ad = ArrayDescriptor<double, ndim>(shape, viscosity.data_);

	// int *dtag = new int[size];
	// hf["tag"].load_full_buffer(dtag);
	// auto tag_ad = ArrayDescriptor<MarchingTag, ndim>(shape, (MarchingTag *)tag.data_);
	// NOTE(review): this `new MarchingTag[size]` is never deleted -- leak.
	auto tag_ad = ArrayDescriptor<MarchingTag, ndim>(shape, new MarchingTag[size]);

	auto tgrid = Array2D<double>(shape[0], shape[1]);
	auto tgrid_ad = OrthogonalGrid<double, ndim>(shape, tgrid.data_, visc_spacing);
	for(size_t j = 0; j < tgrid_ad.size(); ++j) {
		tgrid_ad[j] = INFINITY;
	}
	for(size_t j = 0; j < tag.size_; ++j) {
		tag_ad[j] = static_cast<MarchingTag>(tag[j]);
	}

	auto seed = agsis::vect<double, ndim>();
	float zseed = zpad;
	seed[0] = static_cast<int>(zseed / visc_spacing + npad + 0.5);
	seed[1] = static_cast<int>(xseed / visc_spacing + npad + 0.5);

	FMM_SecondOrder(seed, tag_ad, viscosity_ad, tgrid_ad);

	auto ttable = Array2D<uint16_t>(locs1.nrow_, locs2.nrow_);

	#pragma omp parallel for
	for(size_t i = 0; i < locs1.nrow_; ++i) {
		// printf("%lu\n", i);
		uint16_t *ttablerow = ttable.row(i);
		float *l1p = locs1.row(i);

		for(size_t j = 0; j < locs2.nrow_; ++j) {
			float *l2p = locs2.row(j);
			float dxy = process::DistCartesian2D(l2p, l1p);
			float dz = std::abs(l1p[2] - l2p[2]);
			float xdest = (xseed + dxy) / visc_spacing + npad;
			float zdest = (zpad + dz) / visc_spacing + npad;
			assert(xdest < tgrid.ncol_);
			assert(zdest < tgrid.nrow_);
			float tt = tgrid(static_cast<size_t>(zdest + 0.5), static_cast<size_t>(xdest + 0.5));
			ttablerow[j] = static_cast<uint16_t>(tt * sr + 0.5);
		}
	}
	return ttable;
}

}
#endif
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,8);t1++) { lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16)); ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(16*t2-Nz-4,8)),t1);t3<=min(min(min(floord(Nt+Ny-4,8),floord(8*t1+Ny+13,8)),floord(16*t2+Ny+12,8)),floord(16*t1-16*t2+Nz+Ny+11,8));t3++) { for (t4=max(max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-28,32)),ceild(8*t3-Ny-28,32));t4<=min(min(min(min(floord(Nt+Nx-4,32),floord(8*t1+Nx+13,32)),floord(16*t2+Nx+12,32)),floord(8*t3+Nx+4,32)),floord(16*t1-16*t2+Nz+Nx+11,32));t4++) { for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),8*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),8*t3+6),32*t4+30),16*t1-16*t2+Nz+13);t5++) { for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(32*t4,t5+1); ubv=min(32*t4+31,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } } } /* End of CLooG 
code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
laplace2d.c
/* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <math.h> #include <string.h> #include <openacc.h> #include "timer.h" int main(int argc, char** argv) { int n = 4096; int m = 4096; int iter_max = 1000; const float pi = 2.0 * asinf(1.0f); const float tol = 1.0e-5f; float error = 1.0f; float A[n][m]; float Anew[n][m]; float y0[n]; memset(A, 0, n * m * sizeof(float)); // set boundary conditions for (int i = 0; i < m; i++) { A[0][i] = 0.f; A[n-1][i] = 0.f; } for (int j = 0; j < n; j++) { y0[j] = sinf(pi * j / (n-1)); A[j][0] = y0[j]; A[j][m-1] = y0[j]*expf(-pi); } #if _OPENACC acc_init(acc_device_nvidia); #endif printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m); StartTimer(); int iter = 0; #pragma omp parallel for shared(Anew) for (int i = 1; i < m; i++) { Anew[0][i] = 0.f; Anew[n-1][i] = 0.f; } #pragma omp parallel for shared(Anew) for (int j = 1; j < n; j++) { Anew[j][0] = y0[j]; Anew[j][m-1] = y0[j]*expf(-pi); } #pragma acc data copy(A, Anew) while ( error > tol && iter < iter_max ) { error = 0.f; #pragma omp parallel for shared(m, n, Anew, A) #pragma acc kernels for( int j = 1; j < n-1; j++) { for( int i = 1; i < m-1; i++ ) { Anew[j][i] = 0.25f * ( A[j][i+1] + A[j][i-1] + A[j-1][i] + A[j+1][i]); error = fmaxf( error, fabsf(Anew[j][i]-A[j][i])); } } #pragma omp parallel for shared(m, n, Anew, A) #pragma acc kernels for( int j = 1; j < n-1; j++) { for( int i = 1; i < m-1; i++ ) { A[j][i] = Anew[j][i]; } } if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error); iter++; } double runtime = GetTimer(); printf(" total: %f s\n", runtime / 1000.f); }
deconvolution_4x4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON

// 4x4 deconvolution (transposed convolution), stride 1.
// Each input pixel scatter-accumulates a 4x4 kernel patch into the output;
// top_blob must be pre-sized by the caller. The NEON path processes 4 input
// pixels per iteration, with a scalar tail loop for the remainder.
static void deconv4x4s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        // start from the bias (or zero when no bias term is given)
        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        for (int q=0; q<inch; q++)
        {
            const float* img0 = bottom_blob.channel(q);

            // 16 weights per (outch, inch) pair; one row of 4 per output row
            const float* kernel0 = kernel + p*inch*16 + q*16;

            const float* r0 = img0;

            const float* k0 = kernel0;
            const float* k1 = kernel0 + 4;
            const float* k2 = kernel0 + 8;
            const float* k3 = kernel0 + 12;

#if __ARM_NEON
            float32x4_t _k0 = vld1q_f32(k0);
            float32x4_t _k1 = vld1q_f32(k1);
            float32x4_t _k2 = vld1q_f32(k2);
            float32x4_t _k3 = vld1q_f32(k3);
#endif // __ARM_NEON

            for (int i = 0; i < h; i++)
            {
                float* outptr = out.row(i);

                // the 4 output rows this input row contributes to
                float* outptr0 = outptr;
                float* outptr1 = outptr0 + outw;
                float* outptr2 = outptr1 + outw;
                float* outptr3 = outptr2 + outw;

                int j = 0;
#if __ARM_NEON
                for (; j+3<w; j+=4)
                {
                    float32x4_t _v = vld1q_f32(r0);

                    // output row 0: accumulate v * k0[c] at offsets 0..3
                    // (overlapping unaligned loads/stores implement the scatter)
                    float32x4_t _out00 = vld1q_f32(outptr0 + 0);
                    _out00 = vmlaq_lane_f32(_out00, _v, vget_low_f32(_k0), 0);
                    vst1q_f32(outptr0 + 0, _out00);

                    float32x4_t _out01 = vld1q_f32(outptr0 + 1);
                    _out01 = vmlaq_lane_f32(_out01, _v, vget_low_f32(_k0), 1);
                    vst1q_f32(outptr0 + 1, _out01);

                    float32x4_t _out02 = vld1q_f32(outptr0 + 2);
                    _out02 = vmlaq_lane_f32(_out02, _v, vget_high_f32(_k0), 0);
                    vst1q_f32(outptr0 + 2, _out02);

                    float32x4_t _out03 = vld1q_f32(outptr0 + 3);
                    _out03 = vmlaq_lane_f32(_out03, _v, vget_high_f32(_k0), 1);
                    vst1q_f32(outptr0 + 3, _out03);

                    // output row 1
                    float32x4_t _out10 = vld1q_f32(outptr1 + 0);
                    _out10 = vmlaq_lane_f32(_out10, _v, vget_low_f32(_k1), 0);
                    vst1q_f32(outptr1 + 0, _out10);

                    float32x4_t _out11 = vld1q_f32(outptr1 + 1);
                    _out11 = vmlaq_lane_f32(_out11, _v, vget_low_f32(_k1), 1);
                    vst1q_f32(outptr1 + 1, _out11);

                    float32x4_t _out12 = vld1q_f32(outptr1 + 2);
                    _out12 = vmlaq_lane_f32(_out12, _v, vget_high_f32(_k1), 0);
                    vst1q_f32(outptr1 + 2, _out12);

                    float32x4_t _out13 = vld1q_f32(outptr1 + 3);
                    _out13 = vmlaq_lane_f32(_out13, _v, vget_high_f32(_k1), 1);
                    vst1q_f32(outptr1 + 3, _out13);

                    // output row 2
                    float32x4_t _out20 = vld1q_f32(outptr2 + 0);
                    _out20 = vmlaq_lane_f32(_out20, _v, vget_low_f32(_k2), 0);
                    vst1q_f32(outptr2 + 0, _out20);

                    float32x4_t _out21 = vld1q_f32(outptr2 + 1);
                    _out21 = vmlaq_lane_f32(_out21, _v, vget_low_f32(_k2), 1);
                    vst1q_f32(outptr2 + 1, _out21);

                    float32x4_t _out22 = vld1q_f32(outptr2 + 2);
                    _out22 = vmlaq_lane_f32(_out22, _v, vget_high_f32(_k2), 0);
                    vst1q_f32(outptr2 + 2, _out22);

                    float32x4_t _out23 = vld1q_f32(outptr2 + 3);
                    _out23 = vmlaq_lane_f32(_out23, _v, vget_high_f32(_k2), 1);
                    vst1q_f32(outptr2 + 3, _out23);

                    // output row 3
                    float32x4_t _out30 = vld1q_f32(outptr3 + 0);
                    _out30 = vmlaq_lane_f32(_out30, _v, vget_low_f32(_k3), 0);
                    vst1q_f32(outptr3 + 0, _out30);

                    float32x4_t _out31 = vld1q_f32(outptr3 + 1);
                    _out31 = vmlaq_lane_f32(_out31, _v, vget_low_f32(_k3), 1);
                    vst1q_f32(outptr3 + 1, _out31);

                    float32x4_t _out32 = vld1q_f32(outptr3 + 2);
                    _out32 = vmlaq_lane_f32(_out32, _v, vget_high_f32(_k3), 0);
                    vst1q_f32(outptr3 + 2, _out32);

                    float32x4_t _out33 = vld1q_f32(outptr3 + 3);
                    _out33 = vmlaq_lane_f32(_out33, _v, vget_high_f32(_k3), 1);
                    vst1q_f32(outptr3 + 3, _out33);

                    r0 += 4;
                    outptr0 += 4;
                    outptr1 += 4;
                    outptr2 += 4;
                    outptr3 += 4;
                }
#endif // __ARM_NEON
                // scalar tail: one input pixel -> 4x4 output patch
                for (; j < w; j++)
                {
                    float val = r0[0];

                    outptr0[0] += val * k0[0];
                    outptr0[1] += val * k0[1];
                    outptr0[2] += val * k0[2];
                    outptr0[3] += val * k0[3];

                    outptr1[0] += val * k1[0];
                    outptr1[1] += val * k1[1];
                    outptr1[2] += val * k1[2];
                    outptr1[3] += val * k1[3];

                    outptr2[0] += val * k2[0];
                    outptr2[1] += val * k2[1];
                    outptr2[2] += val * k2[2];
                    outptr2[3] += val * k2[3];

                    outptr3[0] += val * k3[0];
                    outptr3[1] += val * k3[1];
                    outptr3[2] += val * k3[2];
                    outptr3[3] += val * k3[3];

                    r0++;
                    outptr0++;
                    outptr1++;
                    outptr2++;
                    outptr3++;
                }
            }
        }
    }
}

// 4x4 deconvolution, stride 2. Each input pixel scatters a 4x4 patch whose
// top-left corner advances by 2 output pixels per input pixel, so adjacent
// patches overlap by 2; the NEON path uses de-interleaving vld2q/vst2q to
// update even/odd output columns separately.
static void deconv4x4s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        for (int q=0; q<inch; q++)
        {
            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch*16 + q*16;

            const float* r0 = img0;

            const float* k0 = kernel0;
            const float* k1 = kernel0 + 4;
            const float* k2 = kernel0 + 8;
            const float* k3 = kernel0 + 12;

#if __ARM_NEON
            float32x4_t _k0 = vld1q_f32(k0);
            float32x4_t _k1 = vld1q_f32(k1);
            float32x4_t _k2 = vld1q_f32(k2);
            float32x4_t _k3 = vld1q_f32(k3);
#endif // __ARM_NEON

            for (int i = 0; i < h; i++)
            {
                // stride 2: input row i maps to output rows 2i .. 2i+3
                float* outptr = out.row(i*2);

                float* outptr0 = outptr;
                float* outptr1 = outptr0 + outw;
                float* outptr2 = outptr1 + outw;
                float* outptr3 = outptr2 + outw;

                int j = 0;
#if __ARM_NEON
                for (; j+3<w; j+=4)
                {
                    float32x4_t _v = vld1q_f32(r0);

                    // row 0
                    float32x4x2_t _out0 = vld2q_f32(outptr0);
                    // 0,2,4,6
                    _out0.val[0] = vmlaq_lane_f32(_out0.val[0], _v, vget_low_f32(_k0), 0);
                    // 1,3,5,7
                    _out0.val[1] = vmlaq_lane_f32(_out0.val[1], _v, vget_low_f32(_k0), 1);

                    vst2q_f32(outptr0, _out0);

                    _out0 = vld2q_f32(outptr0 + 2);
                    // 2,4,6,8
                    _out0.val[0] = vmlaq_lane_f32(_out0.val[0], _v, vget_high_f32(_k0), 0);
                    // 3,5,7,9
                    _out0.val[1] = vmlaq_lane_f32(_out0.val[1], _v, vget_high_f32(_k0), 1);

                    vst2q_f32(outptr0 + 2, _out0);

                    // row 1
                    float32x4x2_t _out1 = vld2q_f32(outptr1);
                    // 0,2,4,6
                    _out1.val[0] = vmlaq_lane_f32(_out1.val[0], _v, vget_low_f32(_k1), 0);
                    // 1,3,5,7
                    _out1.val[1] = vmlaq_lane_f32(_out1.val[1], _v, vget_low_f32(_k1), 1);

                    vst2q_f32(outptr1, _out1);

                    _out1 = vld2q_f32(outptr1 + 2);
                    // 2,4,6,8
                    _out1.val[0] = vmlaq_lane_f32(_out1.val[0], _v, vget_high_f32(_k1), 0);
                    // 3,5,7,9
                    _out1.val[1] = vmlaq_lane_f32(_out1.val[1], _v, vget_high_f32(_k1), 1);

                    vst2q_f32(outptr1 + 2, _out1);

                    // row 2
                    float32x4x2_t _out2 = vld2q_f32(outptr2);
                    _out2.val[0] = vmlaq_lane_f32(_out2.val[0], _v, vget_low_f32(_k2), 0);
                    _out2.val[1] = vmlaq_lane_f32(_out2.val[1], _v, vget_low_f32(_k2), 1);

                    vst2q_f32(outptr2, _out2);

                    _out2 = vld2q_f32(outptr2 + 2);
                    _out2.val[0] = vmlaq_lane_f32(_out2.val[0], _v, vget_high_f32(_k2), 0);
                    _out2.val[1] = vmlaq_lane_f32(_out2.val[1], _v, vget_high_f32(_k2), 1);

                    vst2q_f32(outptr2 + 2, _out2);

                    // row 3
                    float32x4x2_t _out3 = vld2q_f32(outptr3);
                    _out3.val[0] = vmlaq_lane_f32(_out3.val[0], _v, vget_low_f32(_k3), 0);
                    _out3.val[1] = vmlaq_lane_f32(_out3.val[1], _v, vget_low_f32(_k3), 1);

                    vst2q_f32(outptr3, _out3);

                    _out3 = vld2q_f32(outptr3 + 2);
                    _out3.val[0] = vmlaq_lane_f32(_out3.val[0], _v, vget_high_f32(_k3), 0);
                    _out3.val[1] = vmlaq_lane_f32(_out3.val[1], _v, vget_high_f32(_k3), 1);

                    vst2q_f32(outptr3 + 2, _out3);

                    r0 += 4;
                    // 4 input pixels advance the output by 8 columns
                    outptr0 += 8;
                    outptr1 += 8;
                    outptr2 += 8;
                    outptr3 += 8;
                }
#endif // __ARM_NEON
                // scalar tail: one input pixel -> 4x4 patch, 2-column step
                for (; j < w; j++)
                {
                    float val = r0[0];

                    outptr0[0] += val * k0[0];
                    outptr0[1] += val * k0[1];
                    outptr0[2] += val * k0[2];
                    outptr0[3] += val * k0[3];

                    outptr1[0] += val * k1[0];
                    outptr1[1] += val * k1[1];
                    outptr1[2] += val * k1[2];
                    outptr1[3] += val * k1[3];

                    outptr2[0] += val * k2[0];
                    outptr2[1] += val * k2[1];
                    outptr2[2] += val * k2[2];
                    outptr2[3] += val * k2[3];

                    outptr3[0] += val * k3[0];
                    outptr3[1] += val * k3[1];
                    outptr3[2] += val * k3[2];
                    outptr3[3] += val * k3[3];

                    r0++;
                    outptr0 += 2;
                    outptr1 += 2;
                    outptr2 += 2;
                    outptr3 += 2;
                }
            }
        }
    }
}
omp-ser.c
/* compilation: clang -pedantic -Wall -o omp-ser-cl omp-ser.c -fopenmp=libomp -fopenmp-targets=nvptx64-nvidia-cuda -L/usr/local/cuda/lib64 --cuda-path=/usr/local/cuda minimal compilation command: clang -o omp-ser-cl omp-ser.c -fopenmp=libomp -fopenmp-targets=nvptx64-nvidia-cuda source: based on "OpenMP Application Programming Interface Examples - Version 5.0.0 - November 2019" Section 4.7.1 'Simple target data Construct' */ #include <stdlib.h> #include <omp.h> int main (){ const int N = 1000000; unsigned int i; unsigned int j; float *a; float *b; float *c; a = (float *)malloc(N * sizeof(float)); b = (float *)malloc(N * sizeof(float)); c = (float *)malloc(N * sizeof(float)); //srand((unsigned int)time(NULL)); for (i = 0; i < N; i++) { a[i] = 0; b[i] = ((float)rand() / (float)(RAND_MAX)) * 4.0; c[i] = ((float)rand() / (float)(RAND_MAX)) * 4.0; } #pragma omp target data map (to: c[0:N], b[0:N]) map(from: a[0:N]) { #pragma omp target #pragma omp parallel for for (j=0; j<N; j++) { a[j] = b[j]+3.73*c[j]; } } return 0; }
serial_tree_learner.h
#ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_

#include <LightGBM/tree_learner.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>

#include "feature_histogram.hpp"
#include "split_info.hpp"
#include "data_partition.hpp"
#include "leaf_splits.hpp"

#include <cstdio>
#include <vector>
#include <random>
#include <cmath>
#include <memory>

#ifdef USE_GPU
// Use 4KBytes aligned allocator for ordered gradients and ordered hessians when GPU is enabled.
// This is necessary to pin the two arrays in memory and make transferring faster.
#include <boost/align/aligned_allocator.hpp>
#endif

using namespace json11;

namespace LightGBM {

/*!
* \brief Used for learning a tree by single machine
*/
class SerialTreeLearner: public TreeLearner {
 public:
  explicit SerialTreeLearner(const Config* config);

  ~SerialTreeLearner();

  void Init(const Dataset* train_data, bool is_constant_hessian) override;

  void ResetTrainingData(const Dataset* train_data) override;

  void ResetConfig(const Config* config) override;

  Tree* Train(const score_t* gradients, const score_t *hessians,
              bool is_constant_hessian, Json& forced_split_json) override;

  Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients,
                          const score_t* hessians) const override;

  Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred,
                          const score_t* gradients, const score_t* hessians) override;

  // Restricts training to the given subset of row indices (bagging).
  void SetBaggingData(const data_size_t* used_indices, data_size_t num_data) override {
    data_partition_->SetUsedDataIndices(used_indices, num_data);
  }

  // Adds each leaf's output to the score of every data point that fell into
  // that leaf.  Leaves are processed in parallel; rows inside a leaf are
  // disjoint across leaves, so the writes do not race.
  void AddPredictionToScore(const Tree* tree, double* out_score) const override {
    if (tree->num_leaves() <= 1) { return; }
    CHECK(tree->num_leaves() <= data_partition_->num_leaves());
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < tree->num_leaves(); ++i) {
      double output = static_cast<double>(tree->LeafOutput(i));
      data_size_t cnt_leaf_data = 0;
      auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
      for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
        out_score[tmp_idx[j]] += output;
      }
    }
  }

  void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, const double* prediction,
                       data_size_t total_num_data, const data_size_t* bag_indices,
                       data_size_t bag_cnt) const override;

  void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, double prediction,
                       data_size_t total_num_data, const data_size_t* bag_indices,
                       data_size_t bag_cnt) const override;

 protected:
  /*!
  * \brief Some initial works before training
  */
  virtual void BeforeTrain();

  /*!
  * \brief Some initial works before FindBestSplit
  */
  virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);

  virtual void FindBestSplits();

  virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);

  virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);

  /*!
  * \brief Partition tree and data according best split.
  * \param tree Current tree, will be splitted on this function.
  * \param best_leaf The index of leaf that will be splitted.
  * \param left_leaf The index of left leaf after splitted.
  * \param right_leaf The index of right leaf after splitted.
  */
  virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf);

  /* Force splits with forced_split_json dict and then return num splits forced.*/
  virtual int32_t ForceSplits(Tree* tree, Json& forced_split_json, int* left_leaf,
                              int* right_leaf, int* cur_depth,
                              bool *aborted_last_force_split);

  /*!
  * \brief Get the number of data in a leaf
  * \param leaf_idx The index of leaf
  * \return The number of data in the leaf_idx leaf
  */
  inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;

  // NOTE(review): presumably the cost-effective-gradient-boosting penalty for
  // using feature_index at leaf_index — confirm against the .cpp.
  double CalculateOndemandCosts(int feature_index, int leaf_index);

  /*! \brief number of data */
  data_size_t num_data_;
  /*! \brief number of features */
  int num_features_;
  /*! \brief training data */
  const Dataset* train_data_;
  /*! \brief gradients of current iteration */
  const score_t* gradients_;
  /*! \brief hessians of current iteration */
  const score_t* hessians_;
  /*! \brief training data partition on leaves */
  std::unique_ptr<DataPartition> data_partition_;
  /*! \brief used for generate used features */
  Random random_;
  /*! \brief used for sub feature training, is_feature_used_[i] = false means don't used feature i */
  std::vector<int8_t> is_feature_used_;
  /*! \brief pointer to histograms array of parent of current leaves */
  FeatureHistogram* parent_leaf_histogram_array_;
  /*! \brief pointer to histograms array of smaller leaf */
  FeatureHistogram* smaller_leaf_histogram_array_;
  /*! \brief pointer to histograms array of larger leaf */
  FeatureHistogram* larger_leaf_histogram_array_;
  /*! \brief store best split points for all leaves */
  std::vector<SplitInfo> best_split_per_leaf_;
  /*! \brief store best split per feature for all leaves */
  std::vector<SplitInfo> splits_per_leaf_;
  /*! \brief stores best thresholds for all feature for smaller leaf */
  std::unique_ptr<LeafSplits> smaller_leaf_splits_;
  /*! \brief stores best thresholds for all feature for larger leaf */
  std::unique_ptr<LeafSplits> larger_leaf_splits_;
  std::vector<int> valid_feature_indices_;
#ifdef USE_GPU
  /*! \brief gradients of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
#else
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t> ordered_hessians_;
#endif
  /*! \brief Store ordered bin */
  std::vector<std::unique_ptr<OrderedBin>> ordered_bins_;
  /*! \brief True if has ordered bin */
  bool has_ordered_bin_ = false;
  /*! \brief is_data_in_leaf_[i] != 0 means i-th data is marked */
  std::vector<char> is_data_in_leaf_;
  /*! \brief used to cache historical histogram to speed up*/
  HistogramPool histogram_pool_;
  /*! \brief config of tree learner*/
  const Config* config_;
  int num_threads_;
  std::vector<int> ordered_bin_indices_;
  bool is_constant_hessian_;
  std::vector<bool> feature_used;
  std::vector<uint32_t> feature_used_in_data;
};

// Negative leaf_idx values are used as "no leaf" sentinels, hence the 0.
inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const {
  if (leaf_idx >= 0) {
    return data_partition_->leaf_count(leaf_idx);
  } else {
    return 0;
  }
}

}  // namespace LightGBM
#endif   // LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
wand-view.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % W W AAA N N DDDD % % W W A A NN N D D % % W W W AAAAA N N N D D % % WW WW A A N NN D D % % W W A A N N DDDD % % % % V V IIIII EEEEE W W % % V V I E W W % % V V I EEE W W W % % V V I E WW WW % % V IIIII EEEEE W W % % % % % % MagickWand Wand View Methods % % % % Software Design % % Cristy % % March 2003 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickWand/studio.h" #include "MagickWand/MagickWand.h" #include "MagickWand/magick-wand-private.h" #include "MagickWand/wand.h" #include "MagickCore/monitor-private.h" #include "MagickCore/thread-private.h" /* Define declarations. */ #define WandViewId "WandView" /* Typedef declarations. */ struct _WandView { size_t id; char name[MagickPathExtent], *description; RectangleInfo extent; MagickWand *wand; Image *image; CacheView *view; PixelWand ***pixel_wands; ExceptionInfo *exception; MagickBooleanType debug; size_t signature; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e W a n d V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneWandView() makes a copy of the specified wand view. 
%
%  The format of the CloneWandView method is:
%
%      WandView *CloneWandView(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport WandView *CloneWandView(const WandView *wand_view)
{
  WandView
    *clone_view;

  register ssize_t
    i;

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  clone_view=(WandView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) clone_view->id);
  clone_view->description=ConstantString(wand_view->description);
  /*
    Fix: the clone's wand was never assigned, leaving it NULL; the iterator
    methods dereference view->wand->images.
  */
  clone_view->wand=wand_view->wand;
  clone_view->image=CloneImage(wand_view->image,0,0,MagickTrue,
    wand_view->exception);
  clone_view->view=CloneCacheView(wand_view->view);
  clone_view->extent=wand_view->extent;
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,wand_view->exception);
  /*
    Fix: the per-thread PixelWand table was indexed below without ever being
    allocated (pixel_wands was NULL after ResetMagickMemory), so cloning any
    view dereferenced a null pointer.  Allocate one slot per worker thread,
    mirroring AcquirePixelsThreadSet().
  */
  clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory((size_t)
    GetMagickResourceLimit(ThreadResource),sizeof(*clone_view->pixel_wands));
  if (clone_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) ResetMagickMemory(clone_view->pixel_wands,0,(size_t)
    GetMagickResourceLimit(ThreadResource)*sizeof(*clone_view->pixel_wands));
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      wand_view->pixel_wands[i],wand_view->extent.width);
  clone_view->debug=wand_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=MagickWandSignature;
  return(clone_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y W a n d V i e w                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyWandView() deallocates memory associated with a wand view.
%
%  The format of the DestroyWandView method is:
%
%      WandView *DestroyWandView(WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/

/*
  Release the per-thread PixelWand arrays (one array of number_wands wands per
  worker thread) and the table itself.  Tolerates partially-populated tables
  so it can serve as the error path of AcquirePixelsThreadSet().
*/
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands)
{
  register ssize_t
    i;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixel_wands[i] != (PixelWand **) NULL)
      pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands);
  pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands);
  return(pixel_wands);
}

WandExport WandView *DestroyWandView(WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  wand_view->pixel_wands=DestroyPixelsThreadSet(wand_view->pixel_wands,
    wand_view->extent.width);
  /*
    Fix: the description string (always allocated via ConstantString by
    NewWandView/NewWandViewExtent) was previously leaked.
  */
  if (wand_view->description != (char *) NULL)
    wand_view->description=DestroyString(wand_view->description);
  /*
    Fix: only CloneWandView populates ->image; views from NewWandView leave it
    NULL, and DestroyImage asserts on NULL, so guard the call.
  */
  if (wand_view->image != (Image *) NULL)
    wand_view->image=DestroyImage(wand_view->image);
  wand_view->view=DestroyCacheView(wand_view->view);
  wand_view->exception=DestroyExceptionInfo(wand_view->exception);
  wand_view->signature=(~MagickWandSignature);
  RelinquishWandId(wand_view->id);
  wand_view=(WandView *) RelinquishMagickMemory(wand_view);
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DuplexTransferWandViewIterator() iterates over three wand views in
%  parallel and calls your transfer method for each scanline of the view.  The
%  source and duplex pixel extent is not confined to the image canvas-- that is
%  you can include negative offsets or widths or heights that exceed the image
%  dimension.  However, the destination wand view is confined to the image
%  canvas-- that is no negative offsets or widths or heights that exceed the
%  image dimension are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType DuplexTransferImageViewMethod(const WandView *source,
%        const WandView *duplex,WandView *destination,const ssize_t y,
%        const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the DuplexTransferWandViewIterator method is:
%
%      MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
%        WandView *duplex,WandView *destination,
%        DuplexTransferWandViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o duplex: the duplex wand view.
%
%    o destination: the destination wand view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
  WandView *duplex,WandView *destination,DuplexTransferWandViewMethod transfer,
  void *context)
{
  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  /*
    NOTE(review): duplex and destination are dereferenced below without the
    NULL/signature asserts applied to source -- confirm callers guarantee
    valid views for all three.
  */
  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (transfer == (DuplexTransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  /* destination pixels are modified in place, so promote to DirectClass. */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* trip count hint for the scheduler; assumes extent.y <= extent.height. */
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict duplex_pixels,
      *magick_restrict pixels;

    register ssize_t
      x;

    register Quantum
      *magick_restrict destination_pixels;

    /* a failure on any row aborts the remaining rows (cannot break in OMP). */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* load this thread's PixelWand row from the source scanline. */
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
      duplex->extent.width,1,duplex->exception);
    if (duplex_pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) duplex->extent.width; x++)
    {
      PixelSetQuantumPixel(duplex->image,duplex_pixels,
        duplex->pixel_wands[id][x]);
      duplex_pixels+=GetPixelChannels(duplex->image);
    }
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    if (destination_pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelSetQuantumPixel(destination->image,destination_pixels,
        destination->pixel_wands[id][x]);
      destination_pixels+=GetPixelChannels(destination->image);
    }
    /* user callback mutates the destination PixelWand row. */
    if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* re-fetch the authentic scanline and write the wand row back. */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
        destination_pixels);
      destination_pixels+=GetPixelChannels(destination->image);
    }
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_DuplexTransferWandViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w E x c e p t i o n                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewException() returns the severity, reason, and description of any
%  error that occurs when utilizing a wand view.
%
%  The format of the GetWandViewException method is:
%
%      char *GetWandViewException(const WandView *wand_view,
%        ExceptionType *severity)
%
%  A description of each parameter follows:
%
%    o wand_view: the pixel wand_view.
%
%    o severity: the severity of the error is returned here.
%
*/
WandExport char *GetWandViewException(const WandView *wand_view,
  ExceptionType *severity)
{
  char
    *description;

  assert(wand_view != (const WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  assert(severity != (ExceptionType *) NULL);
  *severity=wand_view->exception->severity;
  /* caller owns the returned string and must free it. */
  description=(char *) AcquireQuantumMemory(2UL*MagickPathExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  *description='\0';
  if (wand_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      wand_view->exception->severity,wand_view->exception->reason),
      MagickPathExtent);
  if (wand_view->exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(description," (",MagickPathExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        wand_view->exception->severity,wand_view->exception->description),
        MagickPathExtent);
      (void) ConcatenateMagickString(description,")",MagickPathExtent);
    }
  return(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w E x t e n t                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewExtent() returns the wand view extent.
%
%  The format of the GetWandViewExtent method is:
%
%      RectangleInfo GetWandViewExtent(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport RectangleInfo GetWandViewExtent(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  /* returned by value; the caller gets a copy of the extent rectangle. */
  return(wand_view->extent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w I t e r a t o r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewIterator() iterates over the wand view in parallel and calls
%  your get method for each scanline of the view.  The pixel extent is
%  not confined to the image canvas-- that is you can include negative offsets
%  or widths or heights that exceed the image dimension.  Any updates to
%  the pixels in your callback are ignored.
%
%  The callback signature is:
%
%      MagickBooleanType GetImageViewMethod(const WandView *source,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback get method that must be
%  executed by a single thread at a time.
%
%  The format of the GetWandViewIterator method is:
%
%      MagickBooleanType GetWandViewIterator(WandView *source,
%        GetWandViewMethod get,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o get: the get callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType GetWandViewIterator(WandView *source,
  GetWandViewMethod get,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (get == (GetWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* trip count hint for the scheduler; assumes extent.y <= extent.height. */
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *pixels;

    register ssize_t
      x;

    /* a failure on any row aborts the remaining rows (cannot break in OMP). */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* load this thread's PixelWand row; the callback only reads from it. */
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_GetWandViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w P i x e l s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewPixels() returns the wand
view pixel_wands.
%
%  The format of the GetWandViewPixels method is:
%
%      PixelWand *GetWandViewPixels(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport PixelWand **GetWandViewPixels(const WandView *wand_view)
{
  /* returns the PixelWand row belonging to the calling OpenMP thread. */
  const int
    id = GetOpenMPThreadId();

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  return(wand_view->pixel_wands[id]);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w W a n d                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewWand() returns the magick wand associated with the wand view.
%
%  The format of the GetWandViewWand method is:
%
%      MagickWand *GetWandViewWand(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport MagickWand *GetWandViewWand(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  return(wand_view->wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s W a n d V i e w                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsWandView() returns MagickTrue if the parameter is verified as a wand
%  view object.
%
%  The format of the IsWandView method is:
%
%      MagickBooleanType IsWandView(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport MagickBooleanType IsWandView(const WandView *wand_view)
{
  size_t
    length;

  if (wand_view == (const WandView *) NULL)
    return(MagickFalse);
  if (wand_view->signature != MagickWandSignature)
    return(MagickFalse);
  /* views are named "WandView-<id>"; check the prefix set by NewWandView. */
  length=strlen(WandViewId);
  if (LocaleNCompare(wand_view->name,WandViewId,length) != 0)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w W a n d V i e w                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewWandView() returns a wand view required for all other methods in the
%  Wand View API.
%
%  The format of the NewWandView method is:
%
%      WandView *NewWandView(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the wand.
%
*/

/*
  Allocate one array of number_wands PixelWands per OpenMP worker thread;
  returns NULL on failure (partially-built tables are torn down).
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands)
{
  PixelWand
    ***pixel_wands;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=GetOpenMPMaximumThreads();
  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  (void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_wands[i]=NewPixelWands(number_wands);
    if (pixel_wands[i] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands));
  }
  return(pixel_wands);
}

WandExport WandView *NewWandView(MagickWand *wand)
{
  ExceptionInfo
    *exception;

  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickWandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void)
    FormatLocaleString(wand_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  /* the wand must be assigned before the cache view is acquired from it. */
  wand_view->wand=wand;
  exception=AcquireExceptionInfo();
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,exception);
  /* default extent is the full canvas of the wand's first image. */
  wand_view->extent.width=wand->images->columns;
  wand_view->extent.height=wand->images->rows;
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width);
  wand_view->exception=exception;
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=MagickWandSignature;
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w W a n d V i e w E x t e n t                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewWandViewExtent() returns a wand view required for all other methods
%  in the Wand View API.
%
%  The format of the NewWandViewExtent method is:
%
%      WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x,y,columns,rows:  These values define the perimeter of an extent of
%      pixel_wands view.
%
*/
WandExport WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  ExceptionInfo
    *exception;

  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickWandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  /*
    Fix: the cache view was previously acquired from wand_view->wand->images
    BEFORE wand_view->wand was assigned; wand_view->wand was still NULL from
    ResetMagickMemory, so every call dereferenced a null pointer.  Assign the
    wand first, matching NewWandView().
  */
  wand_view->wand=wand;
  exception=AcquireExceptionInfo();
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,exception);
  wand_view->extent.width=width;
  wand_view->extent.height=height;
  wand_view->extent.x=x;
  wand_view->extent.y=y;
  wand_view->exception=exception;
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=MagickWandSignature;
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t W a n d V i e w D e s c r i p t i o n                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetWandViewDescription() associates a description with an image view.
%
%  The format of the SetWandViewDescription method is:
%
%      void SetWandViewDescription(WandView *image_view,const char *description)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
%    o description: the wand view description.
%
*/
/* NOTE(review): declared MagickExport while every sibling uses WandExport --
   kept as-is to avoid changing linkage; confirm against the public header. */
MagickExport void SetWandViewDescription(WandView *wand_view,
  const char *description)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  /*
    Fix: the previous description (always allocated by NewWandView /
    NewWandViewExtent via ConstantString) was overwritten without being
    released, leaking one string per call.
  */
  if (wand_view->description != (char *) NULL)
    wand_view->description=DestroyString(wand_view->description);
  wand_view->description=ConstantString(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t W a n d V i e w I t e r a t o r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetWandViewIterator() iterates over the wand view in parallel and calls
%  your set method for each scanline of the view.  The pixel extent is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.  The pixels are initially
%  undefined and any settings you make in the callback method are automagically
%  synced back to your image.
%
%  The callback signature is:
%
%      MagickBooleanType SetImageViewMethod(ImageView *destination,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetWandViewIterator method is:
%
%      MagickBooleanType SetWandViewIterator(WandView *destination,
%        SetWandViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the wand view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType SetWandViewIterator(WandView *destination,
  SetWandViewMethod set,void *context)
{
  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(destination != (WandView *) NULL);
  assert(destination->signature == MagickWandSignature);
  if (set == (SetWandViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* destination pixels are modified in place, so promote to DirectClass. */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* trip count hint for the scheduler; assumes extent.y <= extent.height. */
  height=destination->extent.height-destination->extent.y;
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(destination_image,destination_image,height,1)
#endif
  for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register ssize_t
      x;

    register Quantum
      *magick_restrict pixels;

    /* a failure on any row aborts the remaining rows (cannot break in OMP). */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
      y,destination->extent.width,1,destination->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* callback fills this thread's PixelWand row for scanline y. */
    if (set(destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* copy the wand row into the authentic pixel scanline. */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
        pixels);
      pixels+=GetPixelChannels(destination->image);
    }
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_SetWandViewIterator)
#endif
        proceed=SetImageProgress(destination_image,destination->description,
          progress++,destination->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f e r W a n d V i e w I t e r a t o r                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferWandViewIterator() iterates over two wand views in parallel and
%  calls your transfer method for each scanline of the view.  The source pixel
%  extent is not confined to the image canvas-- that is you can include
%  negative offsets or widths or heights that exceed the image dimension.
%  However, the destination wand view is confined to the image canvas-- that
%  is no negative offsets or widths or heights that exceed the image dimension
%  are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType TransferImageViewMethod(const WandView *source,
%        WandView *destination,const ssize_t y,const int thread_id,
%        void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the TransferWandViewIterator method is:
%
%      MagickBooleanType TransferWandViewIterator(WandView *source,
%        WandView *destination,TransferWandViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o destination: the destination wand view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
% */ WandExport MagickBooleanType TransferWandViewIterator(WandView *source, WandView *destination,TransferWandViewMethod transfer,void *context) { Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == MagickWandSignature); if (transfer == (TransferWandViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; destination_image=destination->wand->images; status=SetImageStorageClass(destination_image,DirectClass, destination->exception); if (status == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=source->extent.height-source->extent.y; #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(source_image,destination_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const Quantum *magick_restrict pixels; register ssize_t x; register Quantum *magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) source->extent.width; x++) { PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]); pixels+=GetPixelChannels(source->image); } destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1, destination->exception); if (destination_pixels == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) destination->extent.width; x++) { PixelSetQuantumPixel(destination->image,destination_pixels, destination->pixel_wands[id][x]); 
destination_pixels+=GetPixelChannels(destination->image); } if (transfer(source,destination,y,id,context) == MagickFalse) status=MagickFalse; destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1, destination->exception); for (x=0; x < (ssize_t) destination->extent.width; x++) { PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x], destination_pixels); destination_pixels+=GetPixelChannels(destination->image); } sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception); if (sync == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_TransferWandViewIterator) #endif proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U p d a t e W a n d V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UpdateWandViewIterator() iterates over the wand view in parallel and calls % your update method for each scanline of the view. The pixel extent is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension are permitted. Updates to pixels % in your callback are automagically synced back to the image. % % The callback signature is: % % MagickBooleanType UpdateImageViewMethod(WandView *source,const ssize_t y, % const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback update method that must be % executed by a single thread at a time. 
%
%  The format of the UpdateWandViewIterator method is:
%
%      MagickBooleanType UpdateWandViewIterator(WandView *source,
%        UpdateWandViewMethod update,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o update: the update callback method.
%
%    o context: the user defined context.
%
*/
/*
  UpdateWandViewIterator: load each scanline into the per-thread pixel wands,
  run the user's update callback, then write the wands back to the same row
  and sync the cache view.

  Fix vs. original: the first copy loop advances `pixels` one full row past
  where GetCacheViewAuthenticPixels() returned it.  The original write-back
  loop then continued from that advanced pointer, reading/writing one row
  beyond the authentic pixel region (out-of-bounds, and the user's updates
  were never stored in the visible row).  The pointer is now rewound to the
  row head before the write-back.
*/
WandExport MagickBooleanType UpdateWandViewIterator(WandView *source,
  UpdateWandViewMethod update,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (update == (UpdateWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  /* Pixels are modified in place, so the image must be DirectClass. */
  status=SetImageStorageClass(source_image,DirectClass,source->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register ssize_t
      x;

    register Quantum
      *magick_restrict pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Load the row into this thread's pixel wands. */
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* Rewind to the row head: the load loop advanced the pointer. */
    pixels-=(ssize_t) source->extent.width*GetPixelChannels(source->image);
    /* Store the (possibly updated) wands back into the row. */
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelGetQuantumPixel(source->image,source->pixel_wands[id][x],pixels);
      pixels+=GetPixelChannels(source->image);
    }
    sync=SyncCacheViewAuthenticPixels(source->view,source->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* Serialize progress reporting: `progress++` is a shared counter. */
        #pragma omp critical (MagickWand_UpdateWandViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
pyfr_gemm_cm.c
/****************************************************************************** ** Copyright (c) 2016-2018, Intel Corporation ** ** All rights reserved. ** ** ** ** Redistribution and use in source and binary forms, with or without ** ** modification, are permitted provided that the following conditions ** ** are met: ** ** 1. Redistributions of source code must retain the above copyright ** ** notice, this list of conditions and the following disclaimer. ** ** 2. Redistributions in binary form must reproduce the above copyright ** ** notice, this list of conditions and the following disclaimer in the ** ** documentation and/or other materials provided with the distribution. ** ** 3. Neither the name of the copyright holder nor the names of its ** ** contributors may be used to endorse or promote products derived ** ** from this software without specific prior written permission. ** ** ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) 
******************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <sys/time.h> #include <mkl.h> #include <libxsmm.h> static double sec(struct timeval start, struct timeval end) { return ((double)(((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)))) / 1.0e6; } int main(int argc, char *argv[]) { int n,m,k; int lda,ldb,ldc; double* a; double* b; double* c1; double* c2; struct timeval l_start, l_end; double l_total = 0.0; int reps, i, j; const int nblock = 16; double alpha = 1.0, beta = 1.0; char transa = 'N', transb = 'N'; libxsmm_gemm_prefetch_type l_prefetch_op = LIBXSMM_PREFETCH_NONE; libxsmm_dmmfunction kernel = NULL; if (argc != 5) { fprintf(stderr, "Invalid ./a,out M N K reps\n"); exit(-1); } m = atoi(argv[1]); n = atoi(argv[2]); k = atoi(argv[3]); reps = atoi(argv[4]); /* this is col-major what you want to use for the sizes in question */ lda = m; ldb = k; ldc = m; if (n % nblock != 0) { fprintf(stderr, "N needs to be divisable by %i\n", nblock); exit(-1); } a = (double*)_mm_malloc(lda*k*sizeof(double), 64); b = (double*)_mm_malloc(ldb*n*sizeof(double), 64); c1 = (double*)_mm_malloc(ldc*n*sizeof(double), 64); c2 = (double*)_mm_malloc(ldc*n*sizeof(double), 64); #pragma omp parallel for for (i = 0; i < lda*k; i++) { a[i] = libxsmm_rand_f64(); } #pragma omp parallel for for (i = 0; i < ldb*n; i++) { b[i] = libxsmm_rand_f64(); } #pragma omp parallel for for (i = 0; i < ldc*n; i++) { c1[i] = 0; c2[i] = 0; } /* JIT Kernel */ kernel = libxsmm_dmmdispatch(m, nblock, k, NULL, NULL, NULL, NULL, NULL, NULL, &l_prefetch_op ); /* init MKL */ dgemm(&transa, &transb, &m, &n, &k, &alpha, a, &lda, b, &ldb, &beta, c1, &ldc); #pragma omp parallel for for (i = 0; i < ldc*n; i++) { c1[i] = 0; c2[i] = 0; } gettimeofday(&l_start, NULL); for ( j = 0; j < reps; j++ ) { dgemm(&transa, &transb, &m, &n, &k, &alpha, a, &lda, b, &ldb, &beta, c1, &ldc); } gettimeofday(&l_end, NULL); l_total = 
sec(l_start, l_end); fprintf(stdout, "time[s] MKL (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps ); fprintf(stdout, "GFLOPS MKL (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total ); fprintf(stdout, "GB/s MKL (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total ); gettimeofday(&l_start, NULL); for ( j = 0; j < reps; j++ ) { #pragma omp parallel for private(i) for ( i = 0; i < n; i+=nblock) { kernel( a, b+(ldb*i), c2+(ldc*i), NULL, NULL, NULL ); } gettimeofday(&l_end, NULL); } l_total = sec(l_start, l_end); fprintf(stdout, "time[s] libxsmm (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps ); fprintf(stdout, "GFLOPS libxsmm (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total ); fprintf(stdout, "GB/s libxsmm (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total ); /* test result */ double max_error = 0.0; for ( i = 0; i < ldc*n; i++) { if (max_error < fabs(c1[i] - c2[i])) { max_error = fabs(c1[i] - c2[i]); } } printf("max error: %f\n\n", max_error); }
parallel-simple2.c
/* Copyright (c) 2015-2019, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Simone Atzeni (simone@cs.utah.edu), Joachim Protze (joachim.protze@tu-dresden.de), Jonas Hahnfeld (hahnfeld@itc.rwth-aachen.de), Ganesh Gopalakrishnan, Zvonimir Rakamaric, Dong H. Ahn, Gregory L. Lee, Ignacio Laguna, and Martin Schulz. LLNL-CODE-773957 All rights reserved. This file is part of Archer. For details, see https://pruners.github.io/archer. Please also read https://github.com/PRUNERS/archer/blob/master/LICENSE. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. 
DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // RUN: %libarcher-compile-and-run | FileCheck %s #include <omp.h> #include <stdio.h> int main(int argc, char* argv[]) { int var = 0; // Create team of threads so that there is no implicit happens before // when creating the thread. #pragma omp parallel num_threads(2) { } var++; #pragma omp parallel num_threads(2) shared(var) { if (omp_get_thread_num() == 1) { var++; } } // implicit barrier fprintf(stderr, "DONE\n"); int error = (var != 2); return error; } // CHECK: DONE
Physics_Eta.c
/*
 * Physics_Eta.c
 *
 *  Created on: Jul 27, 2017
 *      Author: abauville
 */

#include "stokes.h"

#define USE_INVETA_EP false
#define COMPUTE_SHEAR_VISCOSITY false

/*
 * Physics_Eta_init: initialize effective viscosity (eta), shear modulus (G),
 * plastic "viscosity" (khi) and the visco-elastic factor Z on all embedded
 * cells, then copy boundary neighbours and interpolate the fields to shear
 * nodes.  Viscosities are built from the phase list of each cell using
 * diffusion, dislocation and Peierls creep laws evaluated at a reference
 * strain rate (Physics->epsRef).
 * NOTE(review): creep-law formulas assume `compute` is a floating type and
 * MatProps B/E/V/n/gamma/tau/q follow the usual Arrhenius conventions —
 * confirm against MatProps' declaration in stokes.h.
 */
void Physics_Eta_init(Model* Model)
{
	Grid* Grid = &(Model->Grid);
	Physics* Physics = &(Model->Physics);
	MatProps* MatProps = &(Model->MatProps);
	Numerics* Numerics = &(Model->Numerics);

	int iy, ix, iCell;
	SinglePhase* thisPhaseInfo;
	compute P, T;
	int phase;
	compute EII, weight;
	compute B, E, V, n, taup, q, s, gamma;
	compute invEtaDiff, invEtaDisl, invEtaPei;
	compute R = Physics->R;            // gas constant
	compute eta, G, cohesion, frictionAngle, eta_thisPhase;
	compute sumOfWeights;

	// =======================================================
	// Initial viscosity
	// Loop over inner embedded cells only; boundary rings are filled by the
	// copyNeighbours calls below.
	for (iy = 1; iy<Grid->nyEC-1; iy++) {
		for (ix = 1; ix<Grid->nxEC-1; ix++) {
			iCell = ix + iy*Grid->nxEC;
			//Physics->etaVisc[iCell] = Physics->eta0[iCell];
			//Physics->eta[iCell] = Physics->eta0[iCell];
			thisPhaseInfo = Physics->phaseListHead[iCell];
			// Reference strain-rate invariant used to linearize the creep laws.
			EII = fabs(Physics->epsRef)/1.0;
#if (HEAT)
			P = Physics->P[iCell];
			T = Physics->T[iCell];
#else
			// Without the heat module, use non-dimensional T=1, P=0.
			T = 1.0;
			P = 0.0;
#endif
			sumOfWeights = Physics->sumOfWeightsCells[iCell];
			eta = 0.0;
			G = 0.0;
			cohesion = 0.0;
			frictionAngle = 0.0;
			compute invEta_EP = 0.0;
			// Accumulate phase contributions weighted by their cell coverage.
			while (thisPhaseInfo != NULL) {
				invEtaDiff = 0.0;
				invEtaDisl = 0.0;
				invEtaPei = 0.0;
				phase = thisPhaseInfo->phase;
				weight = thisPhaseInfo->weight;
				// Harmonic average of the shear modulus (inverse accumulated).
				G += weight/MatProps->G[phase];
				//G += weight*MatProps->G[phase];
				//G += log10(MatProps->G[phase])*weight;
				//G += weight/log10(MatProps->G[phase]);
				cohesion += MatProps->cohesion[phase] * weight;
				frictionAngle += MatProps->frictionAngle[phase] * weight;
				if (MatProps->vDiff[phase].isActive) {
					// Diffusion creep: linear in stress.
					B = MatProps->vDiff[phase].B;
					E = MatProps->vDiff[phase].E;
					V = MatProps->vDiff[phase].V;
					invEtaDiff = (2.0*(B*exp( - (E+V*P)/(R*T) )));
				}
				if (MatProps->vDisl[phase].isActive) {
					// Dislocation creep: power-law with exponent n.
					B = MatProps->vDisl[phase].B;
					E = MatProps->vDisl[phase].E;
					V = MatProps->vDisl[phase].V;
					n = MatProps->vDisl[phase].n;
					invEtaDisl = (2.0*pow(B*exp( - (E+V*P)/(R*T) ),1.0/n)*pow(EII,-1.0/n+1.0));
				}
				if (MatProps->vPei[phase].isActive) {
					// Peierls creep (regularized, exponent s depends on P,T).
					B = MatProps->vPei[phase].B;
					E = MatProps->vPei[phase].E;
					V = MatProps->vPei[phase].V;
					gamma = MatProps->vPei[phase].gamma;
					taup  = MatProps->vPei[phase].tau;
					q = MatProps->vPei[phase].q;
					s = (E+V*P)/(R*T)*pow((1.0-gamma),(q-1.0))*q*gamma;
					invEtaPei = (2.0*pow(B*pow(gamma*taup,-s)*exp( - (E+V*P)/(R*T) * pow((1.0-gamma),q) ) ,1.0/s)*pow(EII,-1.0/s+1.0) );
				}
				thisPhaseInfo = thisPhaseInfo->next;
				// Mechanisms act in parallel: inverse viscosities add.
				eta_thisPhase = (1.0 / (invEtaDiff + invEtaDisl + invEtaPei));
				invEta_EP += (1.0/(MatProps->G[phase]*Physics->dt) + 1.0/eta_thisPhase) * weight;
				eta += weight * eta_thisPhase;
			}
			// Arithmetic average of per-phase viscosities over the cell.
			eta = eta / sumOfWeights;
			//eta = pow(10.0,eta / sumOfWeights);
			invEta_EP /= sumOfWeights;
			/*
			if (eta>Numerics->etaMax) {
				eta = Numerics->etaMax;
			}
			if (eta<Numerics->etaMin) {
				eta = Numerics->etaMin;
			}
			*/
			Physics->eta[iCell] = eta;
			//Physics->G[iCell] = G/Physics->sumOfWeightsCells[iCell]/G;
			// Harmonic mean of G (G currently holds sum of weight/G).
			Physics->G[iCell] = Physics->sumOfWeightsCells[iCell]/G;
			//Physics->G[iCell] = G/Physics->sumOfWeightsCells[iCell];
			//Physics->G[iCell] = pow(10.0,G/Physics->sumOfWeightsCells[iCell]);
			//Physics->G[iCell] = pow(10.0,Physics->sumOfWeightsCells[iCell]/G);
			// No plasticity at init: khi effectively infinite.
			Physics->khi[iCell] = 1E30;
			//Physics->Z[iCell] = 1.0/( 1.0/Physics->khi[iCell] + 1.0/Physics->eta[iCell] + 1.0/(Physics->G[iCell]*Physics->dt) );
#if (USE_INVETA_EP)
			Physics->Z[iCell] = 1.0/( invEta_EP) ;
#else
			// Visco-elastic factor: harmonic sum of eta and G*dt.
			Physics->Z[iCell] = 1.0/( 1.0/Physics->eta[iCell] + 1.0/(Physics->G[iCell]*Physics->dt) );
#endif
			if (Physics->Z[iCell]<Numerics->etaMin) {
				Physics->Z[iCell] = Numerics->etaMin;
			}
#if (DARCY)
			// Bulk (compaction) quantities scale with porosity phi.
			Physics->eta_b[iCell] = Physics->eta[iCell]/(Physics->phi[iCell]);
			Physics->khi_b[iCell] = 1e30;
			Physics->Zb[iCell] = 1.0/( 1.0/Physics->eta_b[iCell] + 1.0/(Physics->G[iCell]/(sqrt(Physics->phi[iCell]))*Physics->dt) );
#endif
		}
	}
	// Fill the boundary rings by copying inner neighbours.
	Physics_CellVal_SideValues_copyNeighbours_Global(Physics->eta, Grid);
	Physics_CellVal_SideValues_copyNeighbours_Global(Physics->khi, Grid);
	Physics_CellVal_SideValues_copyNeighbours_Global(Physics->G, Grid);
	Physics_CellVal_SideValues_copyNeighbours_Global(Physics->Z, Grid);
#if (DARCY)
	Physics_CellVal_SideValues_copyNeighbours_Global(Physics->khi_b, Grid);
	Physics_CellVal_SideValues_copyNeighbours_Global(Physics->eta_b, Grid);
	Physics_CellVal_SideValues_copyNeighbours_Global(Physics->Zb, Grid);
#endif

	// Interpolate all cell-centered fields to shear (corner) nodes.
	int iNode;
	for (iy = 0; iy<Grid->nyS; iy++) {
		for (ix = 0; ix<Grid->nxS; ix++) {
			iNode = ix + iy*Grid->nxS;
			Physics->etaShear[iNode] = Interp_ECVal_Cell2Node_Local(Physics->eta, ix , iy, Grid->nxEC);
			Physics->GShear[iNode] = Interp_ECVal_Cell2Node_Local(Physics->G, ix , iy, Grid->nxEC);
			Physics->khiShear[iNode] = Interp_ECVal_Cell2Node_Local(Physics->khi, ix , iy, Grid->nxEC);
			Physics->ZShear[iNode] = Interp_ECVal_Cell2Node_Local(Physics->Z, ix , iy, Grid->nxEC);
		}
	}
}

/*
 * Physics_Eta_Simple_updateGlobal: grid-based (non-particle) viscosity
 * update: recompute the effective strain rate, apply the visco-elastic
 * predictor on cells, then interpolate the resulting fields to shear nodes.
 */
void Physics_Eta_Simple_updateGlobal(Model* Model)
{
	Grid* Grid = &(Model->Grid);
	Physics* Physics = &(Model->Physics);
	int iNode, iy, ix;

	// ===== get EffStrainRate =====
	Physics_Eta_EffStrainRate_updateGlobal (Model);
	// ===== get EffStrainRate =====

	// ===== get the Z as a visco-elastic predictor =====
	Physics_Eta_VEpredictor_updateGlobalCell(Model);

	// ================================================================================
	// Shear nodes viscosity
	//#pragma omp parallel for private(iy,ix, iNode) OMP_SCHEDULE
	for (iy = 0; iy<Grid->nyS; iy++) {
		for (ix = 0; ix<Grid->nxS; ix++) {
			iNode = ix + iy*Grid->nxS;
			Physics->etaShear[iNode] = Interp_ECVal_Cell2Node_Local(Physics->eta, ix , iy, Grid->nxEC);
			Physics->khiShear[iNode] = Interp_ECVal_Cell2Node_Local(Physics->khi, ix , iy, Grid->nxEC);
			Physics->GShear[iNode] = Interp_ECVal_Cell2Node_Local(Physics->G, ix , iy, Grid->nxEC);
			Physics->ZShear[iNode] = Interp_ECVal_Cell2Node_Local(Physics->Z, ix , iy, Grid->nxEC);
		}
	}
	// Shear nodes viscosity
	// ================================================================================
}

/*
 * Physics_Eta_FromParticles_updateGlobal: particle-based viscosity update.
 * Rheology (including plastic yielding) is evaluated per particle, then
 * scattered to cells and shear nodes with bilinear weights.
 */
void Physics_Eta_FromParticles_updateGlobal(Model* Model)
{
	Grid* Grid = &(Model->Grid);
	MatProps* MatProps = &(Model->MatProps);
	Particles* Particles = &(Model->Particles);
	Physics* Physics = &(Model->Physics);

	compute locX, locY;
	int ix, iy;
	INIT_PARTICLE   // NOTE(review): macro presumably declares iNode/thisParticle — confirm in stokes.h

	// Zero the accumulators before scattering particle contributions.
	int iCell;
	for (iCell = 0; iCell < Grid->nECTot; ++iCell) {
		Physics->eta [iCell] = 0.0;
		Physics->G   [iCell] = 0.0;
		Physics->khi [iCell] = 0.0;
		Physics->Z   [iCell] = 0.0;
	}
	for (iNode = 0; iNode < Grid->nSTot; ++iNode) {
		Physics->etaShear [iNode] = 0.0;
		Physics->GShear   [iNode] = 0.0;
		Physics->khiShear [iNode] = 0.0;
		Physics->ZShear   [iNode] = 0.0;
	}

	// Work arrays for strain-rate components and old stresses on the grid.
	// NOTE(review): malloc results are not checked before use.
	compute EII;
	compute* EIICell   = (compute*) malloc(Grid->nECTot * sizeof(compute));
	compute* SII0Cell  = (compute*) malloc(Grid->nECTot * sizeof(compute));
	compute* Exx       = (compute*) malloc(Grid->nECTot * sizeof(compute));
	compute* Exy       = (compute*) malloc(Grid->nSTot * sizeof(compute));
	compute* dVxdyGrid = (compute*) malloc(Grid->nSTot * sizeof(compute));
	compute* dVydxGrid = (compute*) malloc(Grid->nSTot * sizeof(compute));
	compute* Rotxy     = (compute*) malloc(Grid->nSTot * sizeof(compute));
	compute dVxdy, dVydx, dVxdx, dVydy;
	compute sq_sigma_xy0, sigma_xx0;

	// Cell-centered deviatoric normal strain rate Exx and old stress invariant.
	for (iy = 1; iy<Grid->nyEC-1; iy++) {
		for (ix = 1; ix<Grid->nxEC-1; ix++) {
			iCell = ix + iy*Grid->nxEC;
			dVxdx = (Physics->Vx[(ix) + (iy)*Grid->nxVx] - Physics->Vx[(ix-1) + (iy)*Grid->nxVx])/Grid->dx;
			dVydy = (Physics->Vy[(ix) + (iy)*Grid->nxVy] - Physics->Vy[(ix) + (iy-1)*Grid->nxVy])/Grid->dy;
			Exx[iCell] = 0.5*(dVxdx-dVydy);
			Physics_StrainRateInvariant_getLocalCell(Model, ix, iy, &EII);
			EIICell[iCell] = EII;
			// Average sigma_xy^2 from the four surrounding shear nodes.
			sq_sigma_xy0  = Physics->sigma_xy_0[ix-1+(iy-1)*Grid->nxS] * Physics->sigma_xy_0[ix-1+(iy-1)*Grid->nxS];
			sq_sigma_xy0 += Physics->sigma_xy_0[ix  +(iy-1)*Grid->nxS] * Physics->sigma_xy_0[ix  +(iy-1)*Grid->nxS];
			sq_sigma_xy0 += Physics->sigma_xy_0[ix-1+(iy  )*Grid->nxS] * Physics->sigma_xy_0[ix-1+(iy  )*Grid->nxS];
			sq_sigma_xy0 += Physics->sigma_xy_0[ix  +(iy  )*Grid->nxS] * Physics->sigma_xy_0[ix  +(iy  )*Grid->nxS];
			sigma_xx0 = Physics->sigma_xx_0[iCell];// + Physics->Dsigma_xx_0[iCell];
			SII0Cell[iCell] = sqrt((sigma_xx0)*(sigma_xx0) + 0.25*(sq_sigma_xy0));
		}
	}
	Physics_CellVal_SideValues_copyNeighbours_Global(Exx, Grid);

	// Node-based shear strain rate Exy and rotation rate.
	for (iy = 0; iy<Grid->nyS; iy++) {
		for (ix = 0; ix<Grid->nxS; ix++) {
			iNode = ix + iy*Grid->nxS;
			dVxdy = ( Physics->Vx[ix  + (iy+1)*Grid->nxVx] - Physics->Vx[ix  + (iy  )*Grid->nxVx] )/Grid->dy;
			dVydx = ( Physics->Vy[ix+1+ iy*Grid->nxVy] - Physics->Vy[ix  + iy*Grid->nxVy] )/Grid->dx;
			dVxdyGrid[iNode] = dVxdy;
			dVydxGrid[iNode] = dVydx;
			Exy[iNode] = 0.5*(dVxdy+dVydx);
			Rotxy[iNode] = 0.5*(dVxdy-dVydx);
		}
	}

	compute ExyPart, ExxPart;
	// Scatter loop: evaluate rheology on each particle and accumulate to grid.
	for (iy = 0; iy < Grid->nyS; ++iy) {
		for (ix = 0; ix < Grid->nxS; ++ix) {
			iNode = ix  + (iy  )*Grid->nxS;
			thisParticle = Particles->linkHead[iNode];
			// Loop through the particles in the shifted cell
			// ======================================
			while (thisParticle!=NULL) {
				locX = Particles_getLocX(ix, thisParticle->x,Grid);
				locY = Particles_getLocY(iy, thisParticle->y,Grid);
				int IxN[4], IyN[4];
				IxN[0] =  0;  IyN[0] =  0; // lower left
				IxN[1] =  1;  IyN[1] =  0; // lower right
				IxN[2] =  0;  IyN[2] =  1; // upper left
				IxN[3] =  1;  IyN[3] =  1; // upper right
				// ===== weight cells =====
				// Pick the one cell center in whose quadrant the particle sits.
				int i;
				if (locX>=0 && locY>=0) { // upper right
					i = 3;
				} else if (locX<0 && locY>=0) { // upper left
					i = 2;
				} else if (locX>=0 && locY<0) { // lower right
					i = 1;
				} else if (locX<0 && locY<0) { // lower left
					i = 0;
				} else {
					printf("error in Interp_ECVal_Cell2Particle_Local. No case was triggered\n.");
					exit(0);
				}
				iCell = (ix+IxN[i] + (iy+IyN[i]) * Grid->nxEC);
				compute weightCell = fabs(locX)*fabs(locY);
				// ===== weight cells =====
				// ===== weight nodes =====
				compute weightNode = (1.0 - fabs(locX)) * (1.0 - fabs(locY));
				// ===== weight nodes =====
				int phase = thisParticle->phase;
				// Strain rate interpolated to the particle position.
				ExxPart = Interp_ECVal_Cell2Particle_Local(Exx, ix, iy, Grid->nxEC, locX, locY);
				ExyPart = Interp_NodeVal_Node2Particle_Local(Exy, ix, iy, Grid->nxS, Grid->nyS, locX, locY);
				EII = sqrt(ExxPart*ExxPart + ExyPart*ExyPart);
				compute eta;
				// NOTE(review): T, R, P are hard-wired to non-dimensional values
				// here (no HEAT coupling in this path) — confirm intended.
				compute T = 1.0;
				compute invEtaDiff = 0.0;
				compute invEtaDisl = 0.0;
				compute invEtaPei = 0.0;
				compute BDiff, BDisl, BPei;
				compute B, E, V, n, gamma, taup, q, s;
				compute R = 1.0;
				compute P = 0.0;
				if (MatProps->vDiff[phase].isActive) {
					// Diffusion creep (linear).
					B = MatProps->vDiff[phase].B;
					E = MatProps->vDiff[phase].E;
					V = MatProps->vDiff[phase].V;
					BDiff = B*exp( - (E+V*P)/(R*T) );
					invEtaDiff = (2.0*(BDiff));
				}
				if (MatProps->vDisl[phase].isActive) {
					// Dislocation creep (power-law).
					B = MatProps->vDisl[phase].B;
					E = MatProps->vDisl[phase].E;
					V = MatProps->vDisl[phase].V;
					n = MatProps->vDisl[phase].n;
					BDisl = B*exp( - (E+V*P)/(R*T) );
					invEtaDisl = (2.0*pow(BDisl,1.0/n)*pow(EII,-1.0/n+1.0));
				}
				if (MatProps->vPei[phase].isActive) {
					// Peierls creep (regularized).
					B = MatProps->vPei[phase].B;
					E = MatProps->vPei[phase].E;
					V = MatProps->vPei[phase].V;
					gamma = MatProps->vPei[phase].gamma;
					taup  = MatProps->vPei[phase].tau;
					q = MatProps->vPei[phase].q;
					s = (E+V*P)/(R*T)*pow((1.0-gamma),(q-1.0))*q*gamma;
					BPei = B*pow(gamma*taup,-s)*exp( - (E+V*P)/(R*T) * pow((1.0-gamma),q) );
					invEtaPei = (2.0*pow(BPei ,1.0/s)*pow(EII,-1.0/s+1.0) );
				}
				eta = (1.0 / (invEtaDiff + invEtaDisl + invEtaPei));
				compute G = MatProps->G[phase];
				compute cohesion = MatProps->cohesion[phase];
				compute frictionAngle = MatProps->frictionAngle[phase];
				compute dt = Physics->dt;
				// Visco-elastic factor before plasticity.
				compute Z = 1.0/(1.0/eta + 1.0/(G*dt));
				compute Sxx0 = thisParticle->sigma_xx_0;
				compute Sxy0 = thisParticle->sigma_xy_0;
				//compute SII0 = sqrt(Sxx0*Sxx0 + Sxy0*Sxy0);
				/*
#if (USE_UPPER_CONVECTED)
				compute RotxyPart = Interp_NodeVal_Node2Particle_Local(Rotxy, ix, iy, Grid->nxS, Grid->nyS, locX, locY);
				int nxEC = Grid->nxEC;
				int nxS = Grid->nxS;
				int nyS = Grid->nyS;
				compute sqEII_Part = ExxPart*ExxPart + ExyPart*ExyPart;
				compute sqSII0_Part = Sxx0*Sxx0 + Sxy0*Sxy0;
				compute dVxdyPart = Interp_NodeVal_Node2Particle_Local(dVxdyGrid, ix, iy, Grid->nxS, Grid->nyS, locX, locY);
				compute dVydxPart = Interp_NodeVal_Node2Particle_Local(dVydxGrid, ix, iy, Grid->nxS, Grid->nyS, locX, locY);
				//compute Eff_strainRate = 1.0/(2.0*G*dt) * sqrt(pow((2.0*ExxPart*G*dt + Sxx0 + 2.0*dt*(Sxx0*ExxPart + Sxy0*dVxdyPart)),2.0) + pow((2.0*ExyPart*G*dt - Sxx0*dt*2.0*RotxyPart+ Sxy0),2.0));
				compute Eff_strainRate = sqrt(sqEII_Part + ExxPart*Sxx0/(G*dt) + ExyPart*Sxy0/(G*dt) + (1.0/(2.0*G*dt))*(1.0/(2.0*G*dt))*sqSII0_Part );
#else
				compute Eff_strainRate = sqrt(EII*EII + ExxPart*Sxx0/(G*dt) + ExyPart*Sxy0/(G*dt) + (1.0/(2.0*G*dt))*(1.0/(2.0*G*dt))*SII0*SII0 );
#endif
				*/
				// Effective (visco-elastic) strain rate taken from the grid field.
				compute Eff_strainRate = Interp_ECVal_Cell2Particle_Local(Physics->EII_eff,ix,iy,Grid->nxEC,locX,locY);
				compute sigmaII = 2.0*Z*Eff_strainRate;   // trial stress
				compute khi;
				compute phi = 0.0;
				compute Pe = Interp_ECVal_Cell2Particle_Local(Physics->P,ix,iy,Grid->nxEC, locX, locY);
				Pe = fmax(Pe,0.0);
				// Drucker-Prager-style yield stress.
				compute sigma_y = cohesion*cos(frictionAngle) + Pe*sin(frictionAngle);
				if (sigmaII > sigma_y) {
					// Yielding: back out the plastic "viscosity" khi so that
					// the effective stress falls on the yield surface.
					khi = 1.0/((1.0-phi)/sigma_y * (2.0*Eff_strainRate) - 1.0/(G*dt) - 1.0/eta );
					if (khi<0.0) {
						// quite rare case where (1.0-phi)/sigma_y * (2.0*Eff_strainRate) < - 1.0/(G*dt) - 1.0/eta
						// if it happens then I consider the case where there are == , which means khi -> inf
						printf("khi = %.2e, eta = %.2e, G = %.2e, dt = %.2e, Eff_Strainrate = %.2e, 1-phi = %.2e, sigma_y = %.2e, Pe = %.2e, Pmin = %.2e\n", khi, eta, G, dt, Eff_strainRate, 1.0-phi, sigma_y, Pe, -cohesion*cos(frictionAngle)/sin(frictionAngle));
						printf("WTF!\n");
						khi = 1e30;
						//exit(0);
					}
#if (USE_INVETA_EP)
					Z = (1.0-phi)*1.0/(1.0/khi + invEta_EP);
#else
					Z = (1.0-phi)*1.0/(1.0/khi + 1.0/eta + 1.0/(G*dt));
#endif
					sigmaII = 2.0*Z*Eff_strainRate;
				} else {
					khi = 1e30;   // no yielding: plasticity switched off
				}
				// Scatter to the quadrant cell (arithmetic for eta/khi/Z,
				// harmonic for G) and to the owning node.
				Physics->eta [iCell] += eta  * weightCell;
				Physics->G   [iCell] += weightCell / G;
				Physics->khi [iCell] += khi  * weightCell;
				//Physics->khi [iCell] += weightCell / khi;
				Physics->Z   [iCell] += Z    * weightCell;
				Physics->etaShear [iNode] += eta  * weightNode;
				Physics->GShear   [iNode] += weightNode / G;
				Physics->khiShear [iNode] += khi  * weightNode;
				//Physics->khiShear [iNode] += weightNode / khi;
				Physics->ZShear   [iNode] += Z    * weightNode;
				thisParticle = thisParticle->next;
			}
		}
	}

	// Normalize cell accumulators by the interpolation weights.
	for (iCell = 0; iCell < Grid->nECTot; ++iCell) {
		//printf("sumOfWeights[%i] = %.2e\n", iCell, Physics->sumOfWeightsCells [iCell]);
		if (Physics->sumOfWeightsCells [iCell]==0.0) {
			printf("error in Interp_All_Particles2Grid_Global. Cell #%i received no contribution from particles (i.e. empty cell).\n", iCell);
			exit(0);
		}
		Physics->eta [iCell] /= Physics->sumOfWeightsCells [iCell];
		Physics->G   [iCell]  = Physics->sumOfWeightsCells [iCell] / Physics->G   [iCell];
		Physics->khi [iCell] /= Physics->sumOfWeightsCells [iCell];
		//Physics->khi [iCell]  = Physics->sumOfWeightsCells [iCell] / Physics->khi [iCell];
		//Physics->Z   [iCell] /= Physics->sumOfWeightsCells [iCell];
		// Recompose Z from the averaged eta, khi, G rather than averaging Z.
		compute dt = Physics->dt;
		compute phi = 0.0;
		compute eta = Physics->eta [iCell];
		compute khi = Physics->khi [iCell];
		compute G   = Physics->G   [iCell];
		Physics->Z [iCell] = (1.0-phi)*1.0/(1.0/khi + 1.0/eta + 1.0/(G*dt));
	}
	Physics_CellVal_SideValues_copyNeighbours_Global(Physics->eta, Grid);
	Physics_CellVal_SideValues_copyNeighbours_Global(Physics->G, Grid);
	Physics_CellVal_SideValues_copyNeighbours_Global(Physics->khi, Grid);
	Physics_CellVal_SideValues_copyNeighbours_Global(Physics->Z, Grid);

	// Normalize node accumulators the same way.
	for (iNode = 0; iNode < Grid->nSTot; ++iNode) {
		Physics->etaShear [iNode] /= Physics->sumOfWeightsNodes[iNode]; // arithmetic average
		Physics->GShear   [iNode]  = Physics->sumOfWeightsNodes[iNode] / Physics->GShear   [iNode]; // arithmetic average
		Physics->khiShear [iNode] /= Physics->sumOfWeightsNodes[iNode]; // arithmetic average
		//Physics->khiShear [iNode]  = Physics->sumOfWeightsNodes[iNode] / Physics->khiShear [iNode]; // arithmetic average
		//Physics->ZShear [iNode] /= Physics->sumOfWeightsNodes[iNode]; // arithmetic average
		compute dt = Physics->dt;
		compute phi = 0.0;
		compute eta = Physics->etaShear [iNode];
		compute khi = Physics->khiShear [iNode];
		compute G   = Physics->GShear   [iNode];
		Physics->ZShear [iNode] = (1.0-phi)*1.0/(1.0/khi + 1.0/eta + 1.0/(G*dt));
	}
	//END_PARTICLES
	free(Exx);
	free(Exy);
	free(EIICell);
	free(SII0Cell);
	free(Rotxy);
	free(dVxdyGrid);
	free(dVydxGrid);
}

/*
 * Physics_Eta_EffStrainRate_updateGlobal: compute the effective
 * (visco-elastic) strain-rate invariant EII_eff on cells and shear nodes,
 * where each strain-rate component is augmented by the old stress divided
 * by 2*G*dt.
 */
void Physics_Eta_EffStrainRate_updateGlobal(Model* Model)
{
	Grid* Grid = &(Model->Grid);
	Physics* Physics = &(Model->Physics);
	Numerics* Numerics = &(Model->Numerics);
	int ix, iy;
	compute dVxdy, dVydx, dVxdx, dVydy;
	// NOTE(review): malloc results are not checked before use.
	compute* Exx_VE_CellGlobal = (compute*) malloc(Grid->nECTot * sizeof(compute));
	compute* Exy_VE_NodeGlobal = (compute*) malloc(Grid->nSTot * sizeof(compute));
	compute dt = Physics->dt;
	int iCell;
#pragma omp parallel for private(iy,ix, iCell, dVxdx, dVydy) OMP_SCHEDULE
	for (iy = 1; iy<Grid->nyEC-1; iy++) {
		for (ix = 1; ix<Grid->nxEC-1; ix++) {
			iCell = ix + iy*Grid->nxEC;
			dVxdx = (Physics->Vx[(ix) + (iy)*Grid->nxVx] - Physics->Vx[(ix-1) + (iy)*Grid->nxVx])/Grid->dx;
			dVydy = (Physics->Vy[(ix) + (iy)*Grid->nxVy] - Physics->Vy[(ix) + (iy-1)*Grid->nxVy])/Grid->dy;
			compute G = Physics->G[iCell];
			// Visco-elastic Exx: strain rate plus elastic stress contribution.
			Exx_VE_CellGlobal[iCell] = 0.5*(dVxdx-dVydy) + Physics->sigma_xx_0[iCell]/(2.0*G*dt);
		}
	}
	Physics_CellVal_SideValues_copyNeighbours_Global(Exx_VE_CellGlobal, Grid);
	int iNode;
#pragma omp parallel for private(iy,ix, iNode, dVxdy, dVydx) OMP_SCHEDULE
	for (iy = 0; iy<Grid->nyS; iy++) {
		for (ix = 0; ix<Grid->nxS; ix++) {
			iNode = ix + iy*Grid->nxS;
			dVxdy = ( Physics->Vx[ix  + (iy+1)*Grid->nxVx] - Physics->Vx[ix  + (iy  )*Grid->nxVx] )/Grid->dy;
			dVydx = ( Physics->Vy[ix+1+ iy*Grid->nxVy] - Physics->Vy[ix  + iy*Grid->nxVy] )/Grid->dx;
			compute G   /* declaration continues on the next original source line */
= Physics->GShear[iNode]; Exy_VE_NodeGlobal[iNode] = 0.5*(dVxdy+dVydx) + Physics->sigma_xy_0[iNode]/(2.0*G*dt); } } #pragma omp parallel for private(iy,ix, iCell) OMP_SCHEDULE for (iy = 1; iy<Grid->nyEC-1; iy++) { for (ix = 1; ix<Grid->nxEC-1; ix++) { iCell = ix + iy*Grid->nxEC; compute Exx_VE_sq = Exx_VE_CellGlobal[iCell]*Exx_VE_CellGlobal[iCell]; compute Exy_VE_sq = Interp_Product_NodeVal_Node2Cell_Local(Exy_VE_NodeGlobal , Exy_VE_NodeGlobal, ix, iy, Grid->nxS); Physics->EII_eff[iCell] = sqrt(Exx_VE_sq + Exy_VE_sq); } } Physics_CellVal_SideValues_copyNeighbours_Global(Physics->EII_eff, Grid); #pragma omp parallel for private(iy,ix, iNode) OMP_SCHEDULE for (iy = 0; iy<Grid->nyS; iy++) { for (ix = 0; ix<Grid->nxS; ix++) { iNode = ix + iy*Grid->nxS; compute Exy_VE_sq = Exy_VE_NodeGlobal[iNode] * Exy_VE_NodeGlobal[iNode]; compute Exx_VE_sq = Interp_Product_ECVal_Cell2Node_Local(Exx_VE_CellGlobal,Exx_VE_CellGlobal,ix,iy,Grid->nxEC); Physics->EII_effShear[iNode] = sqrt(Exx_VE_sq + Exy_VE_sq); } } free(Exx_VE_CellGlobal); free(Exy_VE_NodeGlobal); } void Physics_Eta_VEpredictor_updateGlobalCell(Model* Model) { // Update Physics->Z and Physics->eta // according to the Visco-elastic predictor Grid* Grid = &(Model->Grid); Physics* Physics = &(Model->Physics); MatProps* MatProps = &(Model->MatProps); int iy, ix, iCell; compute T, P, phi; compute alpha; compute eta, G; SinglePhase* thisPhaseInfo; compute invEtaDiff, invEtaDisl, invEtaPei; int phase; compute weight; compute B, E, V, n, taup, gamma, q, s; compute R = Physics->R; compute ZUpper, ZLower; compute BDiff[NB_PHASE_MAX], BDisl[NB_PHASE_MAX], BPei[NB_PHASE_MAX]; compute EII; compute eta_thisPhase; compute dt = Physics->dt; compute Z, Zcorr, PrevZcorr; compute sigmaII; compute tol = 1e-7; for (iy = 1; iy<Grid->nyEC-1; iy++) { for (ix = 1; ix<Grid->nxEC-1; ix++) { iCell = ix + iy*Grid->nxEC; Physics_StrainRateInvariant_getLocalCell(Model, ix, iy, &EII); #if (HEAT) P = Physics->P[iCell]; T = Physics->T[iCell]; #else T = 
1.0; // completes "T = " from the previous chunk (no HEAT: unit temperature)
			P = 0.0;
#endif
#if (DARCY)
			phi = Physics->phi[iCell];
#else
			phi = 0.0;
#endif
			compute sumOfWeights = Physics->sumOfWeightsCells[iCell];
			alpha = 1.0;
			compute invEta_EP = 0.0;

			// Precompute B and viscosities using EII
			eta = 0.0;
			G = 0.0;
			compute maxInvVisc = 0.0;
			thisPhaseInfo = Physics->phaseListHead[iCell];
			// loop over the phases present in this cell (weighted linked list)
			while (thisPhaseInfo != NULL) {
				invEtaDiff = 0.0;
				invEtaDisl = 0.0;
				invEtaPei  = 0.0;
				phase  = thisPhaseInfo->phase;
				weight = thisPhaseInfo->weight;
				G += weight/MatProps->G[phase]; // harmonic average of the shear modulus
				if (MatProps->vDiff[phase].isActive) {
					// diffusion creep: linear in stress
					B = MatProps->vDiff[phase].B;
					E = MatProps->vDiff[phase].E;
					V = MatProps->vDiff[phase].V;
					BDiff[phase] = B*exp( - (E+V*P)/(R*T) );
					invEtaDiff = (2.0*(BDiff[phase]));
					maxInvVisc = fmax(invEtaDiff,maxInvVisc);
				}
				if (MatProps->vDisl[phase].isActive) {
					// dislocation creep: power law with exponent n
					B = MatProps->vDisl[phase].B;
					E = MatProps->vDisl[phase].E;
					V = MatProps->vDisl[phase].V;
					n = MatProps->vDisl[phase].n;
					BDisl[phase] = B*exp( - (E+V*P)/(R*T) );
					invEtaDisl = (2.0*pow(BDisl[phase],1.0/n)*pow(EII,-1.0/n+1.0));
					maxInvVisc = fmax(invEtaDisl,maxInvVisc);
				}
				if (MatProps->vPei[phase].isActive) {
					// Peierls creep (regularized, effective exponent s)
					B = MatProps->vPei[phase].B;
					E = MatProps->vPei[phase].E;
					V = MatProps->vPei[phase].V;
					gamma = MatProps->vPei[phase].gamma;
					taup  = MatProps->vPei[phase].tau;
					q = MatProps->vPei[phase].q;
					s = (E+V*P)/(R*T)*pow((1.0-gamma),(q-1.0))*q*gamma;
					BPei[phase] = B*pow(gamma*taup,-s)*exp( - (E+V*P)/(R*T) * pow((1.0-gamma),q) );
					invEtaPei = (2.0*pow(BPei[phase] ,1.0/s)*pow(EII,-1.0/s+1.0) );
					maxInvVisc = fmax(invEtaPei,maxInvVisc);
				}
				// creep mechanisms combine harmonically for this phase
				eta_thisPhase = (1.0 / (invEtaDiff + invEtaDisl + invEtaPei));
				eta += weight * eta_thisPhase;                     // arithmetic phase average of eta
				invEta_EP += (1.0/(MatProps->G[phase]*dt)+1.0/eta_thisPhase) * weight; // arithmetic average of 1/Z
				thisPhaseInfo = thisPhaseInfo->next;
			}
			G = sumOfWeights / G;
			eta /= sumOfWeights;
			invEta_EP /= sumOfWeights;
			maxInvVisc = fmax(1.0/(G*dt),maxInvVisc);
			// bracket the initial guess of Z between the stiffest single
			// mechanism (ZUpper, capped at 1e10) and the full harmonic sum (ZLower)
			ZUpper = 1.0/maxInvVisc;
			if (ZUpper>1e10) {
				ZUpper = 1e10;
			}
			ZLower = 1.0/(1.0/(G*dt) + 1.0/eta);
			Z = 0.5*(ZUpper+ZLower);
			Zcorr = Z;
			sigmaII = 2.0*Z*Physics->EII_eff[iCell];

			// compute viscosities using sigmaII:
			// fixed-point iteration with under-relaxation (alpha) until the
			// relative correction on Z falls below tol
			while (fabs(Zcorr/Z)>tol) {
				eta = 0.0;
				thisPhaseInfo = Physics->phaseListHead[iCell];
				invEta_EP = 0.0;
				while (thisPhaseInfo != NULL) {
					invEtaDiff = 0.0;
					invEtaDisl = 0.0;
					invEtaPei  = 0.0;
					phase  = thisPhaseInfo->phase;
					weight = thisPhaseInfo->weight;
					if (MatProps->vDiff[phase].isActive) {
						invEtaDiff = (2.0*(BDiff[phase]));
					}
					if (MatProps->vDisl[phase].isActive) {
						n = MatProps->vDisl[phase].n;
						invEtaDisl = (2.0*BDisl[phase]*pow(sigmaII,-1.0+n));
					}
					if (MatProps->vPei[phase].isActive) {
						E = MatProps->vPei[phase].E;
						V = MatProps->vPei[phase].V;
						gamma = MatProps->vPei[phase].gamma;
						q = MatProps->vPei[phase].q;
						s = (E+V*P)/(R*T)*pow((1.0-gamma),(q-1.0))*q*gamma;
						invEtaPei = ( 2.0*BPei[phase]*pow(sigmaII,-1.0+s) );
					}
					eta_thisPhase = (1.0 / (invEtaDiff + invEtaDisl + invEtaPei));
					eta += weight * eta_thisPhase;
					thisPhaseInfo = thisPhaseInfo->next;
					invEta_EP += (1.0/(MatProps->G[phase]*dt)+1.0/eta_thisPhase) * weight;
				}
				eta /= sumOfWeights;
				//eta = 1e30;
				//eta = pow(10.0,eta / sumOfWeights);
				//invEta_EP = pow(10.0,invEta_EP / sumOfWeights);
				invEta_EP = invEta_EP / sumOfWeights;
				PrevZcorr = Zcorr;
				//Zcorr = (1.0-phi)*(1.0/(1.0/(G*dt) + 1.0/eta)) - Z;
				Zcorr = (1.0-phi)*1.0/invEta_EP - Z;
				// halve the relaxation if the correction overshoots (sign flip > 90%)
				if (Zcorr/PrevZcorr<-0.9) {
					alpha = alpha/2.0;
				}
				Z += alpha*Zcorr;
				sigmaII = 2.0*Z*Physics->EII_eff[iCell];
			}
			Physics->Z[iCell]   = Z;
			Physics->eta[iCell] = eta;
			Physics->G[iCell]   = G;
		}
	}
	// propagate the inner-cell values to the boundary ghost cells
	Physics_CellVal_SideValues_copyNeighbours_Global(Physics->eta, Grid);
	Physics_CellVal_SideValues_copyNeighbours_Global(Physics->Z, Grid);
	Physics_CellVal_SideValues_copyNeighbours_Global(Physics->G, Grid);
}

/**
 * Physics_Eta_computeLambda_FromParticles_updateGlobal
 * ----------------------------------------------------
 * Evaluates the plastic multiplier Lambda (and plastic "viscosity" khi) on the
 * particles, then interpolates them back to cells (Physics->Lambda,
 * Physics->khi) and shear nodes (Physics->LambdaShear).
 * If updateStresses is true the particle stresses are rescaled by Lambda
 * (return to the yield surface).
 */
void Physics_Eta_computeLambda_FromParticles_updateGlobal(Model* Model, bool updateStresses)
{
	Grid* Grid           = &(Model->Grid);
	MatProps* MatProps   = &(Model->MatProps);
	Particles* Particles = &(Model->Particles);
	Physics* Physics     = &(Model->Physics);
	//Numerics* Numerics = &(Model->Numerics);
	compute locX, locY;
	int ix, iy;
	INIT_PARTICLE
	// grid strain-rate components used to interpolate strain rates to particles
	compute* Exx_Grid = (compute*) malloc(Grid->nECTot * sizeof(compute));
	compute* Exy_Grid = (compute*) malloc(Grid->nSTot  * sizeof(compute));
	compute dVxdy, dVydx, dVxdx, dVydy;
	int iCell;
	// ---- Exx on inner cells ----
#pragma omp parallel for private(iy,ix, iCell, dVxdx, dVydy) OMP_SCHEDULE
	for (iy = 1; iy<Grid->nyEC-1; iy++) {
		for (ix = 1; ix<Grid->nxEC-1; ix++) {
			iCell = ix + iy*Grid->nxEC;
			dVxdx = (Physics->Vx[(ix) + (iy)*Grid->nxVx] - Physics->Vx[(ix-1) + (iy)*Grid->nxVx])/Grid->dx;
			dVydy = (Physics->Vy[(ix) + (iy)*Grid->nxVy] - Physics->Vy[(ix) + (iy-1)*Grid->nxVy])/Grid->dy;
			Exx_Grid[iCell] = 0.5*(dVxdx-dVydy);
		}
	}
	Physics_CellVal_SideValues_copyNeighbours_Global(Exx_Grid, Grid);
	// ---- Exy on nodes ----
#pragma omp parallel for private(iy,ix, iNode, dVxdy, dVydx) OMP_SCHEDULE
	for (iy = 0; iy<Grid->nyS; iy++) {
		for (ix = 0; ix<Grid->nxS; ix++) {
			iNode = ix + iy*Grid->nxS;
			dVxdy = ( Physics->Vx[ix + (iy+1)*Grid->nxVx] - Physics->Vx[ix + (iy  )*Grid->nxVx] )/Grid->dy;
			dVydx = ( Physics->Vy[ix+1 + iy*Grid->nxVy]   - Physics->Vy[ix   + iy*Grid->nxVy]   )/Grid->dx;
			Exy_Grid[iNode] = 0.5*(dVxdy+dVydx);
		}
	}

	// accumulators for the particle -> grid weighted averages
	compute* sumOfWeightsCells = (compute*) malloc(Grid->nECTot*sizeof(compute));
	compute* sumOfWeightsNodes = (compute*) malloc(Grid->nSTot *sizeof(compute));
#pragma omp parallel for private(iy,ix, iCell) OMP_SCHEDULE
	for(iCell=0; iCell<Grid->nECTot;++iCell) {
		Physics->Lambda[iCell] = 0.0;
		Physics->khi[iCell] = 0.0;
		sumOfWeightsCells[iCell] = 0.0;
	}
#pragma omp parallel for private(iy,ix, iNode) OMP_SCHEDULE
	for(iNode=0; iNode<Grid->nSTot;++iNode) {
		Physics->LambdaShear[iNode] = 0.0;
		sumOfWeightsNodes[iNode] = 0.0;
	}

	// offsets from a node to its four surrounding cell centers
	int IxN[4], IyN[4];
	IxN[0] =  0;  IyN[0] =  0; // lower left
	IxN[1] =  1;  IyN[1] =  0; // lower right
	IxN[2] =  0;  IyN[2] =  1; // upper left
	IxN[3] =  1;  IyN[3] =  1; // upper right

	// Loop through particles and compute lambda
	compute dt = Physics->dt;
	int iColor; // indexing of the color group for nodes. Nodes of the same color don't collide with each other. i.e. similar to matrix coloring
	int ixStart[4] = {0,0,1,1};
	int iyStart[4] = {0,1,0,1};
	for (iColor = 0; iColor < 4; ++iColor) {
#pragma omp parallel for private(ix, iy, iNode, thisParticle, locX, locY) OMP_SCHEDULE
		for (iy = iyStart[iColor]; iy < Grid->nyS; iy+=2) { // Gives better result not to give contribution from the boundaries
			for (ix = ixStart[iColor]; ix < Grid->nxS; ix+=2) { // I don't get why though
			//for (iy = 0; iy < Grid->nyS; ++iy) {
			//	for (ix = 0; ix < Grid->nxS; ++ix) {
				iNode = ix  + (iy  )*Grid->nxS;
				thisParticle = Particles->linkHead[iNode];
				// walk the particle linked list attached to this node
				while (thisParticle!=NULL) {
					locX = Particles_getLocX(ix, thisParticle->x,Grid);
					locY = Particles_getLocY(iy, thisParticle->y,Grid);
					if (fabs(locX)>1.0 || fabs(locY)>1.0 ) {
						printf("Error locXY, locX = %.1f, locY = %.1f\n", locX, locY);
						exit(0);
					}
					compute Lambda, lambda, khi;
					int phase = thisParticle->phase;
					if (phase == Physics->phaseAir || phase == Physics->phaseWater) {
						// First part of the correction of stresses on the particles: add subgrid (adding remaining will be done in a second step)
						// air/water never yield: Lambda = 1, effectively infinite khi
						Lambda = 1.0;
						khi = 1e30;
					} else {
						// interpolate grid quantities to the particle position
						compute Exx = Interp_ECVal_Cell2Particle_Local(Exx_Grid, ix, iy, Grid->nxEC, locX, locY);
						compute Exy = Interp_NodeVal_Node2Particle_Local(Exy_Grid, ix, iy, Grid->nxS, Grid->nyS, locX, locY);
						compute Z   = Interp_ECVal_Cell2Particle_Local(Physics->Z, ix, iy, Grid->nxEC, locX, locY);
						compute Pe  = Interp_ECVal_Cell2Particle_Local(Physics->P, ix, iy, Grid->nxEC, locX, locY);
						// Fail safe
						if (Pe<0.0) {
							Pe = 0.0;
						}
						compute G        = MatProps->G[phase];
						compute cohesion = MatProps->cohesion[phase];
						compute fAngle   = MatProps->frictionAngle[phase];
						compute Txx0 = thisParticle->sigma_xx_0;
						compute Txy0 = thisParticle->sigma_xy_0;
						// Drucker-Prager yield stress
						compute Ty = cohesion*cos(fAngle) + Pe*sin(fAngle);
						// visco-elastic effective strain rate and trial stress
						compute Exx_eff = (Exx + Txx0/(2.0*G*dt));
						compute Exy_eff = (Exy + Txy0/(2.0*G*dt));
						compute EII_eff = sqrt(Exx_eff*Exx_eff + Exy_eff*Exy_eff);
						compute TII_VE = 2.0*Z*EII_eff;
						if (TII_VE>Ty) {
							//printf("ExxPart = %.2e, ExxGrid0 = %.2e, ExxGrid1 = %.2e, ExxGrid2 = %.2e, ExxGrid3 = %.2e\n", Exx, Exx_Grid[ix+(iy)*Grid->nxEC], Exx_Grid[ix+1+(iy)*Grid->nxEC], Exx_Grid[ix+(iy+1)*Grid->nxEC], Exx_Grid[ix+1+(iy+1)*Grid->nxEC]);
							// yielding: scale the trial stress back to the yield surface
							Lambda = Ty/TII_VE;
							lambda = 2.0*EII_eff*(1.0-Lambda); // plastic strain-rate measure
							khi = Ty/lambda;                   // plastic "viscosity"
						} else {
							Lambda = 1.0;
							khi = 1e30;
						}
						if (updateStresses) {
							// return-mapping of the particle stresses
							thisParticle->sigma_xx_0 = 2.0*Z*(Exx_eff) * Lambda;
							thisParticle->sigma_xy_0 = 2.0*Z*(Exy_eff) * Lambda;
						}
					}
					// select the cell center in whose quadrant the particle sits
					int signX, signY;
					if (locX<0.0) {
						signX = -1;
					} else {
						signX = 1;
					}
					if (locY<0.0) {
						signY = -1;
					} else {
						signY = 1;
					}
					int i;
					if (signX>=0 && signY>=0) { // upper right
						i = 3;
					} else if (signX<0 && signY>=0) { // upper left
						// the particle is in the SE quadrant, the cell center 1 is NW (wrt to the node ix,iy)
						i = 2;
					} else if (signX>=0 && signY<0) { // lower right
						i = 1;
					} else if (signX<0 && signY<0) { // lower left
						i = 0;
					} else {
						printf("error in Interp_ECVal_Cell2Particle_Local. No case was triggered\n.");
						exit(0);
					}
					iCell = (ix+IxN[i] + (iy+IyN[i]) * Grid->nxEC);
					// bilinear-type weights: particle -> cell and particle -> node
					compute weight = fabs(locX)*fabs(locY);
					sumOfWeightsCells[iCell] += weight;
					Physics->Lambda[iCell] += Lambda*weight;
					Physics->khi[iCell]    += khi*weight;
					weight = (1.0 - fabs(locX)) * (1.0 - fabs(locY));
					sumOfWeightsNodes[iNode] += weight;
					Physics->LambdaShear[iNode] += Lambda*weight;
					thisParticle = thisParticle->next;
				} // while particles
			} // ix
		} // iy
	} // iColor

	// Finished to compute the averages
#pragma omp parallel for private(iy,ix, iCell) OMP_SCHEDULE
	for (iy = 1; iy<Grid->nyEC-1; iy++) {
		for (ix = 1; ix<Grid->nxEC-1; ix++) {
			iCell = ix + iy*Grid->nxEC;
			if (sumOfWeightsCells[iCell]==0.0) {
				// a cell received no particle contribution: fatal
				printf("error: zero sum on cell ix = %i, iy = %i", ix, iy);
				exit(0);
			}
			Physics->Lambda [iCell] /= sumOfWeightsCells[iCell];
			//Physics->Eps_pxx[iCell] /= sumOfWeightsCells[iCell];
			Physics->khi[iCell] /= sumOfWeightsCells[iCell];
		}
	}
	//Physics_CellVal_SideValues_copyNeighbours_Global(Physics->Eps_pxx, Grid);
	Physics_CellVal_SideValues_copyNeighbours_Global(Physics->Lambda, Grid);
	Physics_CellVal_SideValues_copyNeighbours_Global(Physics->khi, Grid);
	// normalize the node averages
#pragma omp parallel for private(iy,ix, iNode) OMP_SCHEDULE
	for (iy = 0; iy<Grid->nyS; iy++) {
		for (ix = 0; ix<Grid->nxS; ix++) {
			iNode = ix + iy*Grid->nxS;
			Physics->LambdaShear[iNode] /= sumOfWeightsNodes[iNode];
			//Physics->Eps_pxy[iNode] /= sumOfWeightsNodes[iNode];
		}
	}
	/*
	for (iy = 1; iy<Grid->nyEC-1; iy++) {
		for (ix = 1; ix<Grid->nxEC-1; ix++) {
			iCell = ix + iy*Grid->nxEC;
			compute lambda = Physics->lambda [iCell];
			if (lambda>0.0) {}
			compute Z = Physics->Z[iCell];
			compute G = Physics->G[iCell];
			compute dVxdx = (Physics->Vx[(ix) + (iy)*Grid->nxVx] - Physics->Vx[(ix-1) + (iy)*Grid->nxVx])/Grid->dx;
			compute dVydy = (Physics->Vy[(ix) + (iy)*Grid->nxVy] - Physics->Vy[(ix) + (iy-1)*Grid->nxVy])/Grid->dy;
			compute Exx = 0.5*(dVxdx-dVydy);
			//compute Exy = Interp_NodeVal_Node2Cell_Local(Eps_xy_NodeGlobal, ix, iy, nxS);
			compute Txx0 = Physics->sigma_xx_0[iCell];
			compute Txx_VE = 2.0 * Z*(Exx + Txx0/(2.0*G*dt));
			compute Txy_VE = 2.0 * Z*(Exx + Txy0/(2.0*G*dt));
			compute TII_VE = sqrt(Txx_VE*Txx_VE + Txy_VE*Txy_VE);
		}
	}
	}
	*/
	free(Exx_Grid);
	free(Exy_Grid);
	free(sumOfWeightsCells);
	free(sumOfWeightsNodes);
}

/**
 * Physics_Eta_ZandLambda_updateGlobal
 * -----------------------------------
 * Plastic corrector on the grid. Steps:
 *   1. refresh the effective strain-rate invariants (EII_eff, EII_effShear);
 *   2. estimate a per-column "water column pressure" at the air/water interface;
 *   3. on every inner cell: phase-average the plasticity parameters, apply
 *      strain weakening, and if the visco-elastic trial stress TII_VE exceeds
 *      the Drucker-Prager yield Ty, reduce Z by Lambda = Ty/TII_VE and store
 *      khi, Tau_y and the dilational volumeChange;
 *   4. repeat the corrector on shear nodes from node-interpolated quantities.
 */
void Physics_Eta_ZandLambda_updateGlobal(Model* Model)
{
	Grid* Grid         = &(Model->Grid);
	Physics* Physics   = &(Model->Physics);
	MatProps* MatProps = &(Model->MatProps);
	Numerics* Numerics = &(Model->Numerics); // NOTE(review): not used in this function
	BC* BCStokes       = &(Model->BCStokes);

	Physics_Eta_EffStrainRate_updateGlobal(Model);

	int ix, iy, iCell, iNode;

	// Find the pressure of the water column
	compute* WaterColumnPressure = (compute*) malloc(Grid->nxEC * sizeof(compute));
	WaterColumnPressure[0] = 0.0; // dummy values
	WaterColumnPressure[Grid->nxEC-1] = 0.0;
	// for each column check from the base up to the point where we find an air filled cell
	for (ix = 1; ix<Grid->nxEC-1; ix++) {
		for (iy = 1; iy<Grid->nyEC-1; iy++) {
			iCell = ix + iy*Grid->nxEC;
			if (MatProps->rho0[Physics->phaseAir]<1e-10) {
				// the WaterColumnPressure thing is a bit rough, so when the air has 0 density it's nicer to set it to 0
				WaterColumnPressure[ix] = 0.0;
				break;
			}
			if (Physics->phase[iCell] == Physics->phaseAir || Physics->phase[iCell] == Physics->phaseWater) {
				WaterColumnPressure[ix] = Physics->P[ix + (iy+1)*Grid->nxEC]; // take the cell above because this one is partially filled with not air
				break;
			}
		}
	}

	SinglePhase* thisPhaseInfo;
	// ===== Plastic stress corrector =====
#pragma omp parallel for private(iy,ix, iCell, thisPhaseInfo) OMP_SCHEDULE
	for (iy = 1; iy<Grid->nyEC-1; iy++) {
		for (ix = 1; ix<Grid->nxEC-1; ix++) {
			iCell = ix + iy*Grid->nxEC;

			// update cohesion and friction angle:
			// arithmetic phase average of all plasticity parameters in the cell
			compute sumOfWeights = Physics->sumOfWeightsCells[iCell];
			int phase;
			compute weight;
			compute cohesion, frictionAngle;
			cohesion = 0.0;
			frictionAngle = 0.0;
			thisPhaseInfo = Physics->phaseListHead[iCell];
			compute staticPfFac = 0.0;
			compute staticPfFacWeakFac = 0.0;
			compute frictionAngleWeakFac = 0.0;
			compute cohesionWeakFac = 0.0;
			compute strainWeakStart = 0.0;
			compute strainWeakEnd = 0.0;
			compute dilationAngle = 0.0;
			while (thisPhaseInfo != NULL) {
				phase = thisPhaseInfo->phase;
				weight = thisPhaseInfo->weight;
				cohesion            += MatProps->cohesion[phase]            * weight;
				frictionAngle       += MatProps->frictionAngle[phase]       * weight;
				staticPfFac         += MatProps->staticPfFac[phase]         * weight;
				staticPfFacWeakFac  += MatProps->staticPfFacWeakFac[phase]  * weight;
				frictionAngleWeakFac+= MatProps->frictionAngleWeakFac[phase] * weight;
				cohesionWeakFac     += MatProps->cohesionWeakFac[phase]     * weight;
				strainWeakStart     += MatProps->strainWeakStart[phase]     * weight;
				strainWeakEnd       += MatProps->strainWeakEnd[phase]       * weight;
				dilationAngle       += MatProps->dilationAngle[phase]       * weight;
				thisPhaseInfo = thisPhaseInfo->next;
			}
			cohesion            /= sumOfWeights;
			frictionAngle       /= sumOfWeights;
			staticPfFac         /= sumOfWeights;
			staticPfFacWeakFac  /= sumOfWeights;
			frictionAngleWeakFac/= sumOfWeights;
			cohesionWeakFac     /= sumOfWeights;
			strainWeakStart     /= sumOfWeights;
			strainWeakEnd       /= sumOfWeights;
			dilationAngle       /= sumOfWeights;
			// bottom-boundary override
			if (iy<=1) {
				if (BCStokes->Bottom_type==Bottom_Weakenable) {
					cohesion      = BCStokes->Bottom_cohesion;
					frictionAngle = BCStokes->Bottom_frictionAngle;
					staticPfFac   = BCStokes->Bottom_staticPfFac;
				}
			}

			// Strain weakening
			// =============================================
			compute CriticalStrain0 = strainWeakStart;
			compute CriticalStrain1 = strainWeakEnd;
			compute Cini = cohesion;
			compute Cend = cohesion*(1.0-cohesionWeakFac);
			compute fricIni = frictionAngle;
			compute fricEnd = frictionAngle*(1.0-frictionAngleWeakFac);
			compute staticPfFacIni = staticPfFac;
			compute staticPfFacEnd = (1.0-staticPfFacWeakFac)*( staticPfFac ) + staticPfFacWeakFac; // such that the weakening factor in front of the pressure is (1.0-staticPfFacWeakFac)*(1.0-Pf)
			staticPfFacEnd = fmin(staticPfFacEnd,0.99);
			compute preFac = 0.00;
			compute Fac;
			// =============================================

			compute Z_VE = 1.0/(1.0/Physics->eta[iCell] + 1.0/(Physics->G[iCell]*Physics->dt) );
			int iTry;
			compute LambdaOld;
			compute Lambda = 1.0;
			// NOTE(review): the loop below runs exactly once (iTry<1), so the
			// convergence breaks on iTry==1 / iTry>0 are currently dead code
			for (iTry=0;iTry<1;iTry++){
				LambdaOld = Lambda;
				Lambda = Physics->Z[iCell]/Z_VE;
				if (iTry == 1 && (1.0-Lambda<0.0001)){
					break;
				}
				if (iTry > 0 && fabs(Lambda-LambdaOld)<0.0001){
					break;
				}
				compute EpII = Physics->EII_eff[iCell]*(1.0-Lambda);
				//compute plasticStrain = Physics->strain[iCell];// + Physics->Dstrain[iCell];
				compute plasticStrain = Physics->strain[iCell];//+ EpII*Physics->dtAdv;
				// linear weakening ramp between CriticalStrain0 and CriticalStrain1
				if (plasticStrain<CriticalStrain0) {
					Fac = (1.0 - preFac * (plasticStrain)/(CriticalStrain0));
				} else {
					Fac = 1.0 - preFac - (plasticStrain-CriticalStrain0)/(CriticalStrain1-CriticalStrain0);
				}
				Fac = fmin(Fac,1.0);
				Fac = fmax(Fac,0.0);
				if (Physics->phase[iCell] == Physics->phaseAir || Physics->phase[iCell] == Physics->phaseWater) {
					Fac = 1.0; // no weakening in air/water
				}
				if (ix>=Grid->nxEC-3) {
					// no weakening near the right boundary
					frictionAngle = fricIni;
					cohesion = Cini;
				} else {
					cohesion      = Cini*Fac          + (1.0-Fac)*Cend;
					frictionAngle = fricIni*Fac       + (1.0-Fac)*fricEnd;
					staticPfFac   = staticPfFacIni*Fac + (1.0-Fac)*staticPfFacEnd;
				}
				if (iy<=1) {
					if (BCStokes->Bottom_type==Bottom_Fixed) {
						cohesion      = BCStokes->Bottom_cohesion;
						frictionAngle = BCStokes->Bottom_frictionAngle;
						staticPfFac   = BCStokes->Bottom_staticPfFac;
					}
				}
				// =============================================
				// dilation angle psi, linearly reduced with accumulated strain
				compute strain = plasticStrain;
				compute lim = 0.5;
				compute lim0 = 1e-2;
				if (strain>lim){
					strain = lim;
				}
				// psi = dilationAngle*1.0/(lim-lim0)*(lim-lim0-strain-lim0);
				compute psi = dilationAngle*(1.0-(strain-lim0)/(lim-lim0));
				//compute Pe = (1.0-staticPfFac) * (Physics->P[iCell] - WaterColumnPressure[ix]);
				compute sigma_yy = (fabs(-Physics->sigma_xx_0[iCell]-Physics->P[iCell]) - WaterColumnPressure[ix]);
				if (sigma_yy<1e-2) {
					sigma_yy = 1e-2;
				}
				//cohesion *= sigma_yy;
				//compute Pf = staticPfFac * (fabs(-Physics->sigma_xx_0[iCell]-Physics->P[iCell]) - WaterColumnPressure[ix]); // i.e. Lambda * sigma_n
				// NOTE(review): sigma_yy is recomputed below and its first value is
				// never used; both computations look like leftovers of experiments
				sigma_yy = -Physics->sigma_xx_0[iCell]+Physics->P[iCell];
				if (sigma_yy<.0) {
					sigma_yy = .0;
				}
				//cohesion *= sigma_yy;
				if (ix == 100) {
					//printf("sigma_yy=%.2e\n",sigma_yy);
				}
				//compute Pf = staticPfFac * (sigma_yy- WaterColumnPressure[ix]);
				//compute Pe = Physics->P[iCell] - WaterColumnPressure[ix] - Pf;
				//compute Pe = (1.0-staticPfFac) * (Physics->P[iCell] - WaterColumnPressure[ix]);
				// effective pressure with static pore-fluid factor, clamped >= 0
				compute Pe = (1.0-staticPfFac) * Physics->P[iCell];
				if (Pe<0.0) {
					Pe = 0.0;
				}
				compute TII_VE;
				Physics->Lambda[iCell] = 1.0;
				//compute TII_VE = Physics_StressInvariant_getLocalCell(Model, ix, iy);
				compute EII_eff = Physics->EII_eff[iCell];
				TII_VE = 2.0 * Z_VE * EII_eff; // visco-elastic trial stress invariant
				compute Ty = cohesion * cos(frictionAngle) + Pe * sin(frictionAngle); // Drucker-Prager yield
				Physics->Tau_y[iCell] = Ty;
				if (TII_VE>Ty) {
					// yielding: scale Z so that the stress sits on the yield surface
					compute Lambda = Ty/TII_VE;
					compute lambda = 2.0*Physics->EII_eff[iCell]*(1.0-Lambda);
					Physics->Z[iCell] = Z_VE * Lambda;
					Physics->Lambda[iCell] = 1.0;//Lambda;
					Physics->khi[iCell] = Ty/lambda;
					Physics->volumeChange[iCell] = 2.0*sin(psi)*EII_eff*(1.0-Lambda);//EpII;;
				} else {
					Physics->khi[iCell] = 1e30;
					Physics->Z[iCell] = Z_VE;
					Physics->Lambda[iCell] = 1.0;
					Physics->volumeChange[iCell] = 0.0;
				}
			}
		}
	}
	// ===== Plastic stress corrector =====
	Physics_CellVal_SideValues_copyNeighbours_Global(Physics->volumeChange, Grid);
	Physics_CellVal_SideValues_copyNeighbours_Global(Physics->khi, Grid);
	Physics_CellVal_SideValues_copyNeighbours_Global(Physics->Z, Grid);
	Physics_CellVal_SideValues_copyNeighbours_Global(Physics->Lambda, Grid);
	Physics_CellVal_SideValues_copyNeighbours_Global(Physics->Tau_y, Grid);

	//int iNode;
	// ---- same corrector on shear nodes (cell values interpolated to nodes) ----
#pragma omp parallel for private(iy,ix, iNode) OMP_SCHEDULE
	for (iy = 0; iy<Grid->nyS; iy++) {
		for (ix = 0; ix<Grid->nxS; ix++) {
			iNode = ix + iy*Grid->nxS;
			Physics->LambdaShear[iNode] = Interp_ECVal_Cell2Node_Local(Physics->Lambda, ix, iy, Grid->nxEC);
			Physics->ZShear[iNode]      = Interp_ECVal_Cell2Node_Local(Physics->Z, ix, iy, Grid->nxEC);
			Physics->khiShear[iNode]    = Interp_ECVal_Cell2Node_Local(Physics->khi, ix, iy, Grid->nxEC);
			compute Z_VE = 1.0/(1.0/Physics->etaShear[iNode] + 1.0/(Physics->GShear[iNode]*Physics->dt) );
			compute TII_VE = 0;
			Physics->LambdaShear[iNode] = 1.0;
			//compute TII_VE = Physics_StressInvariant_getLocalCell(Model, ix, iy);
			compute EII_eff = Physics->EII_effShear[iNode];
			TII_VE = 2.0 * Z_VE * EII_eff;
			compute Ty = Interp_ECVal_Cell2Node_Local(Physics->Tau_y, ix, iy, Grid->nxEC);
			if (TII_VE>Ty) {
				compute Lambda = Ty/TII_VE;
				compute lambda = 2.0*Physics->EII_effShear[iNode]*(1.0-Lambda);
				Physics->ZShear[iNode] = Z_VE * Lambda;
				Physics->LambdaShear[iNode] = 1.0;//Lambda;
				Physics->khiShear[iNode] = Ty/lambda;
			} else {
				Physics->khiShear[iNode] = 1e30;
				Physics->ZShear[iNode] = Z_VE;
				Physics->LambdaShear[iNode] = 1.0;
			}
			if ((Physics->ZShear[iNode]==0.0)) {
				printf("Zshear=0, Z_VE = %.2e, Physics->LambdaShear[iNode] = %.2e,Ty =%.2e, TII_VE =%.2e\n", Z_VE, Physics->LambdaShear[iNode], Ty, TII_VE);
			}
		}
	}
	free(WaterColumnPressure);
}
/* ========== hypre_hopscotch_hash.h ========== */
/*BHEADER********************************************************************** * Copyright (c) 2017, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322. * This file is part of AMG. See files README and COPYRIGHT for details. * * AMG is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * This software is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the * GNU General Public License for more details. * ***********************************************************************EHEADER*/ /** * Hopscotch hash is modified from the code downloaded from * https://sites.google.com/site/cconcurrencypackage/hopscotch-hashing * with the following terms of usage */ //////////////////////////////////////////////////////////////////////////////// //TERMS OF USAGE //------------------------------------------------------------------------------ // // Permission to use, copy, modify and distribute this software and // its documentation for any purpose is hereby granted without fee, // provided that due acknowledgments to the authors are provided and // this permission notice appears in all copies of the software. // The software is provided "as is". There is no warranty of any kind. // //Authors: // Maurice Herlihy // Brown University // and // Nir Shavit // Tel-Aviv University // and // Moran Tzafrir // Tel-Aviv University // // Date: July 15, 2008. // //////////////////////////////////////////////////////////////////////////////// // Programmer : Moran Tzafrir (MoranTza@gmail.com) // Modified : Jongsoo Park (jongsoo.park@intel.com) // Oct 1, 2015. 
// //////////////////////////////////////////////////////////////////////////////// #ifndef hypre_HOPSCOTCH_HASH_HEADER #define hypre_HOPSCOTCH_HASH_HEADER #include <stdio.h> #include <limits.h> #include <assert.h> #include <math.h> #ifdef HYPRE_USING_OPENMP #include <omp.h> #endif #include "_hypre_utilities.h" // Potentially architecture specific features used here: // __builtin_ffs // __sync_val_compare_and_swap #ifdef __cplusplus extern "C" { #endif /****************************************************************************** * This next section of code is here instead of in _hypre_utilities.h to get * around some portability issues with Visual Studio. By putting it here, we * can explicitly include this '.h' file in a few files in hypre and compile * them with C++ instead of C (VS does not support C99 'inline'). ******************************************************************************/ #ifdef HYPRE_USING_ATOMIC static inline HYPRE_Int hypre_compare_and_swap(HYPRE_Int *ptr, HYPRE_Int oldval, HYPRE_Int newval) { #if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100 return __sync_val_compare_and_swap(ptr, oldval, newval); //#elif defind _MSC_VER //return _InterlockedCompareExchange((long *)ptr, newval, oldval); //#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) // JSP: not many compilers have implemented this, so comment out for now //_Atomic HYPRE_Int *atomic_ptr = ptr; //atomic_compare_exchange_strong(atomic_ptr, &oldval, newval); //return oldval; #endif } static inline HYPRE_Int hypre_fetch_and_add(HYPRE_Int *ptr, HYPRE_Int value) { #if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100 return __sync_fetch_and_add(ptr, value); //#elif defined _MSC_VER //return _InterlockedExchangeAdd((long *)ptr, value); 
//#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) // JSP: not many compilers have implemented this, so comment out for now //_Atomic HYPRE_Int *atomic_ptr = ptr; //return atomic_fetch_add(atomic_ptr, value); #endif } #else // !HYPRE_USING_ATOMIC static inline HYPRE_Int hypre_compare_and_swap(HYPRE_Int *ptr, HYPRE_Int oldval, HYPRE_Int newval) { if (*ptr == oldval) { *ptr = newval; return oldval; } else return *ptr; } static inline HYPRE_Int hypre_fetch_and_add(HYPRE_Int *ptr, HYPRE_Int value) { HYPRE_Int oldval = *ptr; *ptr += value; return oldval; } #endif // !HYPRE_USING_ATOMIC /******************************************************************************/ // Constants ................................................................ #define HYPRE_HOPSCOTCH_HASH_HOP_RANGE (32) #define HYPRE_HOPSCOTCH_HASH_INSERT_RANGE (4*1024) #define HYPRE_HOPSCOTCH_HASH_EMPTY (0) #define HYPRE_HOPSCOTCH_HASH_BUSY (1) // Small Utilities .......................................................... #ifdef HYPRE_CONCURRENT_HOPSCOTCH static inline HYPRE_Int first_lsb_bit_indx(hypre_uint x) { if (0 == x) return -1; return __builtin_ffs(x) - 1; } #endif /** * hypre_Hash is adapted from xxHash with the following license. */ /* xxHash - Extremely Fast Hash algorithm Header File Copyright (C) 2012-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - xxHash source repository : https://github.com/Cyan4973/xxHash */ /*************************************** * Constants ***************************************/ #define HYPRE_XXH_PRIME32_1 2654435761U #define HYPRE_XXH_PRIME32_2 2246822519U #define HYPRE_XXH_PRIME32_3 3266489917U #define HYPRE_XXH_PRIME32_4 668265263U #define HYPRE_XXH_PRIME32_5 374761393U #define HYPRE_XXH_PRIME64_1 11400714785074694791ULL #define HYPRE_XXH_PRIME64_2 14029467366897019727ULL #define HYPRE_XXH_PRIME64_3 1609587929392839161ULL #define HYPRE_XXH_PRIME64_4 9650029242287828579ULL #define HYPRE_XXH_PRIME64_5 2870177450012600261ULL # define HYPRE_XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) # define HYPRE_XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) #ifdef HYPRE_BIGINT static inline HYPRE_Int hypre_Hash(HYPRE_Int input) { hypre_ulongint h64 = HYPRE_XXH_PRIME64_5 + sizeof(input); hypre_ulongint k1 = input; k1 *= HYPRE_XXH_PRIME64_2; k1 = HYPRE_XXH_rotl64(k1, 31); k1 *= HYPRE_XXH_PRIME64_1; h64 ^= k1; h64 = HYPRE_XXH_rotl64(h64, 27)*HYPRE_XXH_PRIME64_1 + HYPRE_XXH_PRIME64_4; h64 ^= h64 >> 33; h64 *= HYPRE_XXH_PRIME64_2; h64 ^= h64 >> 29; h64 *= HYPRE_XXH_PRIME64_3; h64 ^= h64 >> 32; #ifndef NDEBUG if 
(HYPRE_HOPSCOTCH_HASH_EMPTY == h64) { hypre_printf("hash(%lld) = %d\n", h64, HYPRE_HOPSCOTCH_HASH_EMPTY); assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h64); } #endif return h64; } #else static inline HYPRE_Int hypre_Hash(HYPRE_Int input) { hypre_uint h32 = HYPRE_XXH_PRIME32_5 + sizeof(input); // 1665863975 is added to input so that // only -1073741824 gives HYPRE_HOPSCOTCH_HASH_EMPTY. // Hence, we're fine as long as key is non-negative. h32 += (input + 1665863975)*HYPRE_XXH_PRIME32_3; h32 = HYPRE_XXH_rotl32(h32, 17)*HYPRE_XXH_PRIME32_4; h32 ^= h32 >> 15; h32 *= HYPRE_XXH_PRIME32_2; h32 ^= h32 >> 13; h32 *= HYPRE_XXH_PRIME32_3; h32 ^= h32 >> 16; //assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h32); return h32; } #endif static inline void hypre_UnorderedIntSetFindCloserFreeBucket( hypre_UnorderedIntSet *s, #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment* start_seg, #endif HYPRE_Int *free_bucket, HYPRE_Int *free_dist ) { HYPRE_Int move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1); HYPRE_Int move_free_dist; for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist) { hypre_uint start_hop_info = s->hopInfo[move_bucket]; HYPRE_Int move_new_free_dist = -1; hypre_uint mask = 1; HYPRE_Int i; for (i = 0; i < move_free_dist; ++i, mask <<= 1) { if (mask & start_hop_info) { move_new_free_dist = i; break; } } if (-1 != move_new_free_dist) { #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment* move_segment = &(s->segments[move_bucket & s->segmentMask]); if(start_seg != move_segment) omp_set_lock(&move_segment->lock); #endif if (start_hop_info == s->hopInfo[move_bucket]) { // new_free_bucket -> free_bucket and empty new_free_bucket HYPRE_Int new_free_bucket = move_bucket + move_new_free_dist; s->key[*free_bucket] = s->key[new_free_bucket]; s->hash[*free_bucket] = s->hash[new_free_bucket]; #ifdef HYPRE_CONCURRENT_HOPSCOTCH ++move_segment->timestamp; #pragma omp flush #endif s->hopInfo[move_bucket] |= (1U << move_free_dist); 
s->hopInfo[move_bucket] &= ~(1U << move_new_free_dist); *free_bucket = new_free_bucket; *free_dist -= move_free_dist - move_new_free_dist; #ifdef HYPRE_CONCURRENT_HOPSCOTCH if(start_seg != move_segment) omp_unset_lock(&move_segment->lock); #endif return; } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if(start_seg != move_segment) omp_unset_lock(&move_segment->lock); #endif } ++move_bucket; } *free_bucket = -1; *free_dist = 0; } static inline void hypre_UnorderedIntMapFindCloserFreeBucket( hypre_UnorderedIntMap *m, #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment* start_seg, #endif hypre_HopscotchBucket** free_bucket, HYPRE_Int* free_dist) { hypre_HopscotchBucket* move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1); HYPRE_Int move_free_dist; for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist) { hypre_uint start_hop_info = move_bucket->hopInfo; HYPRE_Int move_new_free_dist = -1; hypre_uint mask = 1; HYPRE_Int i; for (i = 0; i < move_free_dist; ++i, mask <<= 1) { if (mask & start_hop_info) { move_new_free_dist = i; break; } } if (-1 != move_new_free_dist) { #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment* move_segment = &(m->segments[(move_bucket - m->table) & m->segmentMask]); if (start_seg != move_segment) omp_set_lock(&move_segment->lock); #endif if (start_hop_info == move_bucket->hopInfo) { // new_free_bucket -> free_bucket and empty new_free_bucket hypre_HopscotchBucket* new_free_bucket = move_bucket + move_new_free_dist; (*free_bucket)->data = new_free_bucket->data; (*free_bucket)->key = new_free_bucket->key; (*free_bucket)->hash = new_free_bucket->hash; #ifdef HYPRE_CONCURRENT_HOPSCOTCH ++move_segment->timestamp; #pragma omp flush #endif move_bucket->hopInfo |= (1U << move_free_dist); move_bucket->hopInfo &= ~(1U << move_new_free_dist); *free_bucket = new_free_bucket; *free_dist -= move_free_dist - move_new_free_dist; #ifdef HYPRE_CONCURRENT_HOPSCOTCH if(start_seg != move_segment) 
omp_unset_lock(&move_segment->lock); #endif return; } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if(start_seg != move_segment) omp_unset_lock(&move_segment->lock); #endif } ++move_bucket; } *free_bucket = NULL; *free_dist = 0; } void hypre_UnorderedIntSetCreate( hypre_UnorderedIntSet *s, HYPRE_Int inCapacity, HYPRE_Int concurrencyLevel); void hypre_UnorderedIntMapCreate( hypre_UnorderedIntMap *m, HYPRE_Int inCapacity, HYPRE_Int concurrencyLevel); void hypre_UnorderedIntSetDestroy( hypre_UnorderedIntSet *s ); void hypre_UnorderedIntMapDestroy( hypre_UnorderedIntMap *m ); // Query Operations ......................................................... #ifdef HYPRE_CONCURRENT_HOPSCOTCH static inline HYPRE_Int hypre_UnorderedIntSetContains( hypre_UnorderedIntSet *s, HYPRE_Int key ) { //CALCULATE HASH .......................... HYPRE_Int hash = hypre_Hash(key); //CHECK IF ALREADY CONTAIN ................ hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask]; HYPRE_Int bucket = hash & s->bucketMask; hypre_uint hopInfo = s->hopInfo[bucket]; if (0 == hopInfo) return 0; else if (1 == hopInfo ) { if (hash == s->hash[bucket] && key == s->key[bucket]) return 1; else return 0; } HYPRE_Int startTimestamp = segment->timestamp; while (0 != hopInfo) { HYPRE_Int i = first_lsb_bit_indx(hopInfo); HYPRE_Int currElm = bucket + i; if (hash == s->hash[currElm] && key == s->key[currElm]) return 1; hopInfo &= ~(1U << i); } if (segment->timestamp == startTimestamp) return 0; HYPRE_Int i; for (i = 0; i< HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i) { if (hash == s->hash[bucket + i] && key == s->key[bucket + i]) return 1; } return 0; } /** * @ret -1 if key doesn't exist */ static inline HYPRE_Int hypre_UnorderedIntMapGet( hypre_UnorderedIntMap *m, HYPRE_Int key) { //CALCULATE HASH .......................... HYPRE_Int hash = hypre_Hash(key); //CHECK IF ALREADY CONTAIN ................ 
hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask]; hypre_HopscotchBucket *elmAry = &(m->table[hash & m->bucketMask]); hypre_uint hopInfo = elmAry->hopInfo; if (0 == hopInfo) return -1; else if (1 == hopInfo ) { if (hash == elmAry->hash && key == elmAry->key) return elmAry->data; else return -1; } HYPRE_Int startTimestamp = segment->timestamp; while (0 != hopInfo) { HYPRE_Int i = first_lsb_bit_indx(hopInfo); hypre_HopscotchBucket* currElm = elmAry + i; if (hash == currElm->hash && key == currElm->key) return currElm->data; hopInfo &= ~(1U << i); } if (segment->timestamp == startTimestamp) return -1; hypre_HopscotchBucket *currBucket = &(m->table[hash & m->bucketMask]); HYPRE_Int i; for (i = 0; i< HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i, ++currBucket) { if (hash == currBucket->hash && key == currBucket->key) return currBucket->data; } return -1; } #endif //status Operations ......................................................... static inline HYPRE_Int hypre_UnorderedIntSetSize(hypre_UnorderedIntSet *s) { HYPRE_Int counter = 0; HYPRE_Int n = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; HYPRE_Int i; for (i = 0; i < n; ++i) { if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i]) { ++counter; } } return counter; } static inline HYPRE_Int hypre_UnorderedIntMapSize(hypre_UnorderedIntMap *m) { HYPRE_Int counter = 0; HYPRE_Int n = m->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; HYPRE_Int i; for (i = 0; i < n; ++i) { if( HYPRE_HOPSCOTCH_HASH_EMPTY != m->table[i].hash ) { ++counter; } } return counter; } HYPRE_Int *hypre_UnorderedIntSetCopyToArray( hypre_UnorderedIntSet *s, HYPRE_Int *len ); //modification Operations ................................................... #ifdef HYPRE_CONCURRENT_HOPSCOTCH static inline void hypre_UnorderedIntSetPut( hypre_UnorderedIntSet *s, HYPRE_Int key ) { //CALCULATE HASH .......................... HYPRE_Int hash = hypre_Hash(key); //LOCK KEY HASH ENTERY .................... 
hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask]; omp_set_lock(&segment->lock); HYPRE_Int bucket = hash&s->bucketMask; //CHECK IF ALREADY CONTAIN ................ hypre_uint hopInfo = s->hopInfo[bucket]; while (0 != hopInfo) { HYPRE_Int i = first_lsb_bit_indx(hopInfo); HYPRE_Int currElm = bucket + i; if(hash == s->hash[currElm] && key == s->key[currElm]) { omp_unset_lock(&segment->lock); return; } hopInfo &= ~(1U << i); } //LOOK FOR FREE BUCKET .................... HYPRE_Int free_bucket = bucket; HYPRE_Int free_dist = 0; for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket) { if( (HYPRE_HOPSCOTCH_HASH_EMPTY == s->hash[free_bucket]) && (HYPRE_HOPSCOTCH_HASH_EMPTY == hypre_compare_and_swap((HYPRE_Int *)&s->hash[free_bucket], (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY, (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) ) break; } //PLACE THE NEW KEY ....................... if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE) { do { if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE) { s->key[free_bucket] = key; s->hash[free_bucket] = hash; s->hopInfo[bucket] |= 1U << free_dist; omp_unset_lock(&segment->lock); return; } hypre_UnorderedIntSetFindCloserFreeBucket(s, segment, &free_bucket, &free_dist); } while (-1 != free_bucket); } //NEED TO RESIZE .......................... hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR - RESIZE is not implemented\n"); /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/ exit(1); return; } static inline HYPRE_Int hypre_UnorderedIntMapPutIfAbsent( hypre_UnorderedIntMap *m, HYPRE_Int key, HYPRE_Int data) { //CALCULATE HASH .......................... HYPRE_Int hash = hypre_Hash(key); //LOCK KEY HASH ENTERY .................... hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask]; omp_set_lock(&segment->lock); hypre_HopscotchBucket* startBucket = &(m->table[hash & m->bucketMask]); //CHECK IF ALREADY CONTAIN ................ 
hypre_uint hopInfo = startBucket->hopInfo; while (0 != hopInfo) { HYPRE_Int i = first_lsb_bit_indx(hopInfo); hypre_HopscotchBucket* currElm = startBucket + i; if (hash == currElm->hash && key == currElm->key) { HYPRE_Int rc = currElm->data; omp_unset_lock(&segment->lock); return rc; } hopInfo &= ~(1U << i); } //LOOK FOR FREE BUCKET .................... hypre_HopscotchBucket* free_bucket = startBucket; HYPRE_Int free_dist = 0; for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket) { if( (HYPRE_HOPSCOTCH_HASH_EMPTY == free_bucket->hash) && (HYPRE_HOPSCOTCH_HASH_EMPTY == __sync_val_compare_and_swap((HYPRE_Int *)&free_bucket->hash, (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY, (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) ) break; } //PLACE THE NEW KEY ....................... if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE) { do { if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE) { free_bucket->data = data; free_bucket->key = key; free_bucket->hash = hash; startBucket->hopInfo |= 1U << free_dist; omp_unset_lock(&segment->lock); return HYPRE_HOPSCOTCH_HASH_EMPTY; } hypre_UnorderedIntMapFindCloserFreeBucket(m, segment, &free_bucket, &free_dist); } while (NULL != free_bucket); } //NEED TO RESIZE .......................... hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR - RESIZE is not implemented\n"); /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/ exit(1); return HYPRE_HOPSCOTCH_HASH_EMPTY; } #endif #ifdef __cplusplus } // extern "C" #endif #endif // hypre_HOPSCOTCH_HASH_HEADER