source
stringlengths
3
92
c
stringlengths
26
2.25M
GB_unop__sqrt_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__sqrt_fc64_fc64)
// op(A') function:  GB (_unop_tran__sqrt_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = csqrt (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: complex square root (C99 csqrt)
#define GB_OP(z, x) \
    z = csqrt (x) ;

// casting (identity cast: A and C share the same type here)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = csqrt (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SQRT || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__sqrt_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap; NULL otherwise
    int64_t anz,                // number of entries (or bitmap slots) to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every position 0..anz-1 holds a live entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = csqrt (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions whose bitmap flag says "no entry"
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = csqrt (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__sqrt_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template is specialized by the macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
concat_ref.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2020, OPEN AI LAB * Author: jjzeng@openailab.com */ #include <math.h> #include "sys_port.h" #include "module.h" #include "tengine_errno.h" #include "tengine_log.h" #include "tengine_ir.h" #include "../../cpu_node_ops.h" #include "tengine_op.h" #include "concat_param.h" #include "compiler_fp16.h" struct shape_dim { int dim[4]; float scale; int zero; }; struct concat_op_param { struct shape_dim* input_shape; int input_counts; int input_dim; struct shape_dim output_shape; int output_dim; int axis; float out_scale; void** input_data; }; static int ref_concat_fp32(const float** in_data, float* out_data, const struct concat_op_param* param, int num_thread) { int axis = param->axis; int concat_dim = 0; for (int ii = 0; ii < param->input_counts; ++ii) { concat_dim += param->input_shape[ii].dim[axis]; } if (concat_dim != param->output_shape.dim[axis]) { fprintf(stderr, "concant dimensions[%d] is not same output[%d]\n", concat_dim, param->output_shape.dim[axis]); return -1; } int out_size, in_size; out_size = 1; for (int ii = 0; ii < axis; ++ii) { out_size *= param->output_shape.dim[ii]; } in_size = 1; for (int ii = axis + 1; ii < param->output_dim; ++ii) { in_size *= 
param->input_shape[0].dim[ii]; } float* output_ptr = out_data; for (int k = 0; k < out_size; ++k) { // #pragma omp parallel for num_threads(num_thread) for (int j = 0; j < param->input_counts; ++j) { int cp_size = param->input_shape[j].dim[axis] * in_size; memcpy(output_ptr, in_data[j] + k * cp_size, cp_size * sizeof(float)); output_ptr += cp_size; } } return 0; } static int ref_concat_fp16(const __fp16** in_data, __fp16* out_data, const struct concat_op_param* param, int num_thread) { int axis = param->axis; int concat_dim = 0; for(int ii = 0; ii < param->input_counts; ++ii) { concat_dim += param->input_shape[ii].dim[axis]; } if(concat_dim != param->output_shape.dim[axis]) { printf("concat dimensions is not same output: ( %d -- %d )\n", concat_dim, param->output_shape.dim[axis]); return -1; } int out_size, in_size; out_size = 1; for(int ii = 0; ii < axis; ++ii) { out_size *= param->output_shape.dim[ii]; } in_size = 1; for(int ii = axis + 1; ii < param->output_dim; ++ii) { in_size *= param->input_shape[0].dim[ii]; } __fp16* output_ptr = out_data; for(int k = 0; k < out_size; ++k) { for(int j = 0; j < param->input_counts; ++j) { int cp_size = param->input_shape[j].dim[axis] * in_size; memcpy(output_ptr, in_data[j] + k * cp_size, cp_size * sizeof(__fp16)); output_ptr += cp_size; } } return 0; } static int ref_concat_uint8(const uint8_t** in_data, uint8_t* out_data, const struct concat_op_param* param, int num_thread) { int axis = param->axis; int concat_dim = 0; for (int ii = 0; ii < param->input_counts; ++ii) { concat_dim += param->input_shape[ii].dim[axis]; } if (concat_dim != param->output_shape.dim[axis]) { fprintf(stderr, "concat dimensions is not same output: ( %d -- %d )\n", concat_dim, param->output_shape.dim[axis]); return -1; } int outer_size, in_size; outer_size = 1; for (int ii = 0; ii < axis; ++ii) { outer_size *= param->output_shape.dim[ii]; } in_size = 1; for (int ii = axis + 1; ii < param->output_dim; ++ii) { in_size *= param->output_shape.dim[ii]; } 
int output_size = 1; for (int ii = 0; ii < param->output_dim; ++ii) { output_size *= param->output_shape.dim[ii]; } uint8_t* output_ptr = out_data; float out_scale = param->output_shape.scale; uint8_t out_zero = param->output_shape.zero; for (int k = 0; k < outer_size; ++k) { for (int j = 0; j < param->input_counts; ++j) { int cp_size = param->input_shape[j].dim[axis] * in_size; float scale = param->input_shape[j].scale; uint8_t input_zero = param->input_shape[j].zero; const uint8_t* input_ptr = ( const uint8_t* )(in_data[j] + k * cp_size); if (scale == out_scale && input_zero == out_zero) { memcpy(output_ptr, input_ptr, cp_size); } else { float t_scale = scale / out_scale; for (int ii = 0; ii < cp_size; ++ii) { output_ptr[ii] = round((input_ptr[ii] - input_zero) * t_scale) + out_zero; } } output_ptr += cp_size; } } return 0; } static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct concat_op_param* concat_op_param = ( struct concat_op_param* )sys_malloc(sizeof(struct concat_op_param)); concat_op_param->axis = 0; concat_op_param->input_counts = 1; concat_op_param->input_dim = 1; concat_op_param->input_shape = NULL; concat_op_param->out_scale = 0.1f; concat_op_param->output_dim = 1; exec_node->ops_priv = concat_op_param; return 0; } static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { sys_free(exec_node->ops_priv); return 0; } static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct ir_node* ir_node = exec_node->ir_node; struct ir_graph* ir_graph = ir_node->graph; struct ir_tensor* output_tensor; output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); struct concat_op_param* concat_op_param = ( struct concat_op_param* )exec_node->ops_priv; struct concat_param* concat_param = ( struct concat_param* )ir_node->op.param_mem; concat_op_param->axis = concat_param->axis; 
concat_op_param->input_counts = ir_node->input_num; concat_op_param->input_shape = ( struct shape_dim* )sys_malloc(sizeof(struct shape_dim) * ir_node->input_num); concat_op_param->output_dim = output_tensor->dim_num; for (int ii = 0; ii < output_tensor->dim_num; ii++) { concat_op_param->output_shape.dim[ii] = output_tensor->dims[ii]; concat_op_param->output_shape.scale = output_tensor->scale; concat_op_param->output_shape.zero = output_tensor->zero_point; } concat_op_param->input_data = ( void* )sys_malloc(sizeof(void*) * ir_node->input_num); return 0; } static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct ir_node* ir_node = exec_node->ir_node; struct ir_graph* ir_graph = ir_node->graph; struct ir_tensor* input_tensor; struct ir_tensor* output_tensor; output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); struct concat_op_param* concat_op_param = ( struct concat_op_param* )exec_node->ops_priv; void* out_data = output_tensor->data; for (int i = 0; i < ir_node->input_num; i++) { input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[i]); int number = input_tensor->dim_num; for (int j = 0; j < number; j++) { concat_op_param->input_shape[i].dim[j] = input_tensor->dims[j]; concat_op_param->input_shape[i].scale = input_tensor->scale; concat_op_param->input_shape[i].zero = input_tensor->zero_point; } concat_op_param->input_data[i] = input_tensor->data; } int ret = -1; if (input_tensor->data_type == TENGINE_DT_FP32) ret = ref_concat_fp32(( const float** )concat_op_param->input_data, out_data, concat_op_param, exec_graph->num_thread); else if (input_tensor->data_type == TENGINE_DT_FP16) ret = ref_concat_fp16(( const __fp16** )concat_op_param->input_data, out_data, concat_op_param, exec_graph->num_thread); else ret = ref_concat_uint8(( const uint8_t** )concat_op_param->input_data, out_data, concat_op_param, exec_graph->num_thread); return ret; } static int postrun(struct node_ops* 
node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct concat_op_param* concat_op_param = ( struct concat_op_param* )exec_node->ops_priv; sys_free(concat_op_param->input_shape); sys_free(concat_op_param->input_data); return 0; } static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node) { return OPS_SCORE_BEST; } static struct node_ops hcl_node_ops = {.prerun = prerun, .run = run, .reshape = NULL, .postrun = postrun, .init_node = init_node, .release_node = release_node, .score = score}; static int reg_concat_hcl_ops(void* arg) { return register_builtin_node_ops(OP_CONCAT, &hcl_node_ops); } static int unreg_concat_hcl_ops(void* arg) { return unregister_builtin_node_ops(OP_CONCAT, &hcl_node_ops); } AUTO_REGISTER_OPS(reg_concat_hcl_ops); AUTO_UNREGISTER_OPS(unreg_concat_hcl_ops);
omp_hello.c
/******************************************************************************
 * FILE: omp_hello.c
 * DESCRIPTION:
 *   OpenMP Example - Hello World - C/C++ Version
 *   In this simple example, the master thread forks a parallel region.
 *   All threads in the team obtain their unique thread number and print it.
 *   The master thread only prints the total number of threads.  Two OpenMP
 *   library routines are used to obtain the number of threads and each
 *   thread's number.
 * AUTHOR: Blaise Barney  5/99
 * LAST REVISED: 04/06/05
 ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char* argv[])
{
    /* private(): each thread gets its own uninitialized copy of these */
    int nthreads, tid;

    /* Fork a team of threads giving them their own copies of variables */
#pragma omp parallel private(nthreads, tid)
    {
        /* Obtain thread number */
        tid = omp_get_thread_num();
        printf("Hello World from thread = %d\n", tid);

        /* Only master thread does this.
         * fix: the team size was hard-coded to 5 here and the real
         * omp_get_num_threads() query had drifted into a tid == 1 branch,
         * printing the count twice and contradicting the header comment. */
        if (tid == 0)
        {
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
    } /* All threads join master thread and disband */

    return 0;
}
omp-matmat-static-parallel.c
/***************************************************************************** Example : omp-matmat-static-parallel.c Objective : Matrix - Matrix Multiplication using OpenMP one PARALLEL for directive with Schedule(static) and Private Clause Input : Size of Matrices(i.e Size of Matrix A and Matrix B) ie in terms of CLASS where CLASS A :1024; CLASS B: 2048 and CLASS C: 4096 Number of Threads Output : Number of Threads Total Memory Utilized for the Matrix - Matrix Computation Total Time Taken for Matrix - Matrix Computaion Created :Aug 2011 . Author : RarchK *********************************************************************************/ #include <stdio.h> #include <sys/time.h> #include <omp.h> #include <stdlib.h> /* Function declaration */ double Matrix_Multiplication_Static(double **Matrix_A,double **Matrix_B,double **Result,int N_size,int Total_threads); /* Main Program */ main(int argc , char * argv[]) { int CLASS_SIZE,N_size, i,j,k,Total_threads,THREADS; double Total_overhead = 0.0; double **Matrix_A, **Matrix_B, **Result; double memoryused=0.0; int iteration; FILE *fp; char * CLASS; printf("\n\t\t---------------------------------------------------------------------------"); printf("\n\t\t Email : RarchK"); printf("\n\t\t---------------------------------------------------------------------------"); printf("\n\t\t Objective : Dense Matrix Computations (Floating Point Operations)\n "); printf("\n\t\t Matrix into Matrix Multiplication using "); printf("\n\t\t OpenMP one PARALLEL for directive with Schedule(static) and Private Clause"); printf("\n\t\t..........................................................................\n"); /* Checking for the command line arguments */ if( argc != 3 ){ printf("\t\t Very Few Arguments\n "); printf("\t\t Syntax : exec <Class-Size> <Threads>\n"); printf("\t\t Where : Class-Size must be A or B or C \n"); exit(-1); } else { CLASS = argv[1]; THREADS = atoi(argv[2]); } if( strcmp(CLASS, "A" )==0){ CLASS_SIZE = 1024; } else if( 
strcmp(CLASS, "B" )==0){ CLASS_SIZE = 2048; } else if( strcmp(CLASS, "C" )==0){ CLASS_SIZE = 4096; } else { printf("\n\t\t Class-Size must be A or B or C \n"); exit(-1); } N_size = CLASS_SIZE; Total_threads = THREADS; printf("\n\t\t Matrix Size : %d",N_size); printf("\n\t\t Threads : %d",Total_threads); printf("\n"); /* Matrix_A Elements */ Matrix_A = (double **) malloc(sizeof(double *) * N_size); for (i = 0; i < N_size; i++) { Matrix_A[i] = (double *) malloc(sizeof(double) * N_size); for (j = 0; j < N_size; j++) { // srand48((unsigned int)N_size); // Matrix_A[i][j] = (double)(rand()%10); Matrix_A[i][j] = i+j; } } /* Matrix_B Elements */ Matrix_B = (double **) malloc(sizeof(double *) * N_size); for (i = 0; i < N_size; i++) { Matrix_B[i] = (double *) malloc(sizeof(double) * N_size); for (j = 0; j < N_size; j++) { // srand48((unsigned int)N_size); // Matrix_B[i][j] = (double)(rand()%10); Matrix_B[i][j] = i+j; } } /* Dynamic Memory Allocation */ Result = (double **) malloc(sizeof(double *) * N_size); for (i = 0; i < N_size; i++) Result[i] = (double *) malloc(sizeof(double) * N_size); memoryused = (3*(N_size*N_size))*sizeof(double); /* Function Calling */ Total_overhead = Matrix_Multiplication_Static(Matrix_A,Matrix_B,Result,N_size,Total_threads); printf("\n\t\t Memory Utilized : %lf MB \n",(memoryused/(1024*1024))); printf("\n\t\t Time in Seconds (T) : %lf Seconds \n",Total_overhead); printf("\n\t\t ( T represents the Time taken for the execution )"); printf("\n\t\t..........................................................................\n"); /* Free Memory */ free(Matrix_A); free(Matrix_B); free(Result); }/* Main function end */ /* Functions implementation */ double Matrix_Multiplication_Static(double **Matrix_A,double **Matrix_B,double **Result,int N_size,int Total_threads) { int i,j,k; struct timeval TimeValue_Start; struct timezone TimeZone_Start; struct timeval TimeValue_Final; struct timezone TimeZone_Final; long time_start, time_end; double time_overhead; 
gettimeofday(&TimeValue_Start, &TimeZone_Start); /* set the no. of threads */ omp_set_num_threads(Total_threads); /* OpenMP For Directive with static option Do matrix multiply sharing iterations on outer loop */ #pragma omp parallel for private(j,k) schedule(static) for (i = 0; i < N_size; i = i + 1){ for (j = 0; j < N_size; j = j + 1){ Result[i][j]=0.0; for (k = 0; k < N_size; k = k + 1) Result[i][j] = Result[i][j] + Matrix_A[i][k] * Matrix_B[k][j]; } }/* End of the Parallel section */ gettimeofday(&TimeValue_Final, &TimeZone_Final); /* Calculate the time taken for the computation */ time_start = TimeValue_Start.tv_sec * 1000000 + TimeValue_Start.tv_usec; time_end = TimeValue_Final.tv_sec * 1000000 + TimeValue_Final.tv_usec; time_overhead = (time_end - time_start)/1000000.0; printf("\n\t\t Matrix into Matrix Multiplication using one Parallel for pragma with static option......Done \n"); return time_overhead; }
band.h
// Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
//    following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
//    and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

/** \file band.h
 *
 *  \brief Contains declaration and partial implementation of sirius::Band class.
 */

#ifndef __BAND_H__
#define __BAND_H__

#include "periodic_function.h"
#include "k_point_set.h"
#include "Hamiltonian/local_operator.hpp"
#include "non_local_operator.h"
#include "hubbard.hpp"
#include "Hamiltonian.h"

namespace sirius {

// TODO: Band problem is a mess and needs more formal organizaiton. We have different basis functions.
//       We can do first- and second-variation or a full variation. We can do iterative or exact diagonalization.
//       This has to be organized.

// solve_for_kset should be the the main entry point (rename to solve() and pass K_point_set)
// solve() ---> solve_fplapw(K_point) -> |--> second_variation() --> fv_diag() : exact or itrative : sv_diag()
//          |                            |--> single_variation()  : exact or iterative
//          |
//          |
//          \--> solve_pppw(K_point) -> |--> davidson()
//                                      |--> rmm_diis()
//                                      |--> chebyshev()
//                                      |--> exact()

/// Setup and solve the eigen value problem.
class Band
{
  private:
    /// Simulation context.
    Simulation_context& ctx_;

    /// Alias for the unit cell.
    Unit_cell& unit_cell_;

    /// BLACS grid for distributed linear algebra operations.
    BLACS_grid const& blacs_grid_;

    /// Solve the band diagonalziation problem with single (full) variation.
    inline int solve_with_single_variation(K_point& kp__, Hamiltonian& hamiltonian__) const;

    /// Solve the band diagonalziation problem with second variation approach.
    /** This is only used by the FP-LAPW method. */
    inline void solve_with_second_variation(K_point& kp__, Hamiltonian& hamiltonian__) const;

    /// Solve the first-variational (non-magnetic) problem with exact diagonalization.
    /** This is only used by the LAPW method. */
    inline void diag_fv_exact(K_point* kp__, Hamiltonian& hamiltonian__) const;

    /// Solve the first-variational (non-magnetic) problem with iterative Davidson diagonalization.
    inline void diag_fv_davidson(K_point* kp__, Hamiltonian& hamiltonian__) const;

    /// Get singular components of the LAPW overlap matrix.
    /** Singular components are the eigen-vectors with a very small eigen-value. */
    inline void get_singular_components(K_point* kp__, Hamiltonian& H__) const;

    /// Exact (not iterative) diagonalization of the Hamiltonian.
    template <typename T>
    inline void diag_pseudo_potential_exact(K_point* kp__, int ispn__, Hamiltonian& H__) const;

    /// Iterative Davidson diagonalization.
    template <typename T>
    inline int diag_pseudo_potential_davidson(K_point* kp__, Hamiltonian& H__) const;

    /// RMM-DIIS diagonalization.
    template <typename T>
    inline void diag_pseudo_potential_rmm_diis(K_point* kp__, int ispn__, Hamiltonian& H__) const;

    template <typename T>
    inline void diag_pseudo_potential_chebyshev(K_point* kp__, int ispn__, Hamiltonian& H__,
                                                P_operator<T>& p_op__) const;

    /// Auxiliary function used internally by residuals() function.
    inline mdarray<double, 1> residuals_aux(K_point* kp__, int ispn__, int num_bands__, std::vector<double>& eval__,
                                            Wave_functions& hpsi__, Wave_functions& opsi__, Wave_functions& res__,
                                            mdarray<double, 2>& h_diag__, mdarray<double, 1>& o_diag__) const;

    /// Compute residuals.
    template <typename T>
    inline int residuals(K_point* kp__, int ispn__, int N__, int num_bands__, std::vector<double>& eval__,
                         std::vector<double>& eval_old__, dmatrix<T>& evec__, Wave_functions& hphi__,
                         Wave_functions& ophi__, Wave_functions& hpsi__, Wave_functions& opsi__, Wave_functions& res__,
                         mdarray<double, 2>& h_diag__, mdarray<double, 1>& o_diag__) const;

    /** Compute \f$ O_{ii'} = \langle \phi_i | \hat O | \phi_{i'} \rangle \f$ operator matrix
     *  for the subspace spanned by the wave-functions \f$ \phi_i \f$. The matrix is always returned
     *  in the CPU pointer because most of the standard math libraries start from the CPU.
     */
    template <typename T>
    inline void set_subspace_mtrx(int N__, int n__, Wave_functions& phi__, Wave_functions& op_phi__,
                                  dmatrix<T>& mtrx__, dmatrix<T>& mtrx_old__) const
    {
        PROFILE("sirius::Band::set_subspace_mtrx");

        assert(n__ != 0);
        if (mtrx_old__.size()) {
            assert(&mtrx__.blacs_grid() == &mtrx_old__.blacs_grid());
        }

        /* copy old N x N distributed matrix */
        if (N__ > 0) {
            splindex<block_cyclic> spl_row(N__, mtrx__.blacs_grid().num_ranks_row(), mtrx__.blacs_grid().rank_row(),
                                           mtrx__.bs_row());
            splindex<block_cyclic> spl_col(N__, mtrx__.blacs_grid().num_ranks_col(), mtrx__.blacs_grid().rank_col(),
                                           mtrx__.bs_col());

            /* each rank copies only its locally stored block-cyclic panel */
            #pragma omp parallel for schedule(static)
            for (int i = 0; i < spl_col.local_size(); i++) {
                std::copy(&mtrx_old__(0, i), &mtrx_old__(0, i) + spl_row.local_size(), &mtrx__(0, i));
            }

            if (ctx_.control().print_checksum_) {
                double_complex cs(0, 0);
                for (int i = 0; i < spl_col.local_size(); i++) {
                    for (int j = 0; j < spl_row.local_size(); j++) {
                        cs += mtrx__(j, i);
                    }
                }
                mtrx__.blacs_grid().comm().allreduce(&cs, 1);
                if (ctx_.comm_band().rank() == 0) {
                    print_checksum("subspace_mtrx_old", cs);
                }
            }
        }

        /* <{phi,phi_new}|Op|phi_new> */
        inner(ctx_.processing_unit(), (ctx_.num_mag_dims() == 3) ? 2 : 0, phi__, 0, N__ + n__, op_phi__, N__, n__,
              mtrx__, 0, N__);

        /* restore lower part */
        if (N__ > 0) {
            if (mtrx__.blacs_grid().comm().size() == 1) {
                /* serial grid: fill the lower block with the conjugate of the upper one */
                #pragma omp parallel for
                for (int i = 0; i < N__; i++) {
                    for (int j = N__; j < N__ + n__; j++) {
                        mtrx__(j, i) = type_wrapper<T>::bypass(std::conj(mtrx__(i, j)));
                    }
                }
            } else {
#ifdef __SCALAPACK
                linalg<CPU>::tranc(n__, N__, mtrx__, 0, N__, mtrx__, N__, 0);
#else
                TERMINATE_NO_SCALAPACK
#endif
            }
        }

        if (ctx_.control().print_checksum_) {
            splindex<block_cyclic> spl_row(N__ + n__, mtrx__.blacs_grid().num_ranks_row(),
                                           mtrx__.blacs_grid().rank_row(), mtrx__.bs_row());
            splindex<block_cyclic> spl_col(N__ + n__, mtrx__.blacs_grid().num_ranks_col(),
                                           mtrx__.blacs_grid().rank_col(), mtrx__.bs_col());
            double_complex cs(0, 0);
            for (int i = 0; i < spl_col.local_size(); i++) {
                for (int j = 0; j < spl_row.local_size(); j++) {
                    cs += mtrx__(j, i);
                }
            }
            mtrx__.blacs_grid().comm().allreduce(&cs, 1);
            if (ctx_.comm_band().rank() == 0) {
                print_checksum("subspace_mtrx", cs);
            }
        }

        /* kill any numerical noise */
        mtrx__.make_real_diag(N__ + n__);

        /* save new matrix */
        if (mtrx_old__.size()) {
            splindex<block_cyclic> spl_row(N__ + n__, mtrx__.blacs_grid().num_ranks_row(),
                                           mtrx__.blacs_grid().rank_row(), mtrx__.bs_row());
            splindex<block_cyclic> spl_col(N__ + n__, mtrx__.blacs_grid().num_ranks_col(),
                                           mtrx__.blacs_grid().rank_col(), mtrx__.bs_col());

            #pragma omp parallel for schedule(static)
            for (int i = 0; i < spl_col.local_size(); i++) {
                std::copy(&mtrx__(0, i), &mtrx__(0, i) + spl_row.local_size(), &mtrx_old__(0, i));
            }
        }
    }

    /// Diagonalize a pseudo-potential Hamiltonian.
    /** Dispatches on the iterative-solver type read from the simulation context
     *  ("exact", "davidson", "rmm-diis" or "chebyshev"); returns the iteration
     *  count reported by the Davidson solver (0 for the other solvers). */
    template <typename T>
    int diag_pseudo_potential(K_point* kp__, Hamiltonian& H__) const
    {
        PROFILE("sirius::Band::diag_pseudo_potential");

        H__.local_op().prepare(kp__->gkvec_partition());
        ctx_.fft_coarse().prepare(kp__->gkvec_partition());

        int niter{0};

        auto& itso = ctx_.iterative_solver_input();
        if (itso.type_ == "exact") {
            if (ctx_.num_mag_dims() != 3) {
                for (int ispn = 0; ispn < ctx_.num_spins(); ispn++) {
                    diag_pseudo_potential_exact<double_complex>(kp__, ispn, H__);
                }
            } else {
                /* non-collinear magnetism is not supported by this branch */
                STOP();
            }
        } else if (itso.type_ == "davidson") {
            niter = diag_pseudo_potential_davidson<T>(kp__, H__);
        } else if (itso.type_ == "rmm-diis") {
            if (ctx_.num_mag_dims() != 3) {
                for (int ispn = 0; ispn < ctx_.num_spins(); ispn++) {
                    diag_pseudo_potential_rmm_diis<T>(kp__, ispn, H__);
                }
            } else {
                STOP();
            }
        } else if (itso.type_ == "chebyshev") {
            P_operator<T> p_op(ctx_, kp__->p_mtrx());
            if (ctx_.num_mag_dims() != 3) {
                for (int ispn = 0; ispn < ctx_.num_spins(); ispn++) {
                    diag_pseudo_potential_chebyshev<T>(kp__, ispn, H__, p_op);
                }
            } else {
                STOP();
            }
        } else {
            TERMINATE("unknown iterative solver type");
        }

        /* check residuals */
        if (ctx_.control().verification_ >= 1) {
            check_residuals<T>(kp__, H__);
        }

        ctx_.fft_coarse().dismiss();
        return niter;
    }

    /// Compute and print |H psi - E S psi| per band (verification helper).
    template <typename T>
    void check_residuals(K_point* kp__, Hamiltonian& H__) const
    {
        if (kp__->comm().rank() == 0) {
            printf("checking residuals\n");
        }

        const bool nc_mag = (ctx_.num_mag_dims() == 3);
        const int num_sc  = nc_mag ? 2 : 1;

        auto& psi = kp__->spinor_wave_functions();
        Wave_functions hpsi(kp__->gkvec_partition(), ctx_.num_bands(), num_sc);
        Wave_functions spsi(kp__->gkvec_partition(), ctx_.num_bands(), num_sc);
        Wave_functions res(kp__->gkvec_partition(), ctx_.num_bands(), num_sc);

        /* compute residuals */
        for (int ispin_step = 0; ispin_step < ctx_.num_spin_dims(); ispin_step++) {
            if (nc_mag) {
                /* apply Hamiltonian and S operators to the wave-functions */
                H__.apply_h_s<T>(kp__, 2, 0, ctx_.num_bands(), psi, hpsi, spsi);
            } else {
                /* wrap the single spin component in a non-owning Wave_functions view */
                Wave_functions phi(&psi.pw_coeffs(ispin_step).prime(0, 0), kp__->gkvec_partition(), ctx_.num_bands(),
                                   1);
                /* apply Hamiltonian and S operators to the wave-functions */
                H__.apply_h_s<T>(kp__, ispin_step, 0, ctx_.num_bands(), phi, hpsi, spsi);
            }

            for (int ispn = 0; ispn < num_sc; ispn++) {
                #pragma omp parallel for schedule(static)
                for (int j = 0; j < ctx_.num_bands(); j++) {
                    for (int ig = 0; ig < kp__->num_gkvec_loc(); ig++) {
                        /* res = H|psi> - E S|psi> */
                        res.pw_coeffs(ispn).prime(ig, j) = hpsi.pw_coeffs(ispn).prime(ig, j) -
                                                           spsi.pw_coeffs(ispn).prime(ig, j) *
                                                               kp__->band_energy(j, ispin_step);
                    }
                }
            }
            /* get the norm */
            auto l2norm = res.l2norm(ctx_.processing_unit(), nc_mag ? 2 : 0, ctx_.num_bands());
            if (kp__->comm().rank() == 0) {
                for (int j = 0; j < ctx_.num_bands(); j++) {
                    printf("band: %3i, residual l2norm: %18.12f\n", j, l2norm[j]);
                }
            }
        }
    }

  public:
    /// Constructor
    Band(Simulation_context& ctx__)
        : ctx_(ctx__)
        , unit_cell_(ctx__.unit_cell())
        , blacs_grid_(ctx__.blacs_grid())
    {
    }

    /// Solve second-variational problem.
    inline void diag_sv(K_point* kp, Hamiltonian& hamiltonian__) const;

    /// Solve \f$ \hat H \psi = E \psi \f$ and find eigen-states of the Hamiltonian.
    inline void solve_for_kset(K_point_set& kset__, Hamiltonian& hamiltonian__, bool precompute__) const;

    /// Initialize the subspace for the entire k-point set.
    inline void initialize_subspace(K_point_set& kset__, Hamiltonian& hamiltonian__) const;

    /// Initialize the wave-functions subspace.
    template <typename T>
    inline void initialize_subspace(K_point* kp__, Hamiltonian& hamiltonian__, int num_ao__) const;

    /// Accumulated eigenvalue-problem work counter.
    /** NOTE(review): function-local static mutated by callers — not thread-safe;
     *  confirm it is only updated from a single thread. */
    static double& evp_work_count()
    {
        static double evp_work_count_{0};
        return evp_work_count_;
    }
};

#include "Band/residuals.hpp"
#include "Band/diag_full_potential.hpp"
#include "Band/diag_pseudo_potential.hpp"
#include "Band/initialize_subspace.hpp"
#include "Band/solve.hpp"

}

#endif // __BAND_H__
par_interp.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008,  Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE.  See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision: 2.4 $
 ***********************************************************************EHEADER*/

#include "headers.h"

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterp
 *
 * Builds the BoomerAMG interpolation operator P from the system matrix A,
 * the strength-of-connection matrix S and the C/F splitting in CF_marker.
 * C-points interpolate to themselves (weight 1); each F-point row i gets
 * weights on the C-points that strongly influence i, with connections to
 * strong F-neighbors redistributed onto those C-points and weak connections
 * lumped into the diagonal.  CF_marker[i] >= 0 marks a C-point; the special
 * value -3 is skipped entirely and reset to -1 before returning.
 *
 * Parameters:
 *   A               - fine-grid ParCSR matrix
 *   CF_marker       - C/F splitting (>= 0: C-point, < 0: F-point, -3: skip)
 *   S               - strength matrix defining the interpolation pattern
 *   num_cpts_global - global coarse-point partitioning info
 *   num_functions, dof_func - when num_functions > 1, accumulation into the
 *                     diagonal only happens within the same function/unknown
 *   debug_flag      - value 4 enables timing printouts
 *   trunc_factor, max_elmts - forwarded to hypre_BoomerAMGInterpTruncation
 *   col_offd_S_to_A - optional map from S's offd columns to A's offd columns
 *   P_ptr           - output: the assembled interpolation matrix
 *
 * Returns 0.  Threading is done by splitting rows into contiguous chunks,
 * one per thread, via the HYPRE_SMP_PRIVATE / hypre_smp_forloop.h scheme.
 *--------------------------------------------------------------------------*/

int
hypre_BoomerAMGBuildInterp( hypre_ParCSRMatrix   *A,
                            int                  *CF_marker,
                            hypre_ParCSRMatrix   *S,
                            HYPRE_BigInt         *num_cpts_global,
                            int                   num_functions,
                            int                  *dof_func,
                            int                   debug_flag,
                            double                trunc_factor,
                            int                   max_elmts,
                            int                  *col_offd_S_to_A,
                            hypre_ParCSRMatrix  **P_ptr)
{
   MPI_Comm                 comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg     *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle  *comm_handle;

   /* local (diag) and off-processor (offd) parts of A in CSR form */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   double          *A_diag_data = hypre_CSRMatrixData(A_diag);
   int             *A_diag_i = hypre_CSRMatrixI(A_diag);
   int             *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   double          *A_offd_data = hypre_CSRMatrixData(A_offd);
   int             *A_offd_i = hypre_CSRMatrixI(A_offd);
   int             *A_offd_j = hypre_CSRMatrixJ(A_offd);

   int              num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);

   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   int             *S_diag_i = hypre_CSRMatrixI(S_diag);
   int             *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   int             *S_offd_i = hypre_CSRMatrixI(S_offd);
   int             *S_offd_j = hypre_CSRMatrixJ(S_offd);

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt    *col_map_offd_P = NULL;
   int             *tmp_map_offd = NULL;

   /* CF_marker / dof_func values for the off-processor columns of A */
   int             *CF_marker_offd = NULL;
   int             *dof_func_offd = NULL;

   /* ghost rows of A (rows owned by other processes that we need locally) */
   hypre_CSRMatrix *A_ext;
   double          *A_ext_data;
   int             *A_ext_i;
   int             *A_ext_j;

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;

   double          *P_diag_data = NULL;
   int             *P_diag_i;
   int             *P_diag_j = NULL;
   double          *P_offd_data = NULL;
   int             *P_offd_i;
   int             *P_offd_j = NULL;

   int              P_diag_size, P_offd_size;

   /* per-row scratch markers: map a column index to its slot in the current
    * P row, or flag it as a strong F-point */
   int             *P_marker = NULL;
   int             *P_marker_offd = NULL;

   int              jj_counter,jj_counter_offd;
   int             *jj_count, *jj_count_offd;
   int              jj_begin_row,jj_begin_row_offd;
   int              jj_end_row,jj_end_row_offd;

   int              start_indexing = 0; /* start indexing for P_data at 0 */

   int              n_fine = hypre_CSRMatrixNumRows(A_diag);

   int              strong_f_marker;

   int             *fine_to_coarse;
   /*int             *fine_to_coarse_offd;*/
   int             *coarse_counter;
   int              coarse_shift;
   HYPRE_BigInt     total_global_cpts;
   HYPRE_BigInt     my_first_cpt;
   int              num_cols_P_offd;

   int              i,i1,i2;
   int              j,jl,jj,jj1;
   int              start;
   int              sgn;
   int              c_num;

   double           diagonal;
   double           sum;
   double           distribute;

   double           zero = 0.0;
   double           one  = 1.0;

   int              my_id;
   int              num_procs;
   int              num_threadsID;
   int              num_sends;
   int              index;
   int              ns, ne, size, rest;
   int             *int_buf_data = NULL;

   double           wall_time;  /* for debugging instrumentation */

   MPI_Comm_size(comm, &num_procs);
   MPI_Comm_rank(comm,&my_id);
   num_threadsID = hypre_NumThreads();

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* only the last rank knows the total; broadcast it to everyone */
   my_first_cpt = num_cpts_global[0];
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   MPI_Bcast(&total_global_cpts, 1, MPI_HYPRE_BIG_INT, num_procs-1, comm);
#else
   my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(int, num_cols_A_offd);
   if (num_functions > 1 && num_cols_A_offd)
      dof_func_offd = hypre_CTAlloc(int, num_cols_A_offd);

   if (!comm_pkg)
   {
#ifdef HYPRE_NO_GLOBAL_PARTITION
      hypre_NewCommPkgCreate(A);
#else
      hypre_MatvecCommPkgCreate(A);
#endif
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
      int_buf_data = hypre_CTAlloc(int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));

   /* pack the CF_marker entries other processes need and exchange them */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (num_functions > 1)
   {
      /* same exchange for the function/unknown labels */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*----------------------------------------------------------------------
    * Get the ghost rows of A
    *---------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   if (num_procs > 1)
   {
      /* A_ext_j encoding (produced by the extraction routine): entries >= 0
       * are local diag column indices, entries -k-1 refer to offd column k */
      A_ext      = hypre_ParCSRMatrixExtractConvBExt(A,A,1);
      A_ext_i    = hypre_CSRMatrixI(A_ext);
      A_ext_j    = hypre_CSRMatrixJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);
   }

   /*index = 0;
   for (i=0; i < num_cols_A_offd; i++)
   {
      for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
      {
         k = A_ext_j[j];
         if (k >= col_1 && k < col_n)
         {
            A_ext_j[index] = k - col_1;
            A_ext_data[index++] = A_ext_data[j];
         }
         else
         {
            kc = hypre_BinarySearch(col_map_offd,k,num_cols_A_offd);
            if (kc > -1)
            {
               A_ext_j[index] = -kc-1;
               A_ext_data[index++] = A_ext_data[j];
            }
         }
      }
      A_ext_i[i] = index;
   }
   for (i = num_cols_A_offd; i > 0; i--)
      A_ext_i[i] = A_ext_i[i-1];
   if (num_procs > 1) A_ext_i[0] = 0;*/

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    *  First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    *  Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/

   /* one counter slot per thread; prefix-summed after the counting pass */
   coarse_counter = hypre_CTAlloc(int, num_threadsID);
   jj_count = hypre_CTAlloc(int, num_threadsID);
   jj_count_offd = hypre_CTAlloc(int, num_threadsID);

   fine_to_coarse = hypre_CTAlloc(int, n_fine);
/*#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"*/
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    *  Loop over fine grid.
    *-----------------------------------------------------------------------*/

/* RDF: this looks a little tricky, but doable */
#define HYPRE_SMP_PRIVATE i,j,i1,jj,ns,ne,size,rest
#include "../utilities/hypre_smp_forloop.h"
   for (j = 0; j < num_threadsID; j++)
   {
     /* rows [ns,ne) form thread j's contiguous chunk; the first `rest`
      * threads take one extra row to absorb the remainder */
     size = n_fine/num_threadsID;
     rest = n_fine - size*num_threadsID;
     if (j < rest)
     {
        ns = j*size+j;
        ne = (j+1)*size+j+1;
     }
     else
     {
        ns = j*size+rest;
        ne = (j+1)*size+rest;
     }
     for (i = ns; i < ne; i++)
     {
      /*--------------------------------------------------------------------
       *  If i is a C-point, interpolation is the identity. Also set up
       *  mapping vector.
       *--------------------------------------------------------------------*/

      if (CF_marker[i] >= 0)
      {
         jj_count[j]++;
         fine_to_coarse[i] = coarse_counter[j];
         coarse_counter[j]++;
      }

      /*--------------------------------------------------------------------
       *  If i is an F-point, interpolation is from the C-points that
       *  strongly influence i.
       *--------------------------------------------------------------------*/

      else
      {
         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         {
            i1 = S_diag_j[jj];
            if (CF_marker[i1] >= 0)
            {
               jj_count[j]++;
            }
         }

         if (num_procs > 1)
         {
           if (col_offd_S_to_A)
           {
            for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               i1 = col_offd_S_to_A[S_offd_j[jj]];
               if (CF_marker_offd[i1] >= 0)
               {
                  jj_count_offd[j]++;
               }
            }
           }
           else
           {
            for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (CF_marker_offd[i1] >= 0)
               {
                  jj_count_offd[j]++;
               }
            }
           }
         }
      }
    }
   }

   /*-----------------------------------------------------------------------
    *  Allocate arrays.
    *-----------------------------------------------------------------------*/

   /* prefix sums turn per-thread counts into global offsets/totals */
   for (i=0; i < num_threadsID-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threadsID-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;

   P_diag_i    = hypre_CTAlloc(int, n_fine+1);
   if (P_diag_size)
   {
      P_diag_j    = hypre_CTAlloc(int, P_diag_size);
      P_diag_data = hypre_CTAlloc(double, P_diag_size);
   }

   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;

   P_offd_i    = hypre_CTAlloc(int, n_fine+1);
   if (P_offd_size)
   {
      P_offd_j    = hypre_CTAlloc(int, P_offd_size);
      P_offd_data = hypre_CTAlloc(double, P_offd_size);
   }

   /*-----------------------------------------------------------------------
    *  Intialize some stuff.
    *-----------------------------------------------------------------------*/

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /*fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd); */

   /* shift each thread's local coarse numbering by the number of coarse
    * points owned by all lower-numbered threads */
#define HYPRE_SMP_PRIVATE i,j,ns,ne,size,rest,coarse_shift
#include "../utilities/hypre_smp_forloop.h"
   for (j = 0; j < num_threadsID; j++)
   {
     coarse_shift = 0;
     if (j > 0) coarse_shift = coarse_counter[j-1];
     size = n_fine/num_threadsID;
     rest = n_fine - size*num_threadsID;
     if (j < rest)
     {
        ns = j*size+j;
        ne = (j+1)*size+j+1;
     }
     else
     {
        ns = j*size+rest;
        ne = (j+1)*size+rest;
     }
     for (i = ns; i < ne; i++)
        fine_to_coarse[i] += coarse_shift;
   }

   /* index = 0;
   for (i = 0; i < num_sends; i++)
   {
        start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
        for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
                big_buf_data[index++] = my_first_cpt+ (HYPRE_BigInt)fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, fine_to_coarse_offd);

   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time);
      fflush(NULL);
   } */

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /*-----------------------------------------------------------------------
    *  Loop over fine grid points.
    *-----------------------------------------------------------------------*/

#define HYPRE_SMP_PRIVATE i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd
#include "../utilities/hypre_smp_forloop.h"
   for (jl = 0; jl < num_threadsID; jl++)
   {
     size = n_fine/num_threadsID;
     rest = n_fine - size*num_threadsID;
     if (jl < rest)
     {
        ns = jl*size+jl;
        ne = (jl+1)*size+jl+1;
     }
     else
     {
        ns = jl*size+rest;
        ne = (jl+1)*size+rest;
     }
     /* each thread resumes filling P at the offset computed in pass 1 */
     jj_counter = 0;
     if (jl > 0) jj_counter = jj_count[jl-1];
     jj_counter_offd = 0;
     if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];

     if (n_fine) P_marker = hypre_CTAlloc(int, n_fine);
     else P_marker = NULL;
     if (num_cols_A_offd) P_marker_offd = hypre_CTAlloc(int, num_cols_A_offd);
     else P_marker_offd = NULL;

     for (i = 0; i < n_fine; i++)
     {
        P_marker[i] = -1;
     }
     for (i = 0; i < num_cols_A_offd; i++)
     {
        P_marker_offd[i] = -1;
     }
     /* decremented after each row so stale marks from earlier rows can
      * never be mistaken for the current row's strong F-points */
     strong_f_marker = -2;

     for (i = ns; i < ne; i++)
     {

      /*--------------------------------------------------------------------
       *  If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/

      if (CF_marker[i] >= 0)
      {
         P_diag_i[i] = jj_counter;
         P_diag_j[jj_counter]    = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }

      /*--------------------------------------------------------------------
       *  If i is an F-point, build interpolation.
       *--------------------------------------------------------------------*/

      else
      {
         /* Diagonal part of P */
         P_diag_i[i] = jj_counter;
         jj_begin_row = jj_counter;

         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         {
            i1 = S_diag_j[jj];

            /*--------------------------------------------------------------
             * If neighbor i1 is a C-point, set column number in P_diag_j
             * and initialize interpolation weight to zero.
             *--------------------------------------------------------------*/

            if (CF_marker[i1] >= 0)
            {
               P_marker[i1] = jj_counter;
               P_diag_j[jj_counter]    = fine_to_coarse[i1];
               P_diag_data[jj_counter] = zero;
               jj_counter++;
            }

            /*--------------------------------------------------------------
             * If neighbor i1 is an F-point, mark it as a strong F-point
             * whose connection needs to be distributed.
             *--------------------------------------------------------------*/

            else if (CF_marker[i1] != -3)
            {
               P_marker[i1] = strong_f_marker;
            }
         }
         jj_end_row = jj_counter;

         /* Off-Diagonal part of P */
         P_offd_i[i] = jj_counter_offd;
         jj_begin_row_offd = jj_counter_offd;

         if (num_procs > 1)
         {
           if (col_offd_S_to_A)
           {
            for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               i1 = col_offd_S_to_A[S_offd_j[jj]];

               /*-----------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_offd_j
                * and initialize interpolation weight to zero.
                *-----------------------------------------------------------*/

               if (CF_marker_offd[i1] >= 0)
               {
                  P_marker_offd[i1] = jj_counter_offd;
                  /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
                  /* store the local offd index for now; remapped to the
                   * compressed P offd numbering after truncation below */
                  P_offd_j[jj_counter_offd] = i1;
                  P_offd_data[jj_counter_offd] = zero;
                  jj_counter_offd++;
               }

               /*-----------------------------------------------------------
                * If neighbor i1 is an F-point, mark it as a strong F-point
                * whose connection needs to be distributed.
                *-----------------------------------------------------------*/

               else if (CF_marker_offd[i1] != -3)
               {
                  P_marker_offd[i1] = strong_f_marker;
               }
            }
           }
           else
           {
            for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               i1 = S_offd_j[jj];

               /*-----------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_offd_j
                * and initialize interpolation weight to zero.
                *-----------------------------------------------------------*/

               if (CF_marker_offd[i1] >= 0)
               {
                  P_marker_offd[i1] = jj_counter_offd;
                  /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
                  P_offd_j[jj_counter_offd] = i1;
                  P_offd_data[jj_counter_offd] = zero;
                  jj_counter_offd++;
               }

               /*-----------------------------------------------------------
                * If neighbor i1 is an F-point, mark it as a strong F-point
                * whose connection needs to be distributed.
                *-----------------------------------------------------------*/

               else if (CF_marker_offd[i1] != -3)
               {
                  P_marker_offd[i1] = strong_f_marker;
               }
            }
           }
         }
         jj_end_row_offd = jj_counter_offd;

         diagonal = A_diag_data[A_diag_i[i]];

         /* Loop over ith row of A. First, the diagonal part of A */

         for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
         {
            i1 = A_diag_j[jj];

            /*--------------------------------------------------------------
             * Case 1: neighbor i1 is a C-point and strongly influences i,
             * accumulate a_{i,i1} into the interpolation weight.
             *--------------------------------------------------------------*/

            if (P_marker[i1] >= jj_begin_row)
            {
               P_diag_data[P_marker[i1]] += A_diag_data[jj];
            }

            /*--------------------------------------------------------------
             * Case 2: neighbor i1 is an F-point and strongly influences i,
             * distribute a_{i,i1} to C-points that strongly infuence i.
             * Note: currently no distribution to the diagonal in this case.
             *--------------------------------------------------------------*/

            else if (P_marker[i1] == strong_f_marker)
            {
               sum = zero;

               /*-----------------------------------------------------------
                * Loop over row of A for point i1 and calculate the sum
                * of the connections to c-points that strongly influence i.
                *-----------------------------------------------------------*/

               /* the sign of i1's own diagonal decides which couplings count */
               sgn = 1;
               if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
               /* Diagonal block part of row i1 */
               for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
               {
                  i2 = A_diag_j[jj1];
                  if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
                  {
                     sum += A_diag_data[jj1];
                  }
               }

               /* Off-Diagonal block part of row i1 */
               if (num_procs > 1)
               {
                  for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                  {
                     i2 = A_offd_j[jj1];
                     if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0)
                     {
                        sum += A_offd_data[jj1];
                     }
                  }
               }

               if (sum != 0)
               {
                  distribute = A_diag_data[jj] / sum;

                  /*-----------------------------------------------------------
                   * Loop over row of A for point i1 and do the distribution.
                   *-----------------------------------------------------------*/

                  /* Diagonal block part of row i1 */
                  for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
                     {
                        P_diag_data[P_marker[i2]] += distribute * A_diag_data[jj1];
                     }
                  }

                  /* Off-Diagonal block part of row i1 */
                  if (num_procs > 1)
                  {
                     for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0)
                        {
                           P_offd_data[P_marker_offd[i2]] += distribute * A_offd_data[jj1];
                        }
                     }
                  }
               }
               else
               {
                  /* nothing to distribute to: lump into the diagonal
                   * (only within the same function when num_functions > 1) */
                  if (num_functions == 1 || dof_func[i] == dof_func[i1])
                     diagonal += A_diag_data[jj];
               }
            }

            /*--------------------------------------------------------------
             * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
             * into the diagonal.
             *--------------------------------------------------------------*/

            else if (CF_marker[i1] != -3)
            {
               if (num_functions == 1 || dof_func[i] == dof_func[i1])
                  diagonal += A_diag_data[jj];
            }

         }

          /*----------------------------------------------------------------
           * Still looping over ith row of A. Next, loop over the
           * off-diagonal part of A
           *---------------------------------------------------------------*/

         if (num_procs > 1)
         {
            for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
            {
               i1 = A_offd_j[jj];

            /*--------------------------------------------------------------
             * Case 1: neighbor i1 is a C-point and strongly influences i,
             * accumulate a_{i,i1} into the interpolation weight.
             *--------------------------------------------------------------*/

               if (P_marker_offd[i1] >= jj_begin_row_offd)
               {
                  P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
               }

               /*------------------------------------------------------------
                * Case 2: neighbor i1 is an F-point and strongly influences i,
                * distribute a_{i,i1} to C-points that strongly infuence i.
                * Note: currently no distribution to the diagonal in this case.
                *-----------------------------------------------------------*/

               else if (P_marker_offd[i1] == strong_f_marker)
               {
                  sum = zero;

               /*---------------------------------------------------------
                * Loop over row of A_ext for point i1 and calculate the sum
                * of the connections to c-points that strongly influence i.
                *---------------------------------------------------------*/

                  /* find row number */
                  c_num = A_offd_j[jj];

                  sgn = 1;
                  if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
                  for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
                  {
                     i2 = A_ext_j[jj1];
                     if (i2 > -1)
                     {
                        /* in the diagonal block */
                        if (P_marker[i2] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0)
                        {
                           sum += A_ext_data[jj1];
                        }
                     }
                     else
                     {
                        /* in the off_diagonal block: -i2-1 decodes the offd index */
                        if (P_marker_offd[-i2-1] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0)
                        {
                           sum += A_ext_data[jj1];
                        }
                     }
                  }

                  if (sum != 0)
                  {
                     distribute = A_offd_data[jj] / sum;
                     /*---------------------------------------------------------
                      * Loop over row of A_ext for point i1 and do
                      * the distribution.
                      *--------------------------------------------------------*/

                     /* Diagonal block part of row i1 */

                     for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
                     {
                        i2 = A_ext_j[jj1];
                        if (i2 > -1) /* in the diagonal block */
                        {
                           if (P_marker[i2] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0)
                           {
                              P_diag_data[P_marker[i2]] += distribute * A_ext_data[jj1];
                           }
                        }
                        else
                        {
                           /* in the off_diagonal block */
                           if (P_marker_offd[-i2-1] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0)
                              P_offd_data[P_marker_offd[-i2-1]] += distribute * A_ext_data[jj1];
                        }
                     }
                  }
                  else
                  {
                     if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                        diagonal += A_offd_data[jj];
                  }
               }

               /*-----------------------------------------------------------
                * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
                * into the diagonal.
                *-----------------------------------------------------------*/

               else if (CF_marker_offd[i1] != -3)
               {
                  if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                     diagonal += A_offd_data[jj];
               }
            }
         }

        /*-----------------------------------------------------------------
          * Set interpolation weight by dividing by the diagonal.
          *-----------------------------------------------------------------*/

         if (diagonal == 0.0)
         {
            /* fall back to the raw diagonal of A to avoid dividing by zero */
            printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i);
            diagonal = A_diag_data[A_diag_i[i]];
         }
         for (jj = jj_begin_row; jj < jj_end_row; jj++)
         {
            P_diag_data[jj] /= -diagonal;
         }
         for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
         {
            P_offd_data[jj] /= -diagonal;
         }
      }

      strong_f_marker--;

      P_offd_i[i+1] = jj_counter_offd;
     }
     hypre_TFree(P_marker);
     hypre_TFree(P_marker_offd);
   }

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   /* hand the locally built CSR arrays over to P (P owns them from here on) */
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */

   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* truncation may reallocate the CSR arrays; refresh all aliases */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* compress the offd column space of P to only the columns actually used */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      if (num_cols_A_offd) P_marker = hypre_CTAlloc(int, num_cols_A_offd);

/*#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"*/
      for (i=0; i < num_cols_A_offd; i++)
         P_marker[i] = 0;

      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }

      if (num_cols_P_offd)
      {
         /* col_map_offd_P is attached to P below; its global entries are
          * presumably filled by hypre_GetCommPkgRTFromCommPkgA — not visible
          * here, confirm in that routine */
         col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt,num_cols_P_offd);
         tmp_map_offd = hypre_CTAlloc(int,num_cols_P_offd);
      }

      /* tmp_map_offd[k] = k-th used A-offd column, in increasing order */
      index = 0;
      for (i=0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }

/*#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"*/
      /* renumber P's offd columns into the compressed numbering */
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      hypre_TFree(P_marker);
   }

   /* restore the special -3 markers to ordinary F-point marks */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   if (num_procs > 1)
      hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   hypre_TFree(tmp_map_offd);
   hypre_TFree(CF_marker_offd);
   hypre_TFree(dof_func_offd);
   hypre_TFree(int_buf_data);
   hypre_TFree(fine_to_coarse);
   /*hypre_TFree(fine_to_coarse_offd);*/
   hypre_TFree(coarse_counter);
   hypre_TFree(jj_count);
   hypre_TFree(jj_count_offd);

   if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);

   return(0);
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGInterpTruncation
 *
 * Truncates the interpolation matrix P in place: drops entries smaller than
 * trunc_factor times the row's largest magnitude and/or keeps at most
 * max_elmts entries per row, rescaling each row to preserve its sum.
 *--------------------------------------------------------------------------*/

int
hypre_BoomerAMGInterpTruncation( hypre_ParCSRMatrix *P,
                                 double trunc_factor,
                                 int max_elmts)
{
   hypre_CSRMatrix *P_diag = hypre_ParCSRMatrixDiag(P);
   int *P_diag_i = hypre_CSRMatrixI(P_diag);
   int *P_diag_j = hypre_CSRMatrixJ(P_diag);
   double *P_diag_data = hypre_CSRMatrixData(P_diag);
   int *P_diag_j_new;
   double *P_diag_data_new;

   hypre_CSRMatrix *P_offd = hypre_ParCSRMatrixOffd(P);
   int *P_offd_i = hypre_CSRMatrixI(P_offd);
   int *P_offd_j = hypre_CSRMatrixJ(P_offd);
   double *P_offd_data = hypre_CSRMatrixData(P_offd);
   int *P_offd_j_new;
   double *P_offd_data_new;

   int n_fine = hypre_CSRMatrixNumRows(P_diag);
   int num_cols = hypre_CSRMatrixNumCols(P_diag);
   int i, j, start_j;
   int ierr = 0;
   double max_coef;
   int next_open = 0;
   int now_checking = 0;
   int next_open_offd = 0;
   int now_checking_offd = 0;
   int num_lost = 0;
   int num_lost_offd = 0;
   int num_lost_global = 0;
   int num_lost_global_offd = 0;
   int P_diag_size;
   int P_offd_size;
   int num_elmts;
   int cnt, cnt_diag, cnt_offd;
   double row_sum;
   double scale;

   /* Threading variables.  Entry i of num_lost_(offd_)per_thread holds the
    * number of dropped entries over thread i's row range.
Cum_lost_per_thread * will temporarily store the cumulative number of dropped entries up to * each thread. */ int my_thread_num, num_threadsID, start, stop; int * max_num_threads = hypre_CTAlloc(int, 1); int * cum_lost_per_thread; int * num_lost_per_thread; int * num_lost_offd_per_thread; /* Initialize threading variables */ max_num_threads[0] = hypre_NumThreads(); cum_lost_per_thread = hypre_CTAlloc(int, max_num_threads[0]); num_lost_per_thread = hypre_CTAlloc(int, max_num_threads[0]); num_lost_offd_per_thread = hypre_CTAlloc(int, max_num_threads[0]); for(i=0; i < max_num_threads[0]; i++) { num_lost_per_thread[i] = 0; num_lost_offd_per_thread[i] = 0; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,my_thread_num,num_threadsID,max_coef,j,start_j,row_sum,scale,num_lost,now_checking,next_open,num_lost_offd,now_checking_offd,next_open_offd,start,stop,cnt_diag,cnt_offd,num_elmts,cnt) #endif { my_thread_num = hypre_GetThreadNum(); num_threadsID = hypre_NumActiveThreads(); /* Compute each thread's range of rows to truncate and compress. Note, * that i, j and data are all compressed as entries are dropped, but * that the compression only occurs locally over each thread's row * range. P_diag_i is only made globally consistent at the end of this * routine. During the dropping phases, P_diag_i[stop] will point to * the start of the next thread's row range. */ /* my row range */ start = (n_fine/num_threadsID)*my_thread_num; if (my_thread_num == num_threadsID-1) { stop = n_fine; } else { stop = (n_fine/num_threadsID)*(my_thread_num+1); } /* * Truncate based on truncation tolerance */ if (trunc_factor > 0) { num_lost_offd = 0; next_open = P_diag_i[start]; now_checking = P_diag_i[start]; next_open_offd = P_offd_i[start];; now_checking_offd = P_offd_i[start];; for (i = start; i < stop; i++) { max_coef = 0; for (j = P_diag_i[i]; j < P_diag_i[i+1]; j++) max_coef = (max_coef < fabs(P_diag_data[j])) ? 
fabs(P_diag_data[j]) : max_coef; for (j = P_offd_i[i]; j < P_offd_i[i+1]; j++) max_coef = (max_coef < fabs(P_offd_data[j])) ? fabs(P_offd_data[j]) : max_coef; max_coef *= trunc_factor; start_j = P_diag_i[i]; P_diag_i[i] -= num_lost; row_sum = 0; scale = 0; for (j = start_j; j < P_diag_i[i+1]; j++) { row_sum += P_diag_data[now_checking]; if (fabs(P_diag_data[now_checking]) < max_coef) { num_lost++; now_checking++; } else { scale += P_diag_data[now_checking]; P_diag_data[next_open] = P_diag_data[now_checking]; P_diag_j[next_open] = P_diag_j[now_checking]; now_checking++; next_open++; } } start_j = P_offd_i[i]; P_offd_i[i] -= num_lost_offd; for (j = start_j; j < P_offd_i[i+1]; j++) { row_sum += P_offd_data[now_checking_offd]; if (fabs(P_offd_data[now_checking_offd]) < max_coef) { num_lost_offd++; now_checking_offd++; } else { scale += P_offd_data[now_checking_offd]; P_offd_data[next_open_offd] = P_offd_data[now_checking_offd]; P_offd_j[next_open_offd] = P_offd_j[now_checking_offd]; now_checking_offd++; next_open_offd++; } } /* normalize row of P */ if (scale != 0.) 
{ if (scale != row_sum) { scale = row_sum/scale; for (j = P_diag_i[i]; j < (P_diag_i[i+1]-num_lost); j++) P_diag_data[j] *= scale; for (j = P_offd_i[i]; j < (P_offd_i[i+1]-num_lost_offd); j++) P_offd_data[j] *= scale; } } } /* store number of dropped elements and number of threads */ if(my_thread_num == 0) { max_num_threads[0] = num_threadsID; } num_lost_per_thread[my_thread_num] = num_lost; num_lost_offd_per_thread[my_thread_num] = num_lost_offd; } /* end if (trunc_factor > 0) */ /*P_diag_i[n_fine] -= num_lost; P_offd_i[n_fine] -= num_lost_offd; } */ if (max_elmts > 0) { int P_mxnum, cnt1, last_index, last_index_offd; int *P_aux_j; double *P_aux_data; /* find maximum row length locally over this row range */ P_mxnum = 0; for (i=start; i<stop; i++) { /* Note P_diag_i[stop] is the starting point for the next thread * in j and data, not the stop point for this thread */ last_index = P_diag_i[i+1]; last_index_offd = P_offd_i[i+1]; if(i == stop-1) { last_index -= num_lost_per_thread[my_thread_num]; last_index_offd -= num_lost_offd_per_thread[my_thread_num]; } cnt1 = last_index-P_diag_i[i] + last_index_offd-P_offd_i[i]; if (cnt1 > P_mxnum) P_mxnum = cnt1; } /*rowlength = 0; if (n_fine) rowlength = P_diag_i[1]+P_offd_i[1]; P_mxnum = rowlength; for (i=1; i<n_fine; i++) { rowlength = P_diag_i[i+1]-P_diag_i[i]+P_offd_i[i+1]-P_offd_i[i]; if (rowlength > P_mxnum) P_mxnum = rowlength; }*/ if (P_mxnum > max_elmts) { num_lost = 0; num_lost_offd = 0; /* two temporary arrays to hold row i for temporary operations */ P_aux_j = hypre_CTAlloc(int, P_mxnum); P_aux_data = hypre_CTAlloc(double, P_mxnum); cnt_diag = P_diag_i[start]; cnt_offd = P_offd_i[start]; for (i = start; i < stop; i++) { /* Note P_diag_i[stop] is the starting point for the next thread * in j and data, not the stop point for this thread */ last_index = P_diag_i[i+1]; last_index_offd = P_offd_i[i+1]; if(i == stop-1) { last_index -= num_lost_per_thread[my_thread_num]; last_index_offd -= 
num_lost_offd_per_thread[my_thread_num]; } row_sum = 0; num_elmts = P_diag_i[i+1]-P_diag_i[i]+P_offd_i[i+1]-P_offd_i[i]; if (max_elmts < num_elmts) { cnt = 0; for (j = P_diag_i[i]; j < P_diag_i[i+1]; j++) { P_aux_j[cnt] = P_diag_j[j]; P_aux_data[cnt++] = P_diag_data[j]; row_sum += P_diag_data[j]; } num_lost += cnt; cnt1 = cnt; for (j = P_offd_i[i]; j < P_offd_i[i+1]; j++) { P_aux_j[cnt] = P_offd_j[j]+num_cols; P_aux_data[cnt++] = P_offd_data[j]; row_sum += P_offd_data[j]; } num_lost_offd += cnt-cnt1; /* sort data */ hypre_qsort2abs(P_aux_j,P_aux_data,0,cnt-1); scale = 0; P_diag_i[i] = cnt_diag; P_offd_i[i] = cnt_offd; for (j = 0; j < max_elmts; j++) { scale += P_aux_data[j]; if (P_aux_j[j] < num_cols) { P_diag_j[cnt_diag] = P_aux_j[j]; P_diag_data[cnt_diag++] = P_aux_data[j]; } else { P_offd_j[cnt_offd] = P_aux_j[j]-num_cols; P_offd_data[cnt_offd++] = P_aux_data[j]; } } num_lost -= cnt_diag-P_diag_i[i]; num_lost_offd -= cnt_offd-P_offd_i[i]; /* normalize row of P */ if (scale != 0.) { if (scale != row_sum) { scale = row_sum/scale; for (j = P_diag_i[i]; j < cnt_diag; j++) P_diag_data[j] *= scale; for (j = P_offd_i[i]; j < cnt_offd; j++) P_offd_data[j] *= scale; } } } else { if (P_diag_i[i] != cnt_diag) { start_j = P_diag_i[i]; P_diag_i[i] = cnt_diag; for (j = start_j; j < P_diag_i[i+1]; j++) { P_diag_j[cnt_diag] = P_diag_j[j]; P_diag_data[cnt_diag++] = P_diag_data[j]; } } else cnt_diag += P_diag_i[i+1]-P_diag_i[i]; if (P_offd_i[i] != cnt_offd) { start_j = P_offd_i[i]; P_offd_i[i] = cnt_offd; for (j = start_j; j < P_offd_i[i+1]; j++) { P_offd_j[cnt_offd] = P_offd_j[j]; P_offd_data[cnt_offd++] = P_offd_data[j]; } } else cnt_offd += P_offd_i[i+1]-P_offd_i[i]; } } num_lost_per_thread[my_thread_num] += num_lost; num_lost_offd_per_thread[my_thread_num] += num_lost_offd; hypre_TFree(P_aux_j); hypre_TFree(P_aux_data); } /* end if (P_mxnum > max_elmts) */ } /* end if (max_elmts > 0) */ /* Sum up num_lost_global */ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif 
if(my_thread_num == 0) { num_lost_global = 0; num_lost_global_offd = 0; for(i = 0; i < max_num_threads[0]; i++) { num_lost_global += num_lost_per_thread[i]; num_lost_global_offd += num_lost_offd_per_thread[i]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* * Synchronize and create new diag data structures */ if (num_lost_global) { /* Each thread has it's own locally compressed CSR matrix from rows start * to stop. Now, we have to copy each thread's chunk into the new * process-wide CSR data structures * * First, we compute the new process-wide number of nonzeros (i.e., * P_diag_size), and compute cum_lost_per_thread[k] so that this * entry holds the cumulative sum of entries dropped up to and * including thread k. */ if(my_thread_num == 0) { P_diag_size = P_diag_i[n_fine]; for(i = 0; i < max_num_threads[0]; i++) { P_diag_size -= num_lost_per_thread[i]; if(i > 0) { cum_lost_per_thread[i] = num_lost_per_thread[i] + cum_lost_per_thread[i-1]; } else { cum_lost_per_thread[i] = num_lost_per_thread[i]; } } P_diag_j_new = hypre_CTAlloc(int,P_diag_size); P_diag_data_new = hypre_CTAlloc(double,P_diag_size); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* points to next open spot in new data structures for this thread */ if(my_thread_num == 0) { next_open = 0; } else { /* remember, cum_lost_per_thread[k] stores the num dropped up to and * including thread k */ next_open = P_diag_i[start] - cum_lost_per_thread[my_thread_num-1]; } /* copy the j and data arrays over */ for(i = P_diag_i[start]; i < P_diag_i[stop] - num_lost_per_thread[my_thread_num]; i++) { P_diag_j_new[next_open] = P_diag_j[i]; P_diag_data_new[next_open] = P_diag_data[i]; next_open += 1; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* update P_diag_i with number of dropped entries by all lower ranked * threads */ if(my_thread_num > 0) { for(i=start; i<stop; i++) { P_diag_i[i] -= cum_lost_per_thread[my_thread_num-1]; } } if(my_thread_num == 0) { /* Set last entry */ 
P_diag_i[n_fine] = P_diag_size ; hypre_TFree(P_diag_j); hypre_TFree(P_diag_data); hypre_CSRMatrixJ(P_diag) = P_diag_j_new; hypre_CSRMatrixData(P_diag) = P_diag_data_new; hypre_CSRMatrixNumNonzeros(P_diag) = P_diag_size; } } /* * Synchronize and create new offd data structures */ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (num_lost_global_offd) { /* Repeat process for off-diagonal */ if(my_thread_num == 0) { P_offd_size = P_offd_i[n_fine]; for(i = 0; i < max_num_threads[0]; i++) { P_offd_size -= num_lost_offd_per_thread[i]; if(i > 0) { cum_lost_per_thread[i] = num_lost_offd_per_thread[i] + cum_lost_per_thread[i-1]; } else { cum_lost_per_thread[i] = num_lost_offd_per_thread[i]; } } P_offd_j_new = hypre_CTAlloc(int,P_offd_size); P_offd_data_new = hypre_CTAlloc(double,P_offd_size); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* points to next open spot in new data structures for this thread */ if(my_thread_num == 0) { next_open = 0; } else { /* remember, cum_lost_per_thread[k] stores the num dropped up to and * including thread k */ next_open = P_offd_i[start] - cum_lost_per_thread[my_thread_num-1]; } /* copy the j and data arrays over */ for(i = P_offd_i[start]; i < P_offd_i[stop] - num_lost_offd_per_thread[my_thread_num]; i++) { P_offd_j_new[next_open] = P_offd_j[i]; P_offd_data_new[next_open] = P_offd_data[i]; next_open += 1; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* update P_offd_i with number of dropped entries by all lower ranked * threads */ if(my_thread_num > 0) { for(i=start; i<stop; i++) { P_offd_i[i] -= cum_lost_per_thread[my_thread_num-1]; } } if(my_thread_num == 0) { /* Set last entry */ P_offd_i[n_fine] = P_offd_size ; hypre_TFree(P_offd_j); hypre_TFree(P_offd_data); hypre_CSRMatrixJ(P_offd) = P_offd_j_new; hypre_CSRMatrixData(P_offd) = P_offd_data_new; hypre_CSRMatrixNumNonzeros(P_offd) = P_offd_size; } } } /* end parallel region */ hypre_TFree(max_num_threads); hypre_TFree(cum_lost_per_thread); 
   /* free the per-thread drop-count bookkeeping arrays and finish */
   hypre_TFree(num_lost_per_thread);
   hypre_TFree(num_lost_offd_per_thread);

   return ierr;
}

/* Sort v (column indices) and w (matching coefficients) in place over the
   inclusive range [left, right], ordered by DECREASING absolute value of w.
   Recursive quicksort: the middle element is moved to the front as the
   pivot before partitioning.  swap2 exchanges v[i]/v[j] together with
   w[i]/w[j] so the two arrays stay aligned.  The exact swap order decides
   the final placement of entries with equal |w|.  */
void hypre_qsort2abs( int *v,
                      double *w,
                      int left,
                      int right )
{
   int i, last;
   if (left >= right)
      return;
   /* move the median-position element into the pivot slot */
   swap2( v, w, left, (left+right)/2);
   last = left;
   /* partition: entries with larger |w| end up in [left+1, last] */
   for (i = left+1; i <= right; i++)
      if (fabs(w[i]) > fabs(w[left]))
      {
         swap2(v, w, ++last, i);
      }
   /* place the pivot at its final position, then recurse on both sides */
   swap2(v, w, left, last);
   hypre_qsort2abs(v, w, left, last-1);
   hypre_qsort2abs(v, w, last+1, right);
}
team.c
/* Copyright (C) 2005-2017 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the maintenance of threads in response to team
   creation and termination.  */

#include "libgomp.h"
#include "pool.h"
#include <stdlib.h>
#include <string.h>

#ifdef LIBGOMP_USE_PTHREADS
/* This attribute contains PTHREAD_CREATE_DETACHED.  */
pthread_attr_t gomp_thread_attr;

/* This key is for the thread destructor.  */
pthread_key_t gomp_thread_destructor;

/* This is the libgomp per-thread data structure.  */
#if defined HAVE_TLS || defined USE_EMUTLS
__thread struct gomp_thread gomp_tls_data;
#else
pthread_key_t gomp_tls_key;
#endif

/* This structure is used to communicate across pthread_create.  It is
   allocated with gomp_alloca in gomp_team_start and read by the new
   thread in gomp_thread_start before its first barrier wait.  */
struct gomp_thread_start_data
{
  void (*fn) (void *);		/* outlined region the new thread runs */
  void *fn_data;		/* argument passed to FN */
  struct gomp_team_state ts;	/* initial team state for the thread */
  struct gomp_task *task;	/* the thread's implicit task */
  struct gomp_thread_pool *thread_pool;	/* pool the thread belongs to */
  unsigned int place;		/* 1-based place number, 0 when unbound */
  bool nested;			/* true when launched for a nested team */
};


/* This function is a pthread_create entry point.  
   This contains the idle loop in which a thread waits to be called up to
   become part of a team.  */

static void *
gomp_thread_start (void *xdata)
{
  struct gomp_thread_start_data *data = xdata;
  struct gomp_thread *thr;
  struct gomp_thread_pool *pool;
  void (*local_fn) (void *);
  void *local_data;

  /* Locate this thread's per-thread state: real TLS when available,
     otherwise a stack copy published through the pthreads key.  */
#if defined HAVE_TLS || defined USE_EMUTLS
  thr = &gomp_tls_data;
#else
  struct gomp_thread local_thr;
  thr = &local_thr;
  pthread_setspecific (gomp_tls_key, thr);
#endif
  gomp_sem_init (&thr->release, 0);

  /* Extract what we need from data.  */
  local_fn = data->fn;
  local_data = data->fn_data;
  thr->thread_pool = data->thread_pool;
  thr->ts = data->ts;
  thr->task = data->task;
  thr->place = data->place;

  /* Publish this thread's release semaphore, indexed by team id.  */
  thr->ts.team->ordered_release[thr->ts.team_id] = &thr->release;

  /* Make thread pool local. */
  pool = thr->thread_pool;

  if (data->nested)
    {
      /* Nested team: run the region once and terminate; nested threads
	 are not docked back into the pool.  */
      struct gomp_team *team = thr->ts.team;
      struct gomp_task *task = thr->task;

      gomp_barrier_wait (&team->barrier);

      local_fn (local_data);
      gomp_team_barrier_wait_final (&team->barrier);
      gomp_finish_task (task);
      gomp_barrier_wait_last (&team->barrier);
    }
  else
    {
      /* Pool thread: register in the pool's thread array, then serve
	 one team per loop iteration until woken with a NULL fn.  */
      pool->threads[thr->ts.team_id] = thr;

      gomp_simple_barrier_wait (&pool->threads_dock);
      do
	{
	  struct gomp_team *team = thr->ts.team;
	  struct gomp_task *task = thr->task;

	  local_fn (local_data);
	  gomp_team_barrier_wait_final (&team->barrier);
	  gomp_finish_task (task);

	  gomp_simple_barrier_wait (&pool->threads_dock);

	  /* The releasing thread filled in fn/data before undocking us;
	     a NULL fn terminates this thread (see gomp_free_thread).  */
	  local_fn = thr->fn;
	  local_data = thr->data;
	  thr->fn = NULL;
	}
      while (local_fn);
    }

  gomp_sem_destroy (&thr->release);
  thr->thread_pool = NULL;
  thr->task = NULL;
  return NULL;
}
#endif

/* If the current thread pool caches a team of exactly NTHREADS threads,
   detach and return it; otherwise return NULL.  Only returns a team when
   called from outside any team (thr->ts.team == NULL).  */

static inline struct gomp_team *
get_last_team (unsigned nthreads)
{
  struct gomp_thread *thr = gomp_thread ();
  if (thr->ts.team == NULL)
    {
      struct gomp_thread_pool *pool = gomp_get_thread_pool (thr, nthreads);
      struct gomp_team *last_team = pool->last_team;
      if (last_team != NULL && last_team->nthreads == nthreads)
	{
	  pool->last_team = NULL;
	  return last_team;
	}
    }
  return NULL;
}

/* Create a new team data structure.  
   NTHREADS is the team size including the master.  The team may be
   recycled from the pool's cached last_team (see get_last_team), in which
   case the allocation, barrier and locks are still valid and only the
   per-run fields are reset.  */

struct gomp_team *
gomp_new_team (unsigned nthreads)
{
  struct gomp_team *team;
  int i;

  team = get_last_team (nthreads);
  if (team == NULL)
    {
      /* One allocation: the team struct is followed by NTHREADS implicit
	 tasks and then NTHREADS ordered-release semaphore pointers.  */
      size_t extra = sizeof (team->ordered_release[0])
		     + sizeof (team->implicit_task[0]);
      team = gomp_malloc (sizeof (*team) + nthreads * extra);

#ifndef HAVE_SYNC_BUILTINS
      gomp_mutex_init (&team->work_share_list_free_lock);
#endif
      gomp_barrier_init (&team->barrier, nthreads);
      gomp_mutex_init (&team->task_lock);

      team->nthreads = nthreads;
    }

  team->work_share_chunk = 8;
#ifdef HAVE_SYNC_BUILTINS
  team->single_count = 0;
#endif
  team->work_shares_to_free = &team->work_shares[0];
  /* work_shares[0] is in use by the team from the start; chain
     work_shares[1..7] onto the allocation free list.  */
  gomp_init_work_share (&team->work_shares[0], false, nthreads);
  team->work_shares[0].next_alloc = NULL;
  team->work_share_list_free = NULL;
  team->work_share_list_alloc = &team->work_shares[1];
  for (i = 1; i < 7; i++)
    team->work_shares[i].next_free = &team->work_shares[i + 1];
  team->work_shares[i].next_free = NULL;

  gomp_sem_init (&team->master_release, 0);
  /* ordered_release lives in the same allocation, immediately after the
     implicit task array.  */
  team->ordered_release = (void *) &team->implicit_task[nthreads];
  team->ordered_release[0] = &team->master_release;

  priority_queue_init (&team->task_queue);
  team->task_count = 0;
  team->task_queued_count = 0;
  team->task_running_count = 0;
  team->work_share_cancelled = 0;
  team->team_cancelled = 0;

  return team;
}

/* Free a team data structure.  
   Destroys the team's synchronization objects before freeing it.  */

static void
free_team (struct gomp_team *team)
{
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_destroy (&team->work_share_list_free_lock);
#endif
  gomp_barrier_destroy (&team->barrier);
  gomp_mutex_destroy (&team->task_lock);
  priority_queue_destroy (&team->task_queue);
  free (team);
}

/* Thread exit trampoline installed by gomp_free_thread: each docked pool
   thread wakes up in its idle loop with fn pointing here, checks in at
   the dock one last time, tears down its per-thread state and exits.  */

static void
gomp_free_pool_helper (void *thread_pool)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_thread_pool *pool
    = (struct gomp_thread_pool *) thread_pool;
  gomp_simple_barrier_wait_last (&pool->threads_dock);
  gomp_sem_destroy (&thr->release);
  thr->thread_pool = NULL;
  thr->task = NULL;
#ifdef LIBGOMP_USE_PTHREADS
  pthread_exit (NULL);
#elif defined(__nvptx__)
  asm ("exit;");
#else
#error gomp_free_pool_helper must terminate the thread
#endif
}

/* Free a thread pool and release its threads. */

void
gomp_free_thread (void *arg __attribute__((unused)))
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_thread_pool *pool = thr->thread_pool;
  if (pool)
    {
      if (pool->threads_used > 0)
	{
	  int i;
	  /* Point every docked worker at the exit helper; they pick the
	     new fn up when released from the dock (see the idle loop in
	     gomp_thread_start).  */
	  for (i = 1; i < pool->threads_used; i++)
	    {
	      struct gomp_thread *nthr = pool->threads[i];
	      nthr->fn = gomp_free_pool_helper;
	      nthr->data = pool;
	    }
	  /* This barrier undocks threads docked on pool->threads_dock.  */
	  gomp_simple_barrier_wait (&pool->threads_dock);
	  /* And this waits till all threads have called gomp_barrier_wait_last
	     in gomp_free_pool_helper.  */
	  gomp_simple_barrier_wait (&pool->threads_dock);
	  /* Now it is safe to destroy the barrier and free the pool.  */
	  gomp_simple_barrier_destroy (&pool->threads_dock);

#ifdef HAVE_SYNC_BUILTINS
	  __sync_fetch_and_add (&gomp_managed_threads,
				1L - pool->threads_used);
#else
	  gomp_mutex_lock (&gomp_managed_threads_lock);
	  gomp_managed_threads -= pool->threads_used - 1L;
	  gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif
	}
      if (pool->last_team)
	free_team (pool->last_team);
#ifndef __nvptx__
      free (pool->threads);
      free (pool);
#endif
      thr->thread_pool = NULL;
    }
  if (thr->ts.level == 0 && __builtin_expect (thr->ts.team != NULL, 0))
    gomp_team_end ();
  if (thr->task != NULL)
    {
      struct gomp_task *task = thr->task;
      gomp_end_task ();
      free (task);
    }
}

/* Launch a team.  FN/DATA is the outlined parallel-region body, NTHREADS
   the team size including the master, FLAGS carries the proc_bind clause
   in its low 3 bits, and TEAM comes from gomp_new_team.  Docked pool
   threads are reused for non-nested teams; any remaining threads are
   created with pthread_create.  */

#ifdef LIBGOMP_USE_PTHREADS
void
gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
		 unsigned flags, struct gomp_team *team)
{
  struct gomp_thread_start_data *start_data;
  struct gomp_thread *thr, *nthr;
  struct gomp_task *task;
  struct gomp_task_icv *icv;
  bool nested;
  struct gomp_thread_pool *pool;
  unsigned i, n, old_threads_used = 0;
  pthread_attr_t thread_attr, *attr;
  unsigned long nthreads_var;
  char bind, bind_var;
  unsigned int s = 0, rest = 0, p = 0, k = 0;
  unsigned int affinity_count = 0;
  struct gomp_thread **affinity_thr = NULL;

  thr = gomp_thread ();
  /* Any enclosing team (level != 0) makes this a nested launch.  */
  nested = thr->ts.level;
  pool = thr->thread_pool;
  task = thr->task;
  icv = task ? &task->icv : &gomp_global_icv;
  if (__builtin_expect (gomp_places_list != NULL, 0) && thr->place == 0)
    gomp_init_affinity ();

  /* Always save the previous state, even if this isn't a nested team.
     In particular, we should save any work share state from an outer
     orphaned work share construct.  */
  team->prev_ts = thr->ts;

  thr->ts.team = team;
  thr->ts.team_id = 0;
  ++thr->ts.level;
  if (nthreads > 1)
    ++thr->ts.active_level;
  thr->ts.work_share = &team->work_shares[0];
  thr->ts.last_work_share = NULL;
#ifdef HAVE_SYNC_BUILTINS
  thr->ts.single_count = 0;
#endif
  thr->ts.static_trip = 0;
  thr->task = &team->implicit_task[0];
  nthreads_var = icv->nthreads_var;
  if (__builtin_expect (gomp_nthreads_var_list != NULL, 0)
      && thr->ts.level < gomp_nthreads_var_list_len)
    nthreads_var = gomp_nthreads_var_list[thr->ts.level];
  bind_var = icv->bind_var;
  if (bind_var != omp_proc_bind_false && (flags & 7) != omp_proc_bind_false)
    bind_var = flags & 7;
  bind = bind_var;
  if (__builtin_expect (gomp_bind_var_list != NULL, 0)
      && thr->ts.level < gomp_bind_var_list_len)
    bind_var = gomp_bind_var_list[thr->ts.level];
  gomp_init_task (thr->task, task, icv);
  team->implicit_task[0].icv.nthreads_var = nthreads_var;
  team->implicit_task[0].icv.bind_var = bind_var;

  if (nthreads == 1)
    return;

  i = 1;

  if (__builtin_expect (gomp_places_list != NULL, 0))
    {
      /* Depending on chosen proc_bind model, set subpartition
	 for the master thread and initialize helper variables
	 P and optionally S, K and/or REST used by later place
	 computation for each additional thread.  */
      p = thr->place - 1;
      switch (bind)
	{
	case omp_proc_bind_true:
	case omp_proc_bind_close:
	  if (nthreads > thr->ts.place_partition_len)
	    {
	      /* T > P.  S threads will be placed in each place,
		 and the final REM threads placed one by one
		 into the already occupied places.  */
	      s = nthreads / thr->ts.place_partition_len;
	      rest = nthreads % thr->ts.place_partition_len;
	    }
	  else
	    s = 1;
	  k = 1;
	  break;
	case omp_proc_bind_master:
	  /* Each thread will be bound to master's place.  */
	  break;
	case omp_proc_bind_spread:
	  if (nthreads <= thr->ts.place_partition_len)
	    {
	      /* T <= P.  Each subpartition will have in between s
		 and s+1 places (subpartitions starting at or after rest
		 will have s places, earlier s+1 places), each thread
		 will be bound to the first place in its subpartition
		 (except for the master thread that can be bound to
		 another place in its subpartition).  */
	      s = thr->ts.place_partition_len / nthreads;
	      rest = thr->ts.place_partition_len % nthreads;
	      rest = (s + 1) * rest + thr->ts.place_partition_off;
	      if (p < rest)
		{
		  p -= (p - thr->ts.place_partition_off) % (s + 1);
		  thr->ts.place_partition_len = s + 1;
		}
	      else
		{
		  p -= (p - rest) % s;
		  thr->ts.place_partition_len = s;
		}
	      thr->ts.place_partition_off = p;
	    }
	  else
	    {
	      /* T > P.  Each subpartition will have just a single
		 place and we'll place between s and s+1
		 threads into each subpartition.  */
	      s = nthreads / thr->ts.place_partition_len;
	      rest = nthreads % thr->ts.place_partition_len;
	      thr->ts.place_partition_off = p;
	      thr->ts.place_partition_len = 1;
	      k = 1;
	    }
	  break;
	}
    }
  else
    bind = omp_proc_bind_false;

  /* We only allow the reuse of idle threads for non-nested PARALLEL
     regions.  This appears to be implied by the semantics of
     threadprivate variables, but perhaps that's reading too much into
     things.  Certainly it does prevent any locking problems, since
     only the initial program thread will modify gomp_threads.  */
  if (!nested)
    {
      old_threads_used = pool->threads_used;

      if (nthreads <= old_threads_used)
	n = nthreads;
      else if (old_threads_used == 0)
	{
	  n = 0;
	  gomp_simple_barrier_init (&pool->threads_dock, nthreads);
	}
      else
	{
	  n = old_threads_used;

	  /* Increase the barrier threshold to make sure all new
	     threads arrive before the team is released.  */
	  gomp_simple_barrier_reinit (&pool->threads_dock, nthreads);
	}

      /* Not true yet, but soon will be.  We're going to release all
	 threads from the dock, and those that aren't part of the
	 team will exit.  */
      pool->threads_used = nthreads;

      /* If necessary, expand the size of the gomp_threads array.  It is
	 expected that changes in the number of threads are rare, thus we
	 make no effort to expand gomp_threads_size geometrically.  */
      if (nthreads >= pool->threads_size)
	{
	  pool->threads_size = nthreads + 1;
	  /* NOTE(review): 'struct gomp_thread_data' is not declared in
	     this file; sizeof a pointer to an incomplete struct is valid
	     C and all object pointers have the same size here, so this is
	     harmless, but 'struct gomp_thread *' was probably intended.  */
	  pool->threads
	    = gomp_realloc (pool->threads,
			    pool->threads_size
			    * sizeof (struct gomp_thread_data *));
	}

      /* Release existing idle threads.  */
      for (; i < n; ++i)
	{
	  unsigned int place_partition_off = thr->ts.place_partition_off;
	  unsigned int place_partition_len = thr->ts.place_partition_len;
	  unsigned int place = 0;
	  if (__builtin_expect (gomp_places_list != NULL, 0))
	    {
	      switch (bind)
		{
		case omp_proc_bind_true:
		case omp_proc_bind_close:
		  if (k == s)
		    {
		      ++p;
		      if (p == (team->prev_ts.place_partition_off
				+ team->prev_ts.place_partition_len))
			p = team->prev_ts.place_partition_off;
		      k = 1;
		      if (i == nthreads - rest)
			s = 1;
		    }
		  else
		    ++k;
		  break;
		case omp_proc_bind_master:
		  break;
		case omp_proc_bind_spread:
		  if (k == 0)
		    {
		      /* T <= P.  */
		      if (p < rest)
			p += s + 1;
		      else
			p += s;
		      if (p == (team->prev_ts.place_partition_off
				+ team->prev_ts.place_partition_len))
			p = team->prev_ts.place_partition_off;
		      place_partition_off = p;
		      if (p < rest)
			place_partition_len = s + 1;
		      else
			place_partition_len = s;
		    }
		  else
		    {
		      /* T > P.  */
		      if (k == s)
			{
			  ++p;
			  if (p == (team->prev_ts.place_partition_off
				    + team->prev_ts.place_partition_len))
			    p = team->prev_ts.place_partition_off;
			  k = 1;
			  if (i == nthreads - rest)
			    s = 1;
			}
		      else
			++k;
		      place_partition_off = p;
		      place_partition_len = 1;
		    }
		  break;
		}
	      /* Reuse the docked thread only if it already sits on an
		 acceptable place; otherwise go through the permutation
		 array below.  */
	      if (affinity_thr != NULL
		  || (bind != omp_proc_bind_true
		      && pool->threads[i]->place != p + 1)
		  || pool->threads[i]->place <= place_partition_off
		  || pool->threads[i]->place > (place_partition_off
						+ place_partition_len))
		{
		  unsigned int l;
		  if (affinity_thr == NULL)
		    {
		      unsigned int j;

		      /* Bucket the remaining old threads by place so they
			 can be matched to the new per-thread places.  */
		      if (team->prev_ts.place_partition_len > 64)
			affinity_thr
			  = gomp_malloc (team->prev_ts.place_partition_len
					 * sizeof (struct gomp_thread *));
		      else
			affinity_thr
			  = gomp_alloca (team->prev_ts.place_partition_len
					 * sizeof (struct gomp_thread *));
		      memset (affinity_thr, '\0',
			      team->prev_ts.place_partition_len
			      * sizeof (struct gomp_thread *));
		      for (j = i; j < old_threads_used; j++)
			{
			  if (pool->threads[j]->place
			      > team->prev_ts.place_partition_off
			      && (pool->threads[j]->place
				  <= (team->prev_ts.place_partition_off
				      + team->prev_ts.place_partition_len)))
			    {
			      l = pool->threads[j]->place - 1
				  - team->prev_ts.place_partition_off;
			      pool->threads[j]->data = affinity_thr[l];
			      affinity_thr[l] = pool->threads[j];
			    }
			  pool->threads[j] = NULL;
			}
		      if (nthreads > old_threads_used)
			memset (&pool->threads[old_threads_used],
				'\0', ((nthreads - old_threads_used)
				       * sizeof (struct gomp_thread *)));
		      n = nthreads;
		      affinity_count = old_threads_used - i;
		    }
		  if (affinity_count == 0)
		    break;
		  l = p;
		  if (affinity_thr[l - team->prev_ts.place_partition_off]
		      == NULL)
		    {
		      if (bind != omp_proc_bind_true)
			continue;
		      for (l = place_partition_off;
			   l < place_partition_off + place_partition_len;
			   l++)
			if (affinity_thr[l - team->prev_ts.place_partition_off]
			    != NULL)
			  break;
		      if (l == place_partition_off + place_partition_len)
			continue;
		    }
		  nthr = affinity_thr[l - team->prev_ts.place_partition_off];
		  affinity_thr[l - team->prev_ts.place_partition_off]
		    = (struct gomp_thread *) nthr->data;
		  affinity_count--;
		  pool->threads[i] = nthr;
		}
	      else
		nthr = pool->threads[i];
	      place = p + 1;
	    }
	  else
	    nthr = pool->threads[i];
	  nthr->ts.team = team;
	  nthr->ts.work_share = &team->work_shares[0];
	  nthr->ts.last_work_share = NULL;
	  nthr->ts.team_id = i;
	  nthr->ts.level = team->prev_ts.level + 1;
	  nthr->ts.active_level = thr->ts.active_level;
	  nthr->ts.place_partition_off = place_partition_off;
	  nthr->ts.place_partition_len = place_partition_len;
#ifdef HAVE_SYNC_BUILTINS
	  nthr->ts.single_count = 0;
#endif
	  nthr->ts.static_trip = 0;
	  nthr->task = &team->implicit_task[i];
	  nthr->place = place;
	  gomp_init_task (nthr->task, task, icv);
	  team->implicit_task[i].icv.nthreads_var = nthreads_var;
	  team->implicit_task[i].icv.bind_var = bind_var;
	  nthr->fn = fn;
	  nthr->data = data;
	  team->ordered_release[i] = &nthr->release;
	}

      if (__builtin_expect (affinity_thr != NULL, 0))
	{
	  /* If AFFINITY_THR is non-NULL just because we had to
	     permute some threads in the pool, but we've managed
	     to find exactly as many old threads as we'd find
	     without affinity, we don't need to handle this
	     specially anymore.  */
	  if (nthreads <= old_threads_used
	      ? (affinity_count == old_threads_used - nthreads)
	      : (i == old_threads_used))
	    {
	      if (team->prev_ts.place_partition_len > 64)
		free (affinity_thr);
	      affinity_thr = NULL;
	      affinity_count = 0;
	    }
	  else
	    {
	      i = 1;
	      /* We are going to compute the places/subpartitions
		 again from the beginning.  So, we need to reinitialize
		 vars modified by the switch (bind) above inside
		 of the loop, to the state they had after the
		 initial switch (bind).  */
	      switch (bind)
		{
		case omp_proc_bind_true:
		case omp_proc_bind_close:
		  if (nthreads > thr->ts.place_partition_len)
		    /* T > P.  S has been changed, so needs
		       to be recomputed.  */
		    s = nthreads / thr->ts.place_partition_len;
		  k = 1;
		  p = thr->place - 1;
		  break;
		case omp_proc_bind_master:
		  /* No vars have been changed.  */
		  break;
		case omp_proc_bind_spread:
		  p = thr->ts.place_partition_off;
		  if (k != 0)
		    {
		      /* T > P.  */
		      s = nthreads / team->prev_ts.place_partition_len;
		      k = 1;
		    }
		  break;
		}

	      /* Increase the barrier threshold to make sure all new
		 threads and all the threads we're going to let die
		 arrive before the team is released.  */
	      if (affinity_count)
		gomp_simple_barrier_reinit (&pool->threads_dock,
					    nthreads + affinity_count);
	    }
	}

      if (i == nthreads)
	goto do_release;

    }

  if (__builtin_expect (nthreads + affinity_count > old_threads_used, 0))
    {
      long diff = (long) (nthreads + affinity_count) - (long) old_threads_used;

      if (old_threads_used == 0)
	--diff;

#ifdef HAVE_SYNC_BUILTINS
      __sync_fetch_and_add (&gomp_managed_threads, diff);
#else
      gomp_mutex_lock (&gomp_managed_threads_lock);
      gomp_managed_threads += diff;
      gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif
    }

  attr = &gomp_thread_attr;
  if (__builtin_expect (gomp_places_list != NULL, 0))
    {
      size_t stacksize;
      pthread_attr_init (&thread_attr);
      pthread_attr_setdetachstate (&thread_attr, PTHREAD_CREATE_DETACHED);
      if (! pthread_attr_getstacksize (&gomp_thread_attr, &stacksize))
	pthread_attr_setstacksize (&thread_attr, stacksize);
      attr = &thread_attr;
    }

  /* One start-data record per thread still to be created; lives on this
     stack until each new thread has copied what it needs.  */
  start_data = gomp_alloca (sizeof (struct gomp_thread_start_data)
			    * (nthreads-i));

  /* Launch new threads.  */
  for (; i < nthreads; ++i)
    {
      pthread_t pt;
      int err;

      start_data->ts.place_partition_off = thr->ts.place_partition_off;
      start_data->ts.place_partition_len = thr->ts.place_partition_len;
      start_data->place = 0;
      if (__builtin_expect (gomp_places_list != NULL, 0))
	{
	  switch (bind)
	    {
	    case omp_proc_bind_true:
	    case omp_proc_bind_close:
	      if (k == s)
		{
		  ++p;
		  if (p == (team->prev_ts.place_partition_off
			    + team->prev_ts.place_partition_len))
		    p = team->prev_ts.place_partition_off;
		  k = 1;
		  if (i == nthreads - rest)
		    s = 1;
		}
	      else
		++k;
	      break;
	    case omp_proc_bind_master:
	      break;
	    case omp_proc_bind_spread:
	      if (k == 0)
		{
		  /* T <= P.  */
		  if (p < rest)
		    p += s + 1;
		  else
		    p += s;
		  if (p == (team->prev_ts.place_partition_off
			    + team->prev_ts.place_partition_len))
		    p = team->prev_ts.place_partition_off;
		  start_data->ts.place_partition_off = p;
		  if (p < rest)
		    start_data->ts.place_partition_len = s + 1;
		  else
		    start_data->ts.place_partition_len = s;
		}
	      else
		{
		  /* T > P.  */
		  if (k == s)
		    {
		      ++p;
		      if (p == (team->prev_ts.place_partition_off
				+ team->prev_ts.place_partition_len))
			p = team->prev_ts.place_partition_off;
		      k = 1;
		      if (i == nthreads - rest)
			s = 1;
		    }
		  else
		    ++k;
		  start_data->ts.place_partition_off = p;
		  start_data->ts.place_partition_len = 1;
		}
	      break;
	    }
	  start_data->place = p + 1;
	  /* A slot already filled by the affinity permutation above does
	     not need a new thread.  */
	  if (affinity_thr != NULL && pool->threads[i] != NULL)
	    continue;
	  gomp_init_thread_affinity (attr, p);
	}

      start_data->fn = fn;
      start_data->fn_data = data;
      start_data->ts.team = team;
      start_data->ts.work_share = &team->work_shares[0];
      start_data->ts.last_work_share = NULL;
      start_data->ts.team_id = i;
      start_data->ts.level = team->prev_ts.level + 1;
      start_data->ts.active_level = thr->ts.active_level;
#ifdef HAVE_SYNC_BUILTINS
      start_data->ts.single_count = 0;
#endif
      start_data->ts.static_trip = 0;
      start_data->task = &team->implicit_task[i];
      gomp_init_task (start_data->task, task, icv);
      team->implicit_task[i].icv.nthreads_var = nthreads_var;
      team->implicit_task[i].icv.bind_var = bind_var;
      start_data->thread_pool = pool;
      start_data->nested = nested;

      attr = gomp_adjust_thread_attr (attr, &thread_attr);

      err = pthread_create (&pt, attr, gomp_thread_start, start_data++);
      if (err != 0)
	gomp_fatal ("Thread creation failed: %s", strerror (err));
    }

  if (__builtin_expect (attr == &thread_attr, 0))
    pthread_attr_destroy (&thread_attr);

 do_release:
  /* Release the team: nested teams rendezvous on the team barrier,
     non-nested teams on the pool dock.  */
  if (nested)
    gomp_barrier_wait (&team->barrier);
  else
    gomp_simple_barrier_wait (&pool->threads_dock);

  /* Decrease the barrier threshold to match the number of threads
     that should arrive back at the end of this team.  The extra
     threads should be exiting.  Note that we arrange for this test
     to never be true for nested teams.  If AFFINITY_COUNT is non-zero,
     the barrier as well as gomp_managed_threads was temporarily
     set to NTHREADS + AFFINITY_COUNT.  For NTHREADS < OLD_THREADS_COUNT,
     AFFINITY_COUNT if non-zero will be always at least
     OLD_THREADS_COUNT - NTHREADS.  */
  if (__builtin_expect (nthreads < old_threads_used, 0)
      || __builtin_expect (affinity_count, 0))
    {
      long diff = (long) nthreads - (long) old_threads_used;

      if (affinity_count)
	diff = -affinity_count;

      gomp_simple_barrier_reinit (&pool->threads_dock, nthreads);

#ifdef HAVE_SYNC_BUILTINS
      __sync_fetch_and_add (&gomp_managed_threads, diff);
#else
      gomp_mutex_lock (&gomp_managed_threads_lock);
      gomp_managed_threads += diff;
      gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif
    }
  if (__builtin_expect (affinity_thr != NULL, 0)
      && team->prev_ts.place_partition_len > 64)
    free (affinity_thr);
}
#endif

/* Terminate the current team.  This is only to be called by the master
   thread.  We assume that we must wait for the other threads.  */

void
gomp_team_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

  /* This barrier handles all pending explicit threads.
     As #pragma omp cancel parallel might get awaited count in
     team->barrier in a inconsistent state, we need to use a different
     counter here.  */
  gomp_team_barrier_wait_final (&team->barrier);
  if (__builtin_expect (team->team_cancelled, 0))
    {
      /* Cancelled team: walk and finalize the chain of work shares that
	 are still pending release.  */
      struct gomp_work_share *ws = team->work_shares_to_free;
      do
	{
	  struct gomp_work_share *next_ws = gomp_ptrlock_get (&ws->next_ws);
	  if (next_ws == NULL)
	    gomp_ptrlock_set (&ws->next_ws, ws);
	  gomp_fini_work_share (ws);
	  ws = next_ws;
	}
      while (ws != NULL);
    }
  else
    gomp_fini_work_share (thr->ts.work_share);

  gomp_end_task ();
  /* Restore the team state saved at the top of gomp_team_start.  */
  thr->ts = team->prev_ts;

  if (__builtin_expect (thr->ts.team != NULL, 0))
    {
#ifdef HAVE_SYNC_BUILTINS
      __sync_fetch_and_add (&gomp_managed_threads, 1L - team->nthreads);
#else
      gomp_mutex_lock (&gomp_managed_threads_lock);
      gomp_managed_threads -= team->nthreads - 1L;
      gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif
      /* This barrier has gomp_barrier_wait_last counterparts
	 and ensures the team can be safely destroyed.  */
      gomp_barrier_wait (&team->barrier);
    }

  if (__builtin_expect (team->work_shares[0].next_alloc != NULL, 0))
    {
      /* Free any work shares allocated beyond the team's inline array.  */
      struct gomp_work_share *ws = team->work_shares[0].next_alloc;
      do
	{
	  struct gomp_work_share *next_ws = ws->next_alloc;
	  free (ws);
	  ws = next_ws;
	}
      while (ws != NULL);
    }
  gomp_sem_destroy (&team->master_release);

  if (__builtin_expect (thr->ts.team != NULL, 0)
      || __builtin_expect (team->nthreads == 1, 0))
    free_team (team);
  else
    {
      /* Cache the team for reuse by the next same-sized parallel region
	 on this pool (see get_last_team).  */
      struct gomp_thread_pool *pool = thr->thread_pool;
      if (pool->last_team)
	free_team (pool->last_team);
      pool->last_team = team;
      gomp_release_thread_pool (pool);
    }
}

#ifdef LIBGOMP_USE_PTHREADS

/* Constructors for this file.  
   */

static void __attribute__((constructor))
initialize_team (void)
{
#if !defined HAVE_TLS && !defined USE_EMUTLS
  /* Without TLS, the initial thread's per-thread struct lives in static
     storage and is published through the pthreads key.  */
  static struct gomp_thread initial_thread_tls_data;

  pthread_key_create (&gomp_tls_key, NULL);
  pthread_setspecific (gomp_tls_key, &initial_thread_tls_data);
#endif

  if (pthread_key_create (&gomp_thread_destructor, gomp_free_thread) != 0)
    gomp_fatal ("could not create thread pool destructor.");
}

static void __attribute__((destructor))
team_destructor (void)
{
  /* Without this dlclose on libgomp could lead to subsequent
     crashes.  */
  pthread_key_delete (gomp_thread_destructor);
}
#endif

/* Allocate a fresh task with the global default ICVs and install it as
   the current thread's task; returns a pointer to the task's ICV block.
   Registering THR under gomp_thread_destructor makes gomp_free_thread
   run when the thread exits.  */

struct gomp_task_icv *
gomp_new_icv (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task *task = gomp_malloc (sizeof (struct gomp_task));
  gomp_init_task (task, NULL, &gomp_global_icv);
  thr->task = task;
#ifdef LIBGOMP_USE_PTHREADS
  pthread_setspecific (gomp_thread_destructor, thr);
#endif
  return &task->icv;
}
raytracer.h
#pragma once #include "resource.h" #include <iostream> #include <linalg.h> #include <memory> #include <omp.h> #include <random> using namespace linalg::aliases; namespace cg::renderer { struct ray { ray(float3 position, float3 direction) : position(position) { this->direction = normalize(direction); } float3 position; float3 direction; }; struct payload { float t; float3 bary; cg::color color; }; template<typename VB> struct triangle { triangle(const VB& vertex_a, const VB& vertex_b, const VB& vertex_c); float3 a; float3 b; float3 c; float3 ba; float3 ca; float3 na; float3 nb; float3 nc; float3 ambient; float3 diffuse; float3 emissive; }; template<typename VB> inline triangle<VB>::triangle( const VB& vertex_a, const VB& vertex_b, const VB& vertex_c) { a = float3{vertex_a.x, vertex_a.y, vertex_a.z}; b = float3{vertex_b.x, vertex_b.y, vertex_b.z}; c = float3{vertex_c.x, vertex_c.y, vertex_c.z}; ba = b - a; ca = c - a; na = float3{vertex_a.nx, vertex_a.ny, vertex_a.nz}; nb = float3{vertex_b.nx, vertex_b.ny, vertex_b.nz}; nc = float3{vertex_c.nx, vertex_c.ny, vertex_c.nz}; ambient = {vertex_a.ambient_r, vertex_a.ambient_g, vertex_a.ambient_b}; diffuse = {vertex_a.diffuse_r, vertex_a.diffuse_g, vertex_a.diffuse_b}; emissive = {vertex_a.emissive_r, vertex_a.emissive_g, vertex_a.emissive_b}; } template<typename VB> class aabb { public: void add_triangle(const triangle<VB> triangle); const std::vector<triangle<VB>>& get_triangles() const; bool aabb_test(const ray& ray) const; protected: std::vector<triangle<VB>> triangles; float3 aabb_min; float3 aabb_max; }; struct light { float3 position; float3 color; }; template<typename VB, typename RT> class raytracer { public: raytracer(){}; ~raytracer(){}; void set_render_target(std::shared_ptr<resource<RT>> in_render_target); void clear_render_target(const RT& in_clear_value); void set_viewport(size_t in_width, size_t in_height); void set_vertex_buffers(std::vector<std::shared_ptr<cg::resource<VB>>> in_vertex_buffers); void 
set_index_buffers(std::vector<std::shared_ptr<cg::resource<unsigned int>>> in_index_buffers); void build_acceleration_structure(); std::vector<aabb<VB>> acceleration_structures; void ray_generation(float3 position, float3 direction, float3 right, float3 up, size_t depth, size_t accumulation_num); payload trace_ray(const ray& ray, size_t depth, float max_t = 1000.f, float min_t = 0.001f) const; payload intersection_shader(const triangle<VB>& triangle, const ray& ray) const; std::function<payload(const ray& ray)> miss_shader = nullptr; std::function<payload(const ray& ray, payload& payload, const triangle<VB>& triangle, size_t depth)> closest_hit_shader = nullptr; std::function<payload(const ray& ray, payload& payload, const triangle<VB>& triangle)> any_hit_shader = nullptr; float2 get_jitter(int frame_id); protected: std::shared_ptr<cg::resource<RT>> render_target; std::shared_ptr<cg::resource<float3>> history; std::vector<std::shared_ptr<cg::resource<unsigned int>>> index_buffers; std::vector<std::shared_ptr<cg::resource<VB>>> vertex_buffers; size_t width = 1920; size_t height = 1080; }; template<typename VB, typename RT> inline void raytracer<VB, RT>::set_render_target( std::shared_ptr<resource<RT>> in_render_target) { render_target = in_render_target; } template<typename VB, typename RT> inline void raytracer<VB, RT>::clear_render_target(const RT& in_clear_value) { for (size_t i=0; i<render_target->get_number_of_elements(); i++ ) { render_target->item(i) = in_clear_value; if(history) history->item(i) = float3 {0.f, 0.f, 0.f}; } } template<typename VB, typename RT> void raytracer<VB, RT>::set_index_buffers(std::vector<std::shared_ptr<cg::resource<unsigned int>>> in_index_buffers) { index_buffers= in_index_buffers; } template<typename VB, typename RT> inline void raytracer<VB, RT>::set_vertex_buffers(std::vector<std::shared_ptr<cg::resource<VB>>> in_vertex_buffers) { vertex_buffers = in_vertex_buffers; } template<typename VB, typename RT> inline void raytracer<VB, 
RT>::build_acceleration_structure() { for(size_t shape_id = 0; shape_id < index_buffers.size(); shape_id++){ auto& index_buffer = index_buffers[shape_id]; auto& vertex_buffer = vertex_buffers[shape_id]; size_t index_id = 0; aabb<VB> aabb; while(index_id < index_buffer-> get_number_of_elements()) { triangle<VB> triangle( vertex_buffer->item(index_buffer->item(index_id++)), vertex_buffer->item(index_buffer->item(index_id++)), vertex_buffer->item(index_buffer->item(index_id++))); aabb.add_triangle(triangle); } acceleration_structures.push_back(aabb); } } template<typename VB, typename RT> inline void raytracer<VB, RT>::set_viewport(size_t in_width, size_t in_height) { width = in_width; height = in_height; history = std::make_shared<cg::resource<float3>>(width, height); } template<typename VB, typename RT> inline void raytracer<VB, RT>::ray_generation(float3 position, float3 direction, float3 right, float3 up, size_t depth, size_t accumulation_num) { float frame_weight = 1.f/static_cast<float>(accumulation_num); for(int frame_id = 0; frame_id < accumulation_num; frame_id++) { float2 jitter = get_jitter(frame_id); for (int x = 0; x < width; x++) { #pragma omp parallel for for (int y = 0; y < height; y++) { float u = (2.f * x + jitter.x) / static_cast<float>(width - 1) - 1.f; float v = (2.f * y + jitter.y) / static_cast<float>(height - 1) - 1.f; u *= static_cast<float>(width) / static_cast<float>(height); float3 ray_direction = direction + u * right - v * up; ray ray(position, ray_direction); payload payload = trace_ray(ray, depth); auto& history_pixel = history->item(x,y); history_pixel += sqrt(float3{payload.color.r, payload.color.g, payload.color.b}*frame_weight); render_target->item(x, y) = RT::from_float3(history_pixel); } } } } template<typename VB, typename RT> inline payload raytracer<VB, RT>::trace_ray( const ray& ray, size_t depth, float max_t, float min_t) const { if (depth == 0) return miss_shader(ray); depth--; payload closest_hit_payload = {}; 
closest_hit_payload.t = max_t; const triangle<VB>* closest_triangle = nullptr; for(auto& aabb: acceleration_structures) { if(!aabb.aabb_test(ray)) continue; for (auto& triangle : aabb.get_triangles()) { payload payload = intersection_shader(triangle, ray); if (payload.t > min_t && payload.t < closest_hit_payload.t) { closest_hit_payload = payload; closest_triangle = &triangle; if(any_hit_shader) return any_hit_shader(ray, payload, triangle); } } } if (closest_hit_payload.t < max_t) { if (closest_hit_shader) return closest_hit_shader(ray, closest_hit_payload, *closest_triangle, depth); } return miss_shader(ray); } template<typename VB, typename RT> inline payload raytracer<VB, RT>::intersection_shader( const triangle<VB>& triangle, const ray& ray) const { payload payload{}; payload.t = -1.f; float3 pvec = cross(ray.direction, triangle.ca); float det = dot(triangle.ba, pvec); if (det > -1e-8 && det < 1e-8) return payload; float inv_det = 1.f / det; float3 tvec = ray.position - triangle.a; float u = dot(tvec, pvec) * inv_det; if(u < 0.f || u > 1.f) return payload; float3 qvec = cross(tvec, triangle.ba); float v = dot(ray.direction, qvec) * inv_det; if (v < 0.f || u + v > 1.f) return payload; payload.t = dot(triangle.ca, qvec) * inv_det; payload.bary = float3 {1.f - u - v, u, v}; return payload; } template<typename VB, typename RT> float2 raytracer<VB, RT>::get_jitter(int frame_id) { float2 result{0.f, 0.f,}; constexpr int base_x = 2; int index = frame_id + 1; float inv_base = 1.f/base_x; float fraction = inv_base; while(index > 0) { result.x += (index % base_x) * fraction; index /= base_x; fraction *= inv_base; } constexpr int base_y = 3; index = frame_id + 1; inv_base = 1.f/base_y; fraction = inv_base; while(index > 0) { result.y += (index % base_y) * fraction; index /= base_y; fraction *= inv_base; } return result - 0.5f; } template<typename VB> inline void aabb<VB>::add_triangle(const triangle<VB> triangle) { if(triangles.empty()) aabb_max = aabb_min = triangle.a; 
triangles.push_back(triangle); aabb_max = max(aabb_max, triangle.a); aabb_max = max(aabb_max, triangle.b); aabb_max = max(aabb_max, triangle.c); aabb_min = min(aabb_min, triangle.a); aabb_min = min(aabb_min, triangle.b); aabb_min = min(aabb_min, triangle.c); } template<typename VB> inline const std::vector<triangle<VB>>& aabb<VB>::get_triangles() const { return triangles; } template<typename VB> inline bool aabb<VB>::aabb_test(const ray& ray) const { float3 inv_ray_direction = float3(1.f) / ray.direction; float3 t0 = (aabb_max - ray.position) * inv_ray_direction; float3 t1 = (aabb_min - ray.position) * inv_ray_direction; float3 tmax = max(t0, t1); float3 tmin = min(t0, t1); return maxelem(tmin) <= minelem(tmax); } }// namespace cg::renderer
test2.c
#include <stdio.h>
#include <omp.h>

#define N (10000000)

int A[N];
int B[N / 2];

/* Increment every element of the large array A. */
void g()
{
	int idx;
	for (idx = 0; idx < N; idx++)
		A[idx]++;
}

/* Increment every element of the half-sized array B. */
void h()
{
	int idx;
	for (idx = 0; idx < N / 2; idx++)
		B[idx]++;
}

/* Thin wrapper around g(); gives the first section a distinct call path. */
void f()
{
	g();
}

/* Run f() and h() concurrently, one per OpenMP section, on two threads. */
int main()
{
#pragma omp parallel num_threads(2)
	{
#pragma omp sections
		{
#pragma omp section
			{
				f();
			}
#pragma omp section
			{
				h();
			}
		}
	}
	return 0;
}
validate_yolo8.src.h
#pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_256_68_68_128_3_3.h"
#include "gen_ukr_A4B2gemm_1_256_68_68_128_3_3.h"

// One pass of a convolution-as-implicit-GEMM kernel for a 68x68 spatial
// output, 256 output filters and 128 input channels (the "1_256_68_68_128_3_3"
// problem in the generated-header names). Intended to be called from inside an
// OpenMP parallel region: it reads omp_get_thread_num() and contains an
// `omp barrier` between the packing phase and the compute phase.
//   A    - input activations (read by the micro-kernels via offsetA)
//   B    - packed/transposed filter buffer written by phase 1, read in phase 2
//   C    - output buffer accumulated into by the micro-kernels
//   oriB - original (unpacked) filter data
void testrun(float* A, float* B, float* C, float* oriB) {
	int tid = omp_get_thread_num();
	// Problem extents. NOTE(review): Nx/Ny/Nh are not referenced below; the
	// generated code uses the literal constants 68/3 directly instead.
	int Nx = 68;
	int Ny = 68;
	int Nh = 3;
	// Per-row offsets handed to the scatter micro-kernel; temporarily bumped
	// by +2 below to skip rows when a 6-row tile straddles the image edge.
	long long Astrides[6] = {0, 1, 2, 3, 4, 5};
	int b1 = 0;  // batch index (single batch here)

	// Phase 1: pack oriB into B in 16-filter panels using two 8x8 AVX
	// transposes per step. The (tid%1)/(tid/1) factors are degenerate here
	// (single-thread work split); presumably the generator emits real factors
	// for multi-threaded splits — TODO confirm.
	for (int fpck = (tid % 1) * 16; fpck < uNf; fpck += 1 * 16) {
		for (int cwh = (tid / 1) * 8; cwh < uNc * uNw * uNh / 8 * 8; cwh += 8 * 1) {
			transpose8x8_avx(oriB + (fpck + 0) * uNc * uNw * uNh + cwh,
				B + fpck * uNc * uNw * uNh + cwh * 16 + 0, uNc * uNw * uNh, 16);
			transpose8x8_avx(oriB + (fpck + 8) * uNc * uNw * uNh + cwh,
				B + fpck * uNc * uNw * uNh + cwh * 16 + 8, uNc * uNw * uNh, 16);
		}
	}

	// Tiling parameters of the loop nest below.
	// NOTE(review): these local definitions are commented out, yet Tc1, Txy3
	// and Tf2 are used in the nest — they must be supplied by one of the
	// included generated headers (ukr.h or the gen_ukr_* headers), likely as
	// macros. Verify, since re-enabling these lines would shadow/conflict.
	//int Tc1 = 8; // 1 8 16 32 48
	//int Txy3 = 12*8; // 18, 36, 72, 144
	//int Tf2 = 144; // 32, 64, 128 ,256

	// All threads must finish packing B before anyone starts consuming it.
	#pragma omp barrier
	// begin push button generated block
	// Phase 2: auto-generated blocked loop nest. Outer three loops cover the
	// full iteration space (xy: 68*68=4624 pixels, f: 256 filters,
	// c: 128 channels); inner loops tile it down to one 6-pixel x 16-filter
	// micro-kernel call over a ctile-deep channel slice.
	for (int xy5 = 0; xy5 < 4624 + 0; xy5 += 4624) {
		for (int f5 = 0; f5 < 256 + 0; f5 += 256) {
			for (int c5 = 0; c5 < 128 + 0; c5 += 128) { // full space
				for (int c4 = c5; c4 < min(128, 128 + c5); c4 += 128) {
					for (int f4 = f5; f4 < min(256, 256 + f5); f4 += Tf2) {
						for (int xy4 = xy5; xy4 < min(4624, 4624 + xy5); xy4 += 4624) {
							for (int c3 = c4; c3 < min(128, 128 + c4); c3 += Tc1) // FUll
							{
								for (int xy3 = xy4; xy3 < min(4624, 4624 + xy4); xy3 += Txy3) //Tc1, Nxy, Nf
								{
									for (int f3 = f4; f3 < min(256, Tf2 + f4); f3 += Tf2) // Tc1, Txy3, Nf
									{
										for (int xy2 = xy3; xy2 < min(4624, Txy3 + xy3); xy2 += 6) //Tc1, Txy3, Tf2
										{
											for (int f2 = f3; f2 < min(256, Tf2 + f3); f2 += 16) // Tc1, 6, Tf2
											{
												for (int c2 = c3; c2 < min(128, Tc1 + c3); c2 += Tc1) // Tc1, 6, 16
												{
													for (int c1 = c2; c1 < min(128, Tc1 + c2); c1 += Tc1) {
														for (int xy1 = xy2; xy1 < min(4624, 6 + xy2); xy1 += 6) {
															for (int f1 = f2; f1 < min(256, 16 + f2); f1 += 16) {
																// Remaining channel depth for this tile.
																int ctile = min(Tc1, 128 - c1);
																// Decompose linear indices; the /1 and %1
																// terms are degenerate generator output.
																int x1 = xy1 / 68;
																int y1 = xy1 % 68 / 1;
																int c1_1 = c1 / 1;
																int c1_2 = c1 % 1 / 1;
																int kf1_1 = f1 / 16;
																int kf1_2 = f1 % 16 / 1;
																int of1_1 = f1 / 1;
																int of1_2 = f1 % 1 / 1;
																// Flattened offsets: A is padded to 70x70
																// per channel (4900), C is 68x68 per filter.
																int offsetA = 0 + b1 * 627200 + c1_1 * 4900 + 1 * x1 * 70 + 1 * y1 * 1 + c1_2 * 1;
																int offsetB = 0 + kf1_1 * 18432 + c1 * 144 + 0 * 48 + 0 * 16 + kf1_2 * 1;
																int offsetC = 0 + b1 * 1183744 + of1_1 * 4624 + x1 * 68 + y1 * 1 + of1_2 * 1;
																if (68 - y1 >= 6) {
																	// Full 6-row tile inside the row.
																	cnn_ukr_float_scatter_6x2v_cxycgemm(A + offsetA, B + offsetB,
																		C + offsetC, ctile, Astrides);
																}
																else if (68 * 68 - xy1 >= 6) {
																	// Tile wraps to the next image row: shift
																	// the overhanging strides by the 2-column
																	// padding, run, then restore them.
																	for (int sti = 68 - y1; sti < 6; sti += 1) {
																		Astrides[sti] += 2;
																	}
																	cnn_ukr_float_scatter_6x2v_cxycgemm(A + offsetA, B + offsetB,
																		C + offsetC, ctile, Astrides);
																	for (int sti = 68 - y1; sti < 6; sti += 1) {
																		Astrides[sti] -= 2;
																	}
																}
																else {
																	// Tail of the image: fewer than 6 pixels
																	// remain, use the 4-row micro-kernel.
																	cnn_ukr_float_scatter_4x2v_cxycgemm(A + offsetA, B + offsetB,
																		C + offsetC, ctile, Astrides);
																}
															}
														}
													}
												}
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}
	// end push button generated block
}
distributed_pFMM_matrix.h
/* Copyright (c) 2020, VSB - Technical University of Ostrava and Graz University of Technology All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the names of VSB - Technical University of Ostrava and Graz University of Technology nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL VSB - TECHNICAL UNIVERSITY OF OSTRAVA AND GRAZ UNIVERSITY OF TECHNOLOGY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /** @file distributed_pFMM_matrix.h * @brief Represents matrix approximated by the pFMM, distributed among a set * of processes */ #ifndef INCLUDE_BESTHEA_DISTRIBUTED_PFMM_MATRIX_H_ #define INCLUDE_BESTHEA_DISTRIBUTED_PFMM_MATRIX_H_ #include "besthea/basis_tri_p0.h" #include "besthea/basis_tri_p1.h" #include "besthea/block_linear_operator.h" #include "besthea/chebyshev_evaluator.h" #include "besthea/distributed_block_vector.h" #include "besthea/distributed_fast_spacetime_be_space.h" #include "besthea/distributed_spacetime_cluster_tree.h" #include "besthea/full_matrix.h" #include "besthea/general_spacetime_cluster.h" #include "besthea/lagrange_interpolant.h" #include "besthea/settings.h" #include "besthea/spacetime_heat_adl_kernel_antiderivative.h" #include "besthea/spacetime_heat_dl_kernel_antiderivative.h" #include "besthea/spacetime_heat_hs_kernel_antiderivative.h" #include "besthea/spacetime_heat_sl_kernel_antiderivative.h" #include "besthea/timer.h" #include "besthea/tree_structure.h" #include "besthea/vector.h" #include <chrono> #include <list> #include <mpi.h> #include <unordered_map> namespace besthea { namespace linear_algebra { template< class kernel_type, class target_space, class source_space > class distributed_pFMM_matrix; } } /** * Class representing a matrix approximated by the pFMM method. */ template< class kernel_type, class target_space, class source_space > class besthea::linear_algebra::distributed_pFMM_matrix : public besthea::linear_algebra::block_linear_operator { public: using vector_type = besthea::linear_algebra::vector; //!< Vector type. 
  using timer_type = besthea::tools::timer;  //!< Timer type

  /**
   * Wraps the mapped quadrature point so that they can be private for OpenMP
   * threads
   * @note This is needed to compute the quadratures of Chebyshev polynomials
   * in space (since these are no longer stored and hence not assembled by
   * the corresponding assembler)
   * @note The arrays of size 4 hold one entry per quadrature configuration;
   * presumably the four element-pair cases (disjoint/vertex/edge/identical)
   * — TODO confirm against the assembler.
   */
  struct quadrature_wrapper {
    std::array< std::vector< sc, besthea::allocator_type< sc > >, 4 >
      _x1_ref;  //!< First coordinates of quadrature nodes in (0,1)x(0,1-x1) to
                //!< be mapped to the test element
    std::array< std::vector< sc, besthea::allocator_type< sc > >, 4 >
      _x2_ref;  //!< Second coordinates of quadrature nodes in (0,1)x(0,1-x1) to
                //!< be mapped to the test element
    std::array< std::vector< sc, besthea::allocator_type< sc > >, 4 >
      _y1_ref;  //!< First coordinates of quadrature nodes in (0,1)x(0,1-x1) to
                //!< be mapped to the trial element
    std::array< std::vector< sc, besthea::allocator_type< sc > >, 4 >
      _y2_ref;  //!< Second coordinates of quadrature nodes in (0,1)x(0,1-x1) to
                //!< be mapped to the trial element
    std::array< std::vector< sc, besthea::allocator_type< sc > >, 4 >
      _w;  //!< Quadrature weights including transformation Jacobians

    std::vector< sc, besthea::allocator_type< sc > >
      _x1;  //!< First coordinates of quadrature nodes in the test element
    std::vector< sc, besthea::allocator_type< sc > >
      _x2;  //!< Second coordinates of quadrature nodes in the test element
    std::vector< sc, besthea::allocator_type< sc > >
      _x3;  //!< Third coordinates of quadrature nodes in the test element
    std::vector< sc, besthea::allocator_type< sc > >
      _y1;  //!< First coordinates of quadrature nodes in the trial element
    std::vector< sc, besthea::allocator_type< sc > >
      _y2;  //!< Second coordinates of quadrature nodes in the trial element
    std::vector< sc, besthea::allocator_type< sc > >
      _y3;  //!< Third coordinates of quadrature nodes in the trial element

    std::vector< sc, besthea::allocator_type< sc > >
      _kernel_values;  //!< Buffer for storing kernel values.
    std::vector< sc, besthea::allocator_type< sc > >
      _kernel_values_2;  //!< Buffer for storing additional kernel values.

    std::vector< sc, besthea::allocator_type< sc > >
      _y1_ref_cheb;  //!< First coordinates of quadrature nodes for the
                     //!< Chebyshev polynomials in (0,1)x(0,1-x1) to be mapped
                     //!< to the test element
    std::vector< sc, besthea::allocator_type< sc > >
      _y2_ref_cheb;  //!< Second coordinates of quadrature nodes for the
                     //!< Chebyshev polynomials in (0,1)x(0,1-x1) to be mapped
                     //!< to the test element
    vector_type _y1_polynomial;  //!< Coordinates for evaluation of the
                                 //!< Chebyshev polynomials in the interval
                                 //!< [-1,1] in x direction
    vector_type _y2_polynomial;  //!< Coordinates for evaluation of the
                                 //!< Chebyshev polynomials in the interval
                                 //!< [-1,1] in y direction
    vector_type _y3_polynomial;  //!< Coordinates for evaluation of the
                                 //!< Chebyshev polynomials in the interval
                                 //!< [-1,1] in z direction
    std::vector< sc, besthea::allocator_type< sc > >
      _wy_cheb;  //!< Quadrature weights for the Chebyshev quadrature
                 //!< (original comment was truncated at "including";
                 //!< presumably "including transformation Jacobians" —
                 //!< TODO confirm)
  };

  /**
   * Default constructor.
   */
  distributed_pFMM_matrix( )
    : _my_rank( -1 ),
      _distributed_spacetime_tree( nullptr ),
      _scheduling_tree_structure( nullptr ),
      // Default expansion orders (temporal Lagrange / spatial Chebyshev);
      // can be overwritten later via set_order.
      _temp_order( 5 ),
      _spat_order( 5 ),
      _m2l_integration_order( _spat_order ),
      // Number of spatial expansion coefficients:
      // binomial(_spat_order + 3, 3).
      _spat_contribution_size(
        ( ( _spat_order + 3 ) * ( _spat_order + 2 ) * ( _spat_order + 1 ) )
        / 6 ),
      _contribution_size( ( _temp_order + 1 ) * _spat_contribution_size ),
      _chebyshev( _spat_order ),
      _lagrange( _temp_order ),
      // Default heat conductivity; overwritten via set_alpha.
      _alpha( 1.0 ),
      _cheb_nodes_sum_coll(
        ( _m2l_integration_order + 1 ) * ( _m2l_integration_order + 1 ) ),
      _all_poly_vals_mult_coll( ( _spat_order + 1 ) * ( _spat_order + 1 )
        * ( _m2l_integration_order + 1 ) * ( _m2l_integration_order + 1 ) ),
      _verbose( false ),
      _measure_tasks( false ),
      _non_nf_op_count( 0 ),
      // One timing / communication-tracking container per OpenMP thread.
      _m_task_times( omp_get_max_threads( ) ),
      _m2l_task_times( omp_get_max_threads( ) ),
      _l_task_times( omp_get_max_threads( ) ),
      _n_task_times( omp_get_max_threads( ) ),
      _m_subtask_times( omp_get_max_threads( ) ),
      _m2l_subtask_times( omp_get_max_threads( ) ),
      _l_subtask_times( omp_get_max_threads( ) ),
      _n_subtask_times( omp_get_max_threads( ) ),
      _mpi_send_m2l( omp_get_max_threads( ) ),
      _mpi_send_m_parent( omp_get_max_threads( ) ),
      _mpi_send_l_children( omp_get_max_threads( ) ),
      _mpi_recv_m2l( omp_get_max_threads( ) ),
      _mpi_recv_m_parent( omp_get_max_threads( ) ),
      _mpi_recv_l_children( omp_get_max_threads( ) ) {
  }

  // Non-copyable: the matrix owns raw nearfield-matrix pointers (deleted in
  // the destructor), so a shallow copy would double-free.
  distributed_pFMM_matrix( const distributed_pFMM_matrix & that ) = delete;

  /**
   * Destructor. Frees all clusterwise nearfield matrices owned by this
   * operator (they are stored as raw pointers).
   */
  virtual ~distributed_pFMM_matrix( ) {
    for ( auto it = _clusterwise_nearfield_matrices.begin( );
          it != _clusterwise_nearfield_matrices.end( ); ++it ) {
      // loop over all nearfield matrices associated with a given spacetime
      // cluster and delete them.
      for ( auto matrix : it->second ) {
        delete matrix;
      }
    }
  }

  /*!
   * @brief y = beta * y + alpha * (this)^trans * x using block vectors.
   * @param[in] x
   * @param[in,out] y
   * @param[in] trans Flag for transpose of individual blocks (not the whole
   * block matrix!).
* @param[in] alpha * @param[in] beta * @note This routine is just a dummy here. Please use the corresponding * version with distributed block vectors. */ virtual void apply( const block_vector & x, block_vector & y, bool trans = false, sc alpha = 1.0, sc beta = 0.0 ) const; /*! * @brief y = beta * y + alpha * (this)^trans * x using block vectors. * @param[in] x * @param[in,out] y * @param[in] trans Flag for transpose of individual blocks (not the whole * block matrix!). * @param[in] alpha * @param[in] beta */ virtual void apply( const distributed_block_vector & x, distributed_block_vector & y, bool trans = false, sc alpha = 1.0, sc beta = 0.0 ) const; /*! * @brief y = beta * y + alpha * (this)^trans * x using distributed block * vectors for single, double and adjoint double layer operators. * @param[in] x * @param[in,out] y * @param[in] trans Flag for transpose of individual blocks (not the whole * block matrix!). * @param[in] alpha * @param[in] beta * @todo we should disable trans somehow, since it is not implemented * correctly. */ void apply_sl_dl( const distributed_block_vector & x, distributed_block_vector & y, bool trans, sc alpha, sc beta ) const; /*! * @brief y = beta * y + alpha * (this)^trans * x using distributed block * vectors for the hypersingular operator. * @param[in] x * @param[in,out] y * @param[in] trans Flag for transpose of individual blocks (not the whole * block matrix!). * @param[in] alpha * @param[in] beta * @todo we should disable trans somehow, since it is not implemented * correctly. */ void apply_hs( const distributed_block_vector & x, distributed_block_vector & y, bool trans, sc alpha, sc beta ) const; /** * @brief Realizes one run of the distributed pFMM algorithm. (executing all * farfield operations and, if @p run_count = 0 also all nearfield * operations). * @param[in] x Distributed vector which contains the sources. * @param[in] y_pFMM Distributed vector to which the result is added. 
* @param[in] trans Flag for transpose of individual blocks (not the whole * block matrix!). * @tparam run_count This parameter keeps track how often the pFMM procedure * has been executed. It is used to select the appropriate * s2m and l2t operations for each run of the pFMM * algorithm for the hypersingular operator. For all other * operators it has no effect. * @todo we should disable trans somehow, since it is not implemented * correctly. * @note This routine is called in the routines @ref apply_sl_dl and * @ref apply_hs. * */ template< slou run_count > void apply_pFMM_procedure( const distributed_block_vector & x, distributed_block_vector & y_pFMM, bool trans ) const; /** * Sets the MPI communicator associated with the distributed pFMM matrix and * the rank of the executing process. * @param[in] comm MPI communicator to be set. */ void set_MPI_communicator( const MPI_Comm * comm ) { _comm = comm; MPI_Comm_rank( *_comm, &_my_rank ); } /** * Sets the underlying distributed spacetime tree and tree structure for * scheduling the operations. The size of the clusterwise nearfield * matrix container is set appropriately. * @param[in] distributed_spacetime_tree The distributed spacetime tree. Its * distribution tree is used as the * scheduling tree structure. */ void set_trees( mesh::distributed_spacetime_cluster_tree * distributed_spacetime_tree ); /** * Sets the heat conductivity parameter. * @param[in] alpha Heat conductivity. */ void set_alpha( sc alpha ) { _alpha = alpha; } /** * Sets the dimension of the matrix. * @param[in] block_dim Block dimension. * @param[in] dim_domain Number of columns in a block. * @param[in] dim_range Number of rows in a block. * @note the member variables which are set are inherited from * @ref block_linear_operator. 
*/ void resize( lo block_dim, lo dim_domain, lo dim_range ) { _block_dim = block_dim; _dim_domain = dim_domain; _dim_range = dim_range; } /** * Sets the order of the Lagrange and Chebyshev polynomials and the quadrature * order for numerical integration. * @param[in] spat_order Order of the Chebyshev polynomials. * @param[in] temp_order Order of the Lagrange polynomials. * @param[in] order_regular Quadrature order. */ void set_order( int spat_order, int temp_order, int order_regular ) { _spat_order = spat_order; _temp_order = temp_order; _order_regular = order_regular; _spat_contribution_size = ( ( _spat_order + 3 ) * ( _spat_order + 2 ) * ( _spat_order + 1 ) ) / 6; _contribution_size = ( _temp_order + 1 ) * _spat_contribution_size; _chebyshev.set_order( spat_order ); _lagrange.set_order( temp_order ); } /** * Sets the integration order for the m2l coefficients. * @param[in] m2l_integration_order M2L integration order. */ void set_m2l_integration_order( int m2l_integration_order ) { _m2l_integration_order = m2l_integration_order; } /** * Fills the 4 lists used for scheduling the FMM operations by adding pointers * to clusters assigned to the process with id @p _my_process_id. In addition * it determines all pairs of clusters and process ids from which data is * received, and initializes the data in the scheduling time clusters which is * used to check the dependencies. * @note All lists are constructed anew, existing values are overwritten. * @note The clusters in the m_list are sorted using the comparison operator * @ref mesh::tree_structure::compare_clusters_bottom_up_right_2_left, * the others using * @ref mesh::tree_structure::compare_clusters_top_down_right_2_left. */ void prepare_fmm( ); /** * Creates a nearfield matrix for two clusters * @param[in] leaf_index Index of the local leaf cluster, which acts as the * target. * @param[in] source_index Index of the source cluster in the nearfield list * of the target cluster. 
*/ full_matrix * create_nearfield_matrix( lou leaf_index, lou source_index ); /** * Compute the spatial m2m coefficients for all local spatial levels. */ void compute_spatial_m2m_coeffs( ); /** * Compute Chebyshev nodes and evaluate them. */ void compute_chebyshev( ); /** * Prints information about the underlying distributed spacetime cluster tree * and the operations which have to be applied. * @param[in] root_process Process responsible for printing the information. * @param[in] print_tree_information If true, information is printed for the * distributed spacetime cluster tree * corresponding to the matrix. */ void print_information( const int root_process, const bool print_tree_information = false ); /*! * Setter for verbosity during matrix-vector multiplication * @param[in] verbose When true, prints information to file. */ void set_verbose( bool verbose ) { _verbose = verbose; } /*! * Setter for task timer during matrix-vector multiplication * @param[in] measure_tasks When true, measures and prints timing of * individual tasks. */ void set_task_timer( bool measure_tasks ) { _measure_tasks = measure_tasks; } /*! * Auxiliary method that sorts clusters within the _n_list to improve shared * memory scalability during matrix vector multiplication. */ void sort_clusters_in_nearfield( ); private: /** * Calls all S2M operations associated with a given scheduling time cluster. * @param[in] sources Global sources containing the once used for the S2M * operation. * @param[in] t_cluster Considered scheduling time cluster. * @param[in] verbose If true, the required time is written to file. * @param[in] verbose_file If @p verbose is true, this is used as output * file. * @tparam run_count Run count of the corresponding pFMM procedure. It is * used to choose the appropriate s2m operation for this * run in case of the hypersingular operator. 
*/ template< slou run_count > void call_s2m_operations( const distributed_block_vector & sources, mesh::scheduling_time_cluster * t_cluster, bool verbose, const std::string & verbose_file ) const; /** * Applies the appropriate S2M operation for the given source cluster and * sources depending on the boundary integral operator. * @param[in] source_vector Global sources containing the once used for the * S2M operation. * @param[in] source_cluster Considered spacetime cluster. * @tparam run_count Run count of the corresponding pFMM procedure. It is * used to choose the appropriate s2m operation for this * run in case of the hypersingular operator. */ template< slou run_count > void apply_s2m_operation( const distributed_block_vector & source_vector, mesh::general_spacetime_cluster * source_cluster ) const; /** * Applies the S2M operation for the given source cluster and sources for * p0 basis functions (for single layer and adjoint double layer operators) * @param[in] source_vector Global sources containing the once used for the * S2M operation. * @param[in] source_cluster Considered spacetime cluster. * @todo Use buffers instead of reallocating sources and aux buffer in every * function call? * @todo Store the quadratures of Chebyshev polynomials in space and Lagrange * polynomials in time again? */ void apply_s2m_operation_p0( const distributed_block_vector & source_vector, mesh::general_spacetime_cluster * source_cluster ) const; /** * Applies the S2M operation for the given source cluster and sources for * p1 basis functions and normal derivatives of spatial polynomials (for * double layer operator) * @param[in] source_vector Global sources containing the once used for the * S2M operation. * @param[in] source_cluster Considered spacetime cluster. * @todo Use buffers instead of reallocating sources and aux buffer in every * function call? * @todo Store the quadratures of Chebyshev polynomials in space and Lagrange * polynomials in time again? 
*/ void apply_s2m_operations_p1_normal_drv( const distributed_block_vector & source_vector, mesh::general_spacetime_cluster * source_cluster ) const; /** * Applies the S2M operation for the given source cluster and sources for * a selected component of the surface curls of p1 basis functions (for * hypersingular operator) * @param[in] source_vector Global sources containing the once used for the * S2M operation. * @param[in] source_cluster Considered spacetime cluster. * @tparam dim Used to select the component of the surface curls (0,1 or 2). * @todo Use buffers instead of reallocating sources and aux buffer in every * function call? */ template< slou dim > void apply_s2m_operation_curl_p1_hs( const distributed_block_vector & source_vector, mesh::general_spacetime_cluster * source_cluster ) const; /** * Applies the S2M operation for the given source cluster and sources for p1 * basis functions and a selected component of the normal derivative of the * Chebyshev polynomials, which are used for the expansion (for hypersingular * operator) * @param[in] source_vector Global sources containing the once used for the * S2M operation. * @param[in] source_cluster Considered spacetime cluster. * @param[in] dimension Used to select the component of the normal derivatives * of the Chebyshev polynomials (0,1 or 2). * @todo Use buffers instead of reallocating sources and aux buffer in every * function call? */ void apply_s2m_operation_p1_normal_hs( const distributed_block_vector & source_vector, mesh::general_spacetime_cluster * source_cluster, const slou dimension ) const; /** * Calls all M2M operations associated with a given scheduling time cluster. * @param[in] t_cluster Considered scheduling time cluster. * @param[in] verbose If true, the required time is written to file. * @param[in] verbose_file If @p verbose is true, this is used as output * file. 
*/ void call_m2m_operations( mesh::scheduling_time_cluster * t_cluster, bool verbose, const std::string & verbose_file ) const; /** * Applies the M2M operations for the given parent cluster and all its * children for a given temporal configuration. * @param[in] parent_cluster Considered spacetime parent cluster. * @param[in] child_configuration Indicates for which children the m2m * operations are executed: * - 0: left children w.r.t. to time. * - 1: right chilren w.r.t. to time. */ void apply_grouped_m2m_operation( mesh::general_spacetime_cluster * parent_cluster, slou child_configuration ) const; /** * Applies the temporal m2m operation to a child_moment and adds the result * to the parent moment. * @param[in] child_moment Array containing the moments of the child cluster. * @param[in] temporal_m2m_matrix Matrix used for the temporal m2m operation. * @param[in,out] parent_moment Array to which the result is added. */ void apply_temporal_m2m_operation( const sc * child_moment, const full_matrix & temporal_m2m_matrix, sc * parent_moment ) const; /** * Applies the spatial m2m operation to a child_moment and adds the result * to a given array. * @param[in] child_moment Array containing the moments of the child cluster. * @param[in] n_space_div_parent Number of refinements in space executed for * the parent cluster. * @param[in] octant Configuration of the child cluster with respect to its * parent in space. * @param[in,out] output_array Array to which the result is added. * @note @p n_space_div_parent and @p octant are used to determine the * appropriate m2m coefficients for the operation. */ void apply_spatial_m2m_operation( const sc * child_moment, const lo n_space_div_parent, const slou octant, std::vector< sc > & output_array ) const; /** * Calls all M2L operations associated with a given pair of scheduling time * clusters. * @param[in] src_cluster Scheduling time cluster which acts as source in M2L. 
* @param[in] tar_cluster Scheduling time cluster which acts as target in M2L. * @param[in] verbose If true, the required time is written to file. * @param[in] verbose_file If @p verbose is true, this is used as output * file. */ void call_m2l_operations( mesh::scheduling_time_cluster * src_cluster, mesh::scheduling_time_cluster * tar_cluster, bool verbose, const std::string & verbose_file ) const; /** * Applies the M2L operation for given source and target clusters. * @param[in] src_cluster Spacetime source cluster for the M2L operation. * @param[in,out] tar_cluster Spacetime target cluster for the M2L operation. * @todo add buffers instead of reallocation? */ void apply_m2l_operation( const mesh::general_spacetime_cluster * src_cluster, mesh::general_spacetime_cluster * tar_cluster ) const; /** * Calls all L2L operations associated with a given scheduling time cluster. * @param[in] t_cluster Considered scheduling time cluster. * @param[in] verbose If true, the required time is written to file. * @param[in] verbose_file If @p verbose is true, this is used as output * file. */ void call_l2l_operations( mesh::scheduling_time_cluster * t_cluster, bool verbose, const std::string & verbose_file ) const; /** * Applies the L2L operations for the given parent cluster and all its * children for a given temporal configuration. * @param[in] parent_cluster Considered spacetime parent cluster. * @param[in] child_configuration Indicates for which children the l2l * operations are executed: * - 0: left children w.r.t. time. * - 1: right children w.r.t. time. */ void apply_grouped_l2l_operation( mesh::general_spacetime_cluster * parent_cluster, slou child_configuration ) const; /** * Applies the temporal l2l operation to a parent's local contribution and * adds the result to a child's local contribution. * @param[in] parent_local_contribution Array containing the local * contributions of the parent cluster. * @param[in] temporal_l2l_matrix Matrix used for the temporal l2l operation. 
* @param[in,out] child_local_contribution Array to which the result is * added. */ void apply_temporal_l2l_operation( const sc * parent_local_contribution, const full_matrix & temporal_l2l_matrix, sc * child_local_contribution ) const; /** * Applies the spatial l2l operation to a parent's local contribution and adds * the result to a given array. * @param[in] parent_local Array containing the local * contributions of the parent cluster. * @param[in] n_space_div_parent Number of refinements in space executed for * the parent cluster. * @param[in] octant Configuration of the child cluster with respect to its * parent in space. * @param[in,out] child_local Array to which the result is * added. * @note @p n_space_div_parent and @p octant are used to determine the * appropriate l2l coefficients for the operation. */ void apply_spatial_l2l_operation( const sc * parent_local, const lo n_space_div_parent, const slou octant, sc * child_local ) const; /** * Calls all L2T operations associated with a given scheduling time cluster. * @param[in] t_cluster Considered scheduling time cluster. * @param[in,out] output_vector Block vector to which the results are added. * @param[in] verbose If true, the required time is written to file. * @param[in] verbose_file If @p verbose is true, this is used as output * file. * @tparam run_count Run count of the corresponding pFMM procedure. It is * used to choose the appropriate l2t operation for this * run in case of the hypersingular operator. */ template< slou run_count > void call_l2t_operations( mesh::scheduling_time_cluster * t_cluster, distributed_block_vector & output_vector, bool verbose, const std::string & verbose_file ) const; /** * Applies the appropriate L2T operation for the given target cluster * depending on the boundary integral operator and writes the result to the * appropriate part of the output vector. * @param[in] cluster Considered spacetime cluster. 
* @param[in,out] output_vector Global result vector to which the result of * the operation is added. * @todo Use buffers instead of reallocating targets and aux buffer in every * function call? * @todo Store the quadratures of Chebyshev polynomials in space and Lagrange * polynomials in time again? * @tparam run_count Run count of the corresponding pFMM procedure. It is * used to choose the appropriate s2m operation for this * run in case of the hypersingular operator. */ template< slou run_count > void apply_l2t_operation( const mesh::general_spacetime_cluster * cluster, distributed_block_vector & output_vector ) const; /** * Applies the L2T operation for the given target cluster for p0 basis * functions and writes the result to the appropriate part of the output * vector. * @param[in] cluster Considered spacetime cluster. * @param[in,out] output_vector Global result vector to which the result of * the operation is added. * @todo Use buffers instead of reallocating targets and aux buffer in every * function call? * @todo Store the quadratures of Chebyshev polynomials in space and Lagrange * polynomials in time again? */ void apply_l2t_operation_p0( const mesh::general_spacetime_cluster * cluster, distributed_block_vector & output_vector ) const; /** * Applies the L2T operation for the given target cluster for p1 basis * functions and normal derivatives of spatial polynomials (for adjoint double * layer operator and hypersingular operator) functions and writes the result * to the appropriate part of the output vector. * @param[in] cluster Considered spacetime cluster. * @param[in,out] output_vector Global result vector to which the result of * the operation is added. * @todo Use buffers instead of reallocating targets and aux buffer in every * function call? * @todo Store the quadratures of Chebyshev polynomials in space and Lagrange * polynomials in time again? 
*/ void apply_l2t_operation_p1_normal_drv( const mesh::general_spacetime_cluster * cluster, distributed_block_vector & output_vector ) const; /** * Applies the L2T operation for the given target cluster for a selected * component of the surface curls of p1 basis functions (for hypersingular * operator) and writes the result to the appropriate part of the output * vector. * @param[in] cluster Considered spacetime cluster. * @param[in,out] output_vector Global result vector to which the result of * the operation is added. * @tparam dim Used to select the component of the surface curls (0,1 or 2). * @todo Use buffers instead of reallocating targets and aux buffer in every * function call? */ template< slou dim > void apply_l2t_operation_curl_p1_hs( const mesh::general_spacetime_cluster * cluster, distributed_block_vector & output_vector ) const; /** * Applies the L2T operation for the given target cluster for p1 basis * functions and a selected component of the normal derivative of the * Chebyshev polynomials, which are used for the expansion (for hypersingular * operator), and writes the result to the appropriate part of the output * vector. * @param[in] cluster Considered spacetime cluster. * @param[in] dimension Used to select the component of the normal derivatives * of the Chebyshev polynomials (0,1 or 2). * @param[in,out] output_vector Global result vector to which the result of * the operation is added. * @todo Use buffers instead of reallocating targets and aux buffer in every * function call? */ void apply_l2t_operation_p1_normal_hs( const mesh::general_spacetime_cluster * cluster, const slou dimension, distributed_block_vector & output_vector ) const; /** * Executes all nearfield operations associated with a given scheduling time * cluster. * @param[in] cluster Time cluster whose associated nearfield operations * are executed. * @param[in] sources Global sources containing the once used for the * nearfield operation. 
* @param[in] trans If true, the transposed nearfield matrices are applied * otherwise the standard nearfield matrices. * @param[in,out] output_vector Vector to which the results are added. * @param[in] verbose If true, the required time is written to file. * @param[in] verbose_file If @p verbose is true, this is used as output * file. */ void apply_nearfield_operations( const mesh::scheduling_time_cluster * cluster, const distributed_block_vector & sources, bool trans, distributed_block_vector & output_vector, bool verbose, const std::string & verbose_file ) const; /** * Calls MPI_Testsome for an array of Requests to check for received data. * @param[in,out] array_of_requests Array containing the MPI requests which * are checked. * @param[in,out] array_of_indices Array in which the indices of the * completed requests are stored. This is * used as an input variable to avoid * reallocation in each function call. * @param[in,out] outcount Stores the number of Requests which are completed. */ void check_for_received_data( std::vector< MPI_Request > & array_of_requests, std::vector< int > & array_of_indices, int & outcount ) const; /** * Returns an iterator pointing to the next cluster in the l-list whose * dependencies are satisfied. In case a cluster is found the status is * updated. If no cluster is found the iterator points to the end of the list * and the status is not modified. * @param[in] l_list A list containing the clusters of @ref _l_list whose * operations have not been executed yet. * @param[out] it_next_cluster If a cluster is found in the list this * iterator points to it. Else it points to the * end of the list. * @param[out] status Set to 2 if a cluster is found. */ void find_cluster_in_l_list( std::list< mesh::scheduling_time_cluster * > & l_list, std::list< mesh::scheduling_time_cluster * >::iterator & it_next_cluster, char & status ) const; /** * Returns an iterator pointing to the next cluster in the m-list whose * dependencies are satisfied. 
In case a cluster is found the status is * updated. If no cluster is found the iterator points to the end of the list * and the status is not modified. * @param[in] m_list A list containing the clusters of @ref _m_list whose * operations have not been executed yet. * @param[out] it_next_cluster If a cluster is found in the list this * iterator points to it. Else it points to the * end of the list. * @param[out] status Set to 1 if a cluster is found. */ void find_cluster_in_m_list( std::list< mesh::scheduling_time_cluster * > & m_list, std::list< mesh::scheduling_time_cluster * >::iterator & it_next_cluster, char & status ) const; /** * Returns an iterator pointing to the next cluster in the m2l-list whose * dependencies are satisfied. In case a cluster is found the status is * updated. If no cluster is found the iterator points to the end of the list * and the status is not modified. * @param[in] m2l_list A list containing the clusters of @ref _m2l_list whose * operations have not been executed yet. * @param[out] it_next_cluster If a cluster is found in the list this * iterator points to it. Else it points to the * end of the list. * @param[out] status Set to 3 if a cluster is found. */ void find_cluster_in_m2l_list( std::list< mesh::scheduling_time_cluster * > & m2l_list, std::list< mesh::scheduling_time_cluster * >::iterator & it_next_cluster, char & status ) const; /** * Updates dependency flags or sends moments for M2L operations. * @param[in] src_cluster Considered scheduling time cluster. If a cluster in * its send list is handled by a different process, the * moments are send to this process. * @param[in] verbose If true, the process reports about all initiated send * operations. (Updates of dependency flags are not * reported) * @param[in] verbose_file If @p verbose is true, this is used as output * file. 
*/ void provide_moments_for_m2l( mesh::scheduling_time_cluster * src_cluster, bool verbose, const std::string & verbose_file ) const; /** * Updates dependency flags or sends moments for upward path. * @param[in] child_cluster Considered scheduling time cluster. If its parent * is handled by a different process, the processed * moments are sent from the local copy of the parent * cluster to this process. * @param[in] verbose If true, the process reports about all initiated send * operations. (Updates of dependency flags are not * reported) * @param[in] verbose_file If @p verbose is true, this is used as output * file. */ void provide_moments_to_parents( mesh::scheduling_time_cluster * child_cluster, bool verbose, const std::string & verbose_file ) const; /** * Sends local contributions for downward path if necessary. * @param[in] parent_cluster Considered scheduling time cluster. If a child * of it is handled by a different process, the * local contributions are sent to this process. * @param[in] verbose If true, the process reports about all initiated send * operations. (Updates of dependency flags are not * reported) * @param[in] verbose_file If @p verbose is true, this is used as output * file. */ void provide_local_contributions_to_children( mesh::scheduling_time_cluster * parent_cluster, bool verbose, const std::string & verbose_file ) const; /** * Starts all receive operations given by a vector of pairs of clusters and * process ids. * @param[in,out] array_of_requests The MPI_Requests of the non-blocking * receive operations are stored in this * array. It is expected to have at least * the size of @p receive_vector. */ void start_receive_operations( std::vector< MPI_Request > & array_of_requests ) const; /** * Compute quadrature of the Chebyshev polynomials and p0 basis functions for * the spatial part of a spacetime cluster * @param[in] source_cluster Cluster for whose spatial component the * quadratures are computed. 
* @param[out] T Full matrix where the quadratures are stored. The elements * of the cluster vary along the rows, the order of the * polynomial along the columns of the matrix. */ void compute_chebyshev_quadrature_p0( const mesh::general_spacetime_cluster * source_cluster, full_matrix & T ) const; /** * Computes quadrature of the normal derivatives of the Chebyshev polynomials * times p1 basis functions for the spatial part of a spacetime cluster. * @param[in] source_cluster Cluster for whose spatial component the * quadratures are computed. * @param[out] T_drv Full matrix where the quadratures are stored. The * nodes of the cluster vary along the rows, the order of the polynomial * along the columns of the matrix. */ void compute_normal_drv_chebyshev_quadrature_p1( const mesh::general_spacetime_cluster * source_cluster, full_matrix & T_drv ) const; /** * Computes quadrature of the Chebyshev polynomials times a selected component * of the surface curls of p1 basis functions for the spatial part of a * spacetime cluster. * @param[in] source_cluster Cluster for whose spatial component the * quadratures are computed. * @param[out] T_curl_along_dim Full matrix where the quadratures are stored. * The nodes of the cluster vary along the rows, * the order of the polynomial along the columns * of the matrix. * @tparam dim Used to select the component of the surface curls (0,1 or 2). */ template< slou dim > void compute_chebyshev_times_p1_surface_curls_along_dimension( const mesh::general_spacetime_cluster * source_cluster, full_matrix & T_curl_along_dim ) const; /** * Computes quadrature of a selected component of the normal derivatives of * the Chebyshev polynomials times p1 basis functions for the spatial part of * a spacetime cluster. * @param[in] source_cluster Cluster for whose spatial component the * quadratures are computed. * @param[in] dim Used to select the component of the normal derivatives of * the Chebyshev polynomials (0,1 or 2). 
* @param[out] T_normal_along_dim Full matrix where the quadratures are * stored. The nodes of the cluster vary along * the rows, the order of the polynomial along * the columns of the matrix. */ void compute_chebyshev_times_normal_quadrature_p1_along_dimension( const mesh::general_spacetime_cluster * source_cluster, const slou dim, full_matrix & T_normal_along_dim ) const; /** * Compute quadrature of the Lagrange polynomials and p0 basis functions for * the temporal part of a spacetime cluster * @param[in] source_cluster Cluster for whose temporal component the * quadratures are computed. * @param[out] L Full matrix where the quadratures are stored. The temporal * elements of the cluster vary along the columns, the order * of the polynomial along the rows of the matrix. */ void compute_lagrange_quadrature( const mesh::general_spacetime_cluster * source_cluster, full_matrix & L ) const; /** * Compute quadrature of the derivative of Lagrange polynomials and p0 basis * functions for the temporal part of a spacetime cluster * @param[in] source_cluster Cluster for whose temporal component the * quadratures are computed. * @param[out] L_drv Full matrix where the quadratures are stored. The * temporal elements of the cluster vary along the columns, * the order of the polynomial along the rows of the * matrix. */ void compute_lagrange_drv_quadrature( const mesh::general_spacetime_cluster * source_cluster, full_matrix & L_drv ) const; /*! * Computes coupling coefficients for the spacetime m2l operation for one of * the three space dimensions implicitly given. * @param[in] src_time_nodes Interpolation nodes in time for the source * cluster. * @param[in] tar_time_nodes Interpolation nodes in time for the target * cluster. * @param[in] half_size Half size in space of the current clusters along the * dimension for which the coefficients are computed. * @param[in] center_diff The appropriate component of the difference vector * (target_center - source_center). 
* @param[in] buffer_for_gaussians Vector with size >= ( _spat_order + 1 * )^2 * * ( _temp_order + 1 )^2 to store * intermediate results in the computation * of the m2l coefficients. * @param[in,out] coupling_coeffs Vector with size >= ( _spat_order + 1 )^2 * * ( _temp_order + 1 )^2 to store m2l * coefficients. */ void compute_coupling_coeffs( const vector_type & src_time_nodes, const vector_type & tar_time_nodes, const sc half_size, const sc center_diff, vector_type & buffer_for_gaussians, vector_type & coupling_coeffs ) const; /** * Traverses the m_list, l_list and m2l_list of the pFMM matrix and resets the * dependency data (i.e. the data used to determine if the operations of a * cluster are ready for execution). */ void reset_scheduling_clusters_dependency_data( ) const; /** * Traverses the distribution tree recursively and resets the downward path * status of the clusters appropriately. * @param[in] root Current cluster in the tree traversal. */ void reset_downward_path_status_recursively( mesh::scheduling_time_cluster * root ) const; /** * Initializes quadrature structures used to integrate Chebyshev polynomials * on triangles. * * The quadrature points and weights on the reference triangle are * initialized. The other structures used for integration of Chebyshev * polynomials are resized appropriately. * * @param[out] my_quadrature Wrapper holding quadrature data. * @todo This is redundant. Can we restructure the code? */ void init_quadrature_polynomials( quadrature_wrapper & my_quadrature ) const; /** * Maps all quadrature nodes (integration of Chebyshev polynomials) from the * reference triangle to the actual geometry. * * The quadrature nodes on the reference triangles have to be given in * @p my_quadrature. The results are stored in this structure too. * * @param[in] y1 Coordinates of the first node of the triangle. * @param[in] y2 Coordinates of the second node of the triangle. * @param[in] y3 Coordinates of the third node of the triangle. 
* @param[in,out] my_quadrature Structure holding the quadrature nodes. * @todo Check if documentation makes sense in this context. */ void triangle_to_geometry( const linear_algebra::coordinates< 3 > & y1, const linear_algebra::coordinates< 3 > & y2, const linear_algebra::coordinates< 3 > & y3, quadrature_wrapper & my_quadrature ) const; /** * Maps points from a given axis-parallel spatial cluster to the cube [-1,1]^3 * using the standard linear transformation. * * The points are taken from @p my_quadrature and the results are stored there * too. * @param[in,out] my_quadrature Structure holding the points to be mapped and * the results. * @param[in] x_start Lower border of the space cluster along x dimension. * @param[in] x_end Upper border of the space cluster along x dimension. * @param[in] y_start Lower border of the space cluster along y dimension. * @param[in] y_end Upper border of the space cluster along y dimension. * @param[in] z_start Lower border of the space cluster along z dimension. * @param[in] z_end Upper border of the space cluster along z dimension. * @todo This is redundant! Can we restructure the code? * @todo rename the routine to better describe its action? */ void cluster_to_polynomials( quadrature_wrapper & my_quadrature, sc x_start, sc x_end, sc y_start, sc y_end, sc z_start, sc z_end ) const; /** * Returns the ratio of entries of the nearfield blocks of the pFMM matrix * handled by this process and entries of the global, non-approximated matrix. * @note Zeros in nearfield blocks and the full matrix are counted. * @warning If executed in parallel, the results should be added up to get * a meaningful result (due to the comparison with the global number of * entries). */ sc compute_nearfield_ratio( ); /** * Returns the ratio of non-zero entries of the nearfield blocks of the * pFMM matrix handled by this process and non-zero entries of the global, * non-approximated matrix. 
* @warning If executed in parallel, the results should be added up to get * a meaningful result (due to the comparison with the global number of * entries). */ sc compute_nonzero_nearfield_ratio( ); /** * Counts the number of all FMM operations levelwise * @note m2m and l2l operations are counted for the levels of the children */ void count_fmm_operations_levelwise( std::vector< lou > & n_s2m_operations, std::vector< lou > & n_m2m_operations, std::vector< lou > & n_m2l_operations, std::vector< lou > & n_l2l_operations, std::vector< lou > & n_l2t_operations ); /** * Task in the M-list * @param[in] x Input vector * @param[in] t_cluster Considered scheduling time cluster. * @param[in] verbose If true, the required time is written to file. * @param[in] verbose_file If @p verbose is true, this is used as output * file. * @tparam run_count Run count of the corresponding pFMM procedure. It is * used to choose the appropriate s2m operations for this * run in case of the hypersingular operator. */ template< slou run_count > void m_list_task( const distributed_block_vector & x, mesh::scheduling_time_cluster * t_cluster, bool verbose, const std::string & verbose_file ) const; /** * Task in the L-list * @param[in] y_pFMM Output vector * @param[in] t_cluster Considered scheduling time cluster. * @param[in] verbose If true, the required time is written to file. * @param[in] verbose_file If @p verbose is true, this is used as output * file. * @tparam run_count Run count of the corresponding pFMM procedure. It is * used to choose the appropriate l2t operations for this * run in case of the hypersingular operator. */ template< slou run_count > void l_list_task( distributed_block_vector & y_pFMM, mesh::scheduling_time_cluster * t_cluster, bool verbose, const std::string & verbose_file ) const; /** * Task in the M2L-list * @param[in] y_pFMM Output vector * @param[in] t_cluster Considered scheduling time cluster. * @param[in] verbose If true, the required time is written to file. 
* @param[in] verbose_file If @p verbose is true, this is used as output * file. * @tparam run_count Run count of the corresponding pFMM procedure. It is * used to choose the appropriate l2t operations for this * run in case of the hypersingular operator. */ template< slou run_count > void m2l_list_task( distributed_block_vector & y_pFMM, mesh::scheduling_time_cluster * t_cluster, bool verbose, const std::string & verbose_file ) const; /** * @param[in] current_index Index of the received data. * @param[in] current_cluster Processed scheduling_time_cluster. */ void upward_path_task( lou current_index, mesh::scheduling_time_cluster * current_cluster ) const; const MPI_Comm * _comm; //!< MPI communicator associated with the pFMM matrix. int _my_rank; //!< MPI rank of current process. mesh::distributed_spacetime_cluster_tree * _distributed_spacetime_tree; //!< part of a distributed tree hierarchically //!< decomposing the space-time domain. mesh::tree_structure * _scheduling_tree_structure; //!< Temporal tree structure used for //!< scheduling the FMM operations std::unordered_map< mesh::general_spacetime_cluster *, std::vector< full_matrix * > > _clusterwise_nearfield_matrices; //!< nearfield matrices for all the space- //!< time leaf clusters and their //!< nearfield clusters. std::list< mesh::scheduling_time_cluster * > _m_list; //!< M-list for the execution of the FMM. std::list< mesh::scheduling_time_cluster * > _m2l_list; //!< M2L-list for the execution of the FMM. std::list< mesh::scheduling_time_cluster * > _l_list; //!< L2L-list for the execution of the FMM. std::list< mesh::scheduling_time_cluster * > _n_list; //!< N-list for the execution of the FMM. std::vector< std::pair< mesh::scheduling_time_cluster *, lo > > _receive_data_information; //!< Contains for each data which has to be //!< received the corresponding scheduling time //!< cluster to which the data belongs and the //!< id of the process which sends it. 
The data //!< is either the moments or the local //!< contributions of the associated cluster. //!< The first @p _n_moments_to_receive_upward //!< entries belong to moments which have to be //!< received in the upward path of the FMM, the //!< next @p _n_moments_to_receive_m2l entries //!< to moments which have to be received for //!< M2L operations and the remaining entries to //!< local contributions which have to be //!< received in the downward path. lou _n_moments_to_receive_upward; //!< Number of grouped moments which have //!< to be received in the upward path of //!< the FMM. lou _n_moments_to_receive_m2l; //!< Number of grouped moments which have to //!< be received for M2L operations. std::vector< vector_type > _m2m_coeffs_s_dim_0_left; //!< left spatial m2m matrices along dimension 0 //!< stored levelwise. std::vector< vector_type > _m2m_coeffs_s_dim_0_right; //!< right spatial m2m matrices along //!< dimension 0 stored levelwise. std::vector< vector_type > _m2m_coeffs_s_dim_1_left; //!< left spatial m2m matrices along dimension 1 //!< stored levelwise. std::vector< vector_type > _m2m_coeffs_s_dim_1_right; //!< right spatial m2m matrices along //!< dimension 1 stored levelwise. std::vector< vector_type > _m2m_coeffs_s_dim_2_left; //!< left spatial m2m matrices along dimension 2 //!< stored levelwise. std::vector< vector_type > _m2m_coeffs_s_dim_2_right; //!< right spatial m2m matrices along //!< dimension 2 stored levelwise. int _temp_order; //!< degree of interpolation polynomials in time for pFMM. int _spat_order; //!< degree of Chebyshev polynomials for expansion in //!< space in pFMM. int _order_regular; //!< Triangle quadrature order for the regular integrals. //!< Used for computation of quadratures in S2M steps. int _m2l_integration_order; //!< _m2l_integration_order + 1 quadrature //!< points are used for the approximation of //!< the m2l coefficients. int _spat_contribution_size; //!< Spatial size of a contribution. 
It is //!< _spat_order + 3 choose 3 int _contribution_size; //!< Size of a contribution (moment or local //!< contribution) of a single spacetime cluster. mutable bem::chebyshev_evaluator _chebyshev; //!< Evaluator of the Chebyshev polynomials. mutable bem::lagrange_interpolant _lagrange; //!< Evaluator of the Lagrange polynomials. sc _alpha; //!< Heat conductivity. std::vector< sc, besthea::allocator_type< sc > > _cheb_nodes_sum_coll; //!< summed Chebyshev nodes for collapsed loop, //!< aligned std::vector< sc, besthea::allocator_type< sc > > _all_poly_vals_mult_coll; //!< multiplied Chebyshev polynomial values for collapsed loop, //!< aligned. NOTE(review): comment previously duplicated the one of //!< _cheb_nodes_sum_coll; verify the exact contents. mutable std::vector< full_matrix > _aux_buffer_0; //!< Auxiliary vector used to store intermediate results in //!< M2L operations. mutable std::vector< full_matrix > _aux_buffer_1; //!< Auxiliary vector used to store intermediate results in //!< M2L operations. bool _verbose; //!< print info to files during matrix-vector multiplication bool _measure_tasks; //!< print task time info to files during //!< matrix-vector multiplications mutable lo _non_nf_op_count; //!< counter to keep track of the number of //!< scheduled non-nearfield operations /*! * Increases @ref _non_nf_op_count by one. * Thread-safe: the increment is performed as an atomic update. */ void add_nn_operations( ) const { #pragma omp atomic update _non_nf_op_count++; } /*! * Decreases @ref _non_nf_op_count by one. * Thread-safe: the decrement is performed as an atomic update. */ void reduce_nn_operations( ) const { #pragma omp atomic update _non_nf_op_count--; } /*! * @returns the value of @ref _non_nf_op_count, read atomically. */ lo get_nn_operations( ) const { lo ret_val; #pragma omp atomic read ret_val = _non_nf_op_count; return ret_val; } mutable timer_type _global_timer; //!< structure for time measurements. // using clock_type = std::chrono::high_resolution_clock; using time_type = std::chrono::microseconds; //!< Unit type. 
mutable std::vector< std::vector< time_type::rep > > _m_task_times; //!< Contains a vector for each thread in which the //!< beginning and end times of primary m-list tasks which //!< this thread executed are stored. mutable std::vector< std::vector< time_type::rep > > _m2l_task_times; //!< Same as @ref _m_task_times for primary m2l-list //!< tasks. mutable std::vector< std::vector< time_type::rep > > _l_task_times; //!< Same as @ref _m_task_times for primary l-list //!< tasks. mutable std::vector< std::vector< time_type::rep > > _n_task_times; //!< Same as @ref _m_task_times for primary n-list //!< tasks. mutable std::vector< std::vector< time_type::rep > > _m_subtask_times; //!< Contains a vector for each thread in which the //!< beginning and end times of the subtasks in the //!< m-list which this thread executed are stored. mutable std::vector< std::vector< time_type::rep > > _m2l_subtask_times; //!< Same as @ref _m_subtask_times for m2l-list //!< subtasks. mutable std::vector< std::vector< time_type::rep > > _l_subtask_times; //!< Same as @ref _m_subtask_times for l-list //!< subtasks. mutable std::vector< std::vector< time_type::rep > > _n_subtask_times; //!< Same as @ref _m_subtask_times for n-list //!< subtasks. mutable std::vector< std::vector< time_type::rep > > _mpi_send_m2l; //!< Contains a vector for each thread. The entries in these //!< vectors are the times when the sending of a group of //!< moments to another process for m2l-list operations has //!< started. mutable std::vector< std::vector< time_type::rep > > _mpi_send_m_parent; //!< Same as @ref _mpi_send_m2l for sending moments //!< for m-list operations. mutable std::vector< std::vector< time_type::rep > > _mpi_send_l_children; //!< Same as @ref _mpi_send_m2l for sending local //!< contributions for l-list operations. mutable std::vector< std::vector< time_type::rep > > _mpi_recv_m2l; //!< Contains a vector for each thread. 
The entries in these //!< vectors are the times when the thread has detected the //!< reception of a group of moments needed for m2l-list //!< operations. mutable std::vector< std::vector< time_type::rep > > _mpi_recv_m_parent; //!< Same as @ref _mpi_recv_m2l for receiving moments //!< needed for m-list operations. mutable std::vector< std::vector< time_type::rep > > _mpi_recv_l_children; //!< Same as @ref _mpi_recv_m2l for receiving local //!< contributions needed for l-list operations. /*! * Saves task duration measurement per thread in files (1 per MPI rank). */ void save_times( time_type::rep total_loop_duration, time_type::rep total_apply_duration ) const; }; /** Typedef for the distributed single layer p0-p0 PFMM matrix */ typedef besthea::linear_algebra::distributed_pFMM_matrix< besthea::bem::spacetime_heat_sl_kernel_antiderivative, besthea::bem::distributed_fast_spacetime_be_space< besthea::bem::basis_tri_p0 >, besthea::bem::distributed_fast_spacetime_be_space< besthea::bem::basis_tri_p0 > > distributed_pFMM_matrix_heat_sl_p0p0; /** Typedef for the distributed double layer p0-p1 PFMM matrix */ typedef besthea::linear_algebra::distributed_pFMM_matrix< besthea::bem::spacetime_heat_dl_kernel_antiderivative, besthea::bem::distributed_fast_spacetime_be_space< besthea::bem::basis_tri_p0 >, besthea::bem::distributed_fast_spacetime_be_space< besthea::bem::basis_tri_p1 > > distributed_pFMM_matrix_heat_dl_p0p1; /** Typedef for the distributed spatially adjoint double layer p1-p0 PFMM matrix */ typedef besthea::linear_algebra::distributed_pFMM_matrix< besthea::bem::spacetime_heat_adl_kernel_antiderivative, besthea::bem::distributed_fast_spacetime_be_space< besthea::bem::basis_tri_p1 >, besthea::bem::distributed_fast_spacetime_be_space< besthea::bem::basis_tri_p0 > > distributed_pFMM_matrix_heat_adl_p1p0; /** Typedef for the distributed hypersingular p1-p1 PFMM matrix */ typedef besthea::linear_algebra::distributed_pFMM_matrix< 
besthea::bem::spacetime_heat_hs_kernel_antiderivative, besthea::bem::distributed_fast_spacetime_be_space< besthea::bem::basis_tri_p1 >, besthea::bem::distributed_fast_spacetime_be_space< besthea::bem::basis_tri_p1 > > distributed_pFMM_matrix_heat_hs_p1p1; #endif /* INCLUDE_BESTHEA_DISTRIBUTED_PFMM_MATRIX_H_ */
convolution_winograd_transform.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Winograd F(4,3) input transform for 3x3 stride-1 convolution (scalar MSA path).
// Splits the input into 6x6 tiles with a 4x4 output footprint each, and applies
// the B^T d B transform (itm below) row-wise then column-wise via tmp[6][6].
// Output layout: bottom_blob_tm is indexed as [36 transform rows][tiles] per channel.
// NOTE(review): assumes bottom_blob has already been padded so that
// (w - 2) and (h - 2) are multiples of 4 — confirm against the caller.
static void conv3x3s1_winograd43_transform_input_msa(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 4;
    const int h_tiles = (h - 2) / 4;
    const int tiles = w_tiles * h_tiles;

    // Input-transform matrix B^T (applied on both sides of the 6x6 tile):
    // const float itm[6][6] = {
    //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
    //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
    //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
    //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
    // };

    // 0 = 4 * r00 - 5 * r02 + r04
    // 1 = -4 * (r01 + r02) + r04 + r03
    // 2 = 4 * (r01 - r02) + r04 - r03
    // 3 = -2 * (r01 - r03) + r04 - r02
    // 4 = 2 * (r01 - r03) + r04 - r02
    // 5 = 4 * r01 - 5 * r03 + r05

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // Row-transformed tile, staged here before the column pass.
        float tmp[6][6];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // Tiles overlap by 2 pixels: each 6x6 tile starts every 4 pixels.
                const float* r0 = img0.row(i * 4) + (j * 4);

                // Pass 1: transform each of the 6 rows; store transposed in tmp.
                for (int m = 0; m < 6; m++)
                {
                    float r00 = r0[0];
                    float r01 = r0[1];
                    float r02 = r0[2];
                    float r03 = r0[3];
                    float r04 = r0[4];
                    float r05 = r0[5];

                    float tmp0m = 4 * r00 - 5 * r02 + r04;
                    float tmp1m = -4 * (r01 + r02) + r04 + r03;
                    float tmp2m = 4 * (r01 - r02) + r04 - r03;
                    float tmp3m = -2 * (r01 - r03) + r04 - r02;
                    float tmp4m = 2 * (r01 - r03) + r04 - r02;
                    float tmp5m = 4 * r01 - 5 * r03 + r05;

                    tmp[0][m] = tmp0m;
                    tmp[1][m] = tmp1m;
                    tmp[2][m] = tmp2m;
                    tmp[3][m] = tmp3m;
                    tmp[4][m] = tmp4m;
                    tmp[5][m] = tmp5m;

                    r0 += w;
                }

                // Destination pointers: one per transform row, strided by `tiles`.
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j);
                float* r0_tm_1 = r0_tm_0 + tiles;
                float* r0_tm_2 = r0_tm_0 + tiles * 2;
                float* r0_tm_3 = r0_tm_0 + tiles * 3;
                float* r0_tm_4 = r0_tm_0 + tiles * 4;
                float* r0_tm_5 = r0_tm_0 + tiles * 5;

                // Pass 2: transform the columns (same coefficients) and scatter.
                for (int m = 0; m < 6; m++)
                {
                    float tmp00 = tmp[m][0];
                    float tmp01 = tmp[m][1];
                    float tmp02 = tmp[m][2];
                    float tmp03 = tmp[m][3];
                    float tmp04 = tmp[m][4];
                    float tmp05 = tmp[m][5];

                    float r0tm0 = 4 * tmp00 - 5 * tmp02 + tmp04;
                    float r0tm1 = -4 * (tmp01 + tmp02) + tmp04 + tmp03;
                    float r0tm2 = 4 * (tmp01 - tmp02) + tmp04 - tmp03;
                    float r0tm3 = -2 * (tmp01 - tmp03) + tmp04 - tmp02;
                    float r0tm4 = 2 * (tmp01 - tmp03) + tmp04 - tmp02;
                    float r0tm5 = 4 * tmp01 - 5 * tmp03 + tmp05;

                    r0_tm_0[0] = r0tm0;
                    r0_tm_1[0] = r0tm1;
                    r0_tm_2[0] = r0tm2;
                    r0_tm_3[0] = r0tm3;
                    r0_tm_4[0] = r0tm4;
                    r0_tm_5[0] = r0tm5;

                    r0_tm_0 += tiles * 6;
                    r0_tm_1 += tiles * 6;
                    r0_tm_2 += tiles * 6;
                    r0_tm_3 += tiles * 6;
                    r0_tm_4 += tiles * 6;
                    r0_tm_5 += tiles * 6;
                }
            }
        }
    }
}

// Winograd F(4,3) output transform: folds each 6x6 transformed tile back into a
// 4x4 spatial output block (A^T m A, otm below), adding the per-channel bias.
static void conv3x3s1_winograd43_transform_output_msa(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 4;
    const int h_tiles = outh / 4;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;  // NULL when the layer has no bias

    // Output-transform matrix A^T:
    // const float otm[4][6] = {
    //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
    //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
    // };

    // 0 = r00 + (r01 + r02) + (r03 + r04)
    // 1 = (r01 - r02) + (r03 - r04) * 2
    // 2 = (r01 + r02) + (r03 + r04) * 4
    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        float bias0 = biasptr ? biasptr[p] : 0.f;

        // Row-transformed tile staged between the two passes (4 rows out, 6 cols in).
        float tmp[4][6];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // Gather pointers mirror the scatter layout of the input transform.
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j);
                const float* output0_tm_1 = output0_tm_0 + tiles;
                const float* output0_tm_2 = output0_tm_0 + tiles * 2;
                const float* output0_tm_3 = output0_tm_0 + tiles * 3;
                const float* output0_tm_4 = output0_tm_0 + tiles * 4;
                const float* output0_tm_5 = output0_tm_0 + tiles * 5;

                float* output0 = out0.row(i * 4) + (j * 4);

                // Pass 1: column transform of the 6x6 tile into tmp[4][6].
                for (int m = 0; m < 6; m++)
                {
                    float out0tm0 = output0_tm_0[0];
                    float out0tm1 = output0_tm_1[0];
                    float out0tm2 = output0_tm_2[0];
                    float out0tm3 = output0_tm_3[0];
                    float out0tm4 = output0_tm_4[0];
                    float out0tm5 = output0_tm_5[0];

                    float tmp02a = out0tm1 + out0tm2;
                    float tmp13a = out0tm1 - out0tm2;

                    float tmp02b = out0tm3 + out0tm4;
                    float tmp13b = out0tm3 - out0tm4;

                    float tmp0m = out0tm0 + tmp02a + tmp02b;
                    float tmp1m = tmp13a + tmp13b * 2;
                    float tmp2m = tmp02a + tmp02b * 4;
                    float tmp3m = out0tm5 + tmp13a + tmp13b * 8;

                    tmp[0][m] = tmp0m;
                    tmp[1][m] = tmp1m;
                    tmp[2][m] = tmp2m;
                    tmp[3][m] = tmp3m;

                    output0_tm_0 += tiles * 6;
                    output0_tm_1 += tiles * 6;
                    output0_tm_2 += tiles * 6;
                    output0_tm_3 += tiles * 6;
                    output0_tm_4 += tiles * 6;
                    output0_tm_5 += tiles * 6;
                }

                // Pass 2: row transform; bias is added exactly once per output pixel.
                for (int m = 0; m < 4; m++)
                {
                    float tmp00 = tmp[m][0];
                    float tmp01 = tmp[m][1];
                    float tmp02 = tmp[m][2];
                    float tmp03 = tmp[m][3];
                    float tmp04 = tmp[m][4];
                    float tmp05 = tmp[m][5];

                    float tmp02a = tmp01 + tmp02;
                    float tmp13a = tmp01 - tmp02;

                    float tmp02b = tmp03 + tmp04;
                    float tmp13b = tmp03 - tmp04;

                    float out00 = bias0 + tmp00 + tmp02a + tmp02b;
                    float out01 = bias0 + tmp13a + tmp13b * 2;
                    float out02 = bias0 + tmp02a + tmp02b * 4;
                    float out03 = bias0 + tmp05 + tmp13a + tmp13b * 8;

                    output0[0] = out00;
                    output0[1] = out01;
                    output0[2] = out02;
                    output0[3] = out03;

                    output0 += outw;
                }
            }
        }
    }
}

// Winograd F(2,3) input transform: 4x4 input tiles with a 2x2 output footprint.
// Same two-pass (rows then columns) structure as the F(4,3) variant above.
static void conv3x3s1_winograd23_transform_input_msa(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 2;
    const int h_tiles = (h - 2) / 2;
    const int tiles = w_tiles * h_tiles;

    // Input-transform matrix B^T:
    // const float itm[4][4] = {
    //     {1.0f, 0.0f, -1.0f, 0.0f},
    //     {0.0f, 1.0f, 1.00f, 0.0f},
    //     {0.0f, -1.0f, 1.00f, 0.0f},
    //     {0.0f, -1.0f, 0.00f, 1.0f}
    // };

    // 0 = r00 - r02
    // 1 = r01 + r02
    // 2 = r02 - r01
    // 3 = r03 - r01

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        float tmp[4][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 2) + (j * 2);

                // Pass 1: row transform, stored transposed.
                for (int m = 0; m < 4; m++)
                {
                    float r00 = r0[0];
                    float r01 = r0[1];
                    float r02 = r0[2];
                    float r03 = r0[3];

                    float tmp0m = r00 - r02;
                    float tmp1m = r01 + r02;
                    float tmp2m = r02 - r01;
                    float tmp3m = r03 - r01;

                    tmp[0][m] = tmp0m;
                    tmp[1][m] = tmp1m;
                    tmp[2][m] = tmp2m;
                    tmp[3][m] = tmp3m;

                    r0 += w;
                }

                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j);
                float* r0_tm_1 = r0_tm_0 + tiles;
                float* r0_tm_2 = r0_tm_0 + tiles * 2;
                float* r0_tm_3 = r0_tm_0 + tiles * 3;

                // Pass 2: column transform and scatter to the 16 transform planes.
                for (int m = 0; m < 4; m++)
                {
                    float tmp00 = tmp[m][0];
                    float tmp01 = tmp[m][1];
                    float tmp02 = tmp[m][2];
                    float tmp03 = tmp[m][3];

                    float r0tm0 = tmp00 - tmp02;
                    float r0tm1 = tmp01 + tmp02;
                    float r0tm2 = tmp02 - tmp01;
                    float r0tm3 = tmp03 - tmp01;

                    r0_tm_0[0] = r0tm0;
                    r0_tm_1[0] = r0tm1;
                    r0_tm_2[0] = r0tm2;
                    r0_tm_3[0] = r0tm3;

                    r0_tm_0 += tiles * 4;
                    r0_tm_1 += tiles * 4;
                    r0_tm_2 += tiles * 4;
                    r0_tm_3 += tiles * 4;
                }
            }
        }
    }
}

// Winograd F(2,3) output transform: folds each 4x4 transformed tile back into a
// 2x2 spatial output block, adding the per-channel bias.
static void conv3x3s1_winograd23_transform_output_msa(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 2;
    const int h_tiles = outh / 2;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;  // NULL when the layer has no bias

    // Output-transform matrix A^T:
    // const float otm[2][4] = {
    //     {1.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 1.0f}
    // };

    // 0 = r00 + r01 + r02
    // 1 = r01 - r02 + r03

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        float bias0 = biasptr ? biasptr[p] : 0.f;

        float tmp[2][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j);
                const float* output0_tm_1 = output0_tm_0 + tiles;
                const float* output0_tm_2 = output0_tm_0 + tiles * 2;
                const float* output0_tm_3 = output0_tm_0 + tiles * 3;

                float* output0 = out0.row(i * 2) + (j * 2);

                // Pass 1: column transform into tmp[2][4].
                for (int m = 0; m < 4; m++)
                {
                    float out0tm0 = output0_tm_0[0];
                    float out0tm1 = output0_tm_1[0];
                    float out0tm2 = output0_tm_2[0];
                    float out0tm3 = output0_tm_3[0];

                    float tmp0m = out0tm0 + out0tm1 + out0tm2;
                    float tmp1m = out0tm1 - out0tm2 + out0tm3;

                    tmp[0][m] = tmp0m;
                    tmp[1][m] = tmp1m;

                    output0_tm_0 += tiles * 4;
                    output0_tm_1 += tiles * 4;
                    output0_tm_2 += tiles * 4;
                    output0_tm_3 += tiles * 4;
                }

                // Pass 2: row transform; write the 2x2 output block with bias.
                for (int m = 0; m < 2; m++)
                {
                    float tmp00 = tmp[m][0];
                    float tmp01 = tmp[m][1];
                    float tmp02 = tmp[m][2];
                    float tmp03 = tmp[m][3];

                    float out00 = bias0 + tmp00 + tmp01 + tmp02;
                    float out01 = bias0 + tmp01 - tmp02 + tmp03;

                    output0[0] = out00;
                    output0[1] = out01;

                    output0 += outw;
                }
            }
        }
    }
}
issue_002.c
#include <stdio.h> #include "assert.h" #include <unistd.h> // 920 fails #define TRIALS 452 // 6000 fails #define N 64*5000 int main() { int fail = 0; double A[N], B[N], C[N]; for (int i = 0; i < N; i++) { A[i] = 0.0; B[i] = 0.0; C[i] = 1.0; } int nte = 32; int tl = 64; int blockSize = tl; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(nte) thread_limit(tl) { #pragma omp distribute for(int j = 0 ; j < N ; j += blockSize) { #pragma omp parallel for for(int i = j ; i < j+blockSize; i++) { A[i] += B[i] + C[i]; } } } } for(int i = 0 ; i < N ; i++) { if (A[i] != TRIALS) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]); fail = 1; } } if(fail) { printf("Failed\n"); return 1; } else { printf("Succeeded\n"); return 0; } }
GB_unop__log10_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__log10_fp64_fp64
// op(A') function: GB_unop_tran__log10_fp64_fp64

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = log10 (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = log10 (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    double aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ;                \
    Cx [pC] = log10 (z) ;           \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOG10 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = log10 (aij) elementwise over anz entries.  Cx and Ax may be
// aliased.  Ab is the bitmap of A (NULL when A is sparse/hyper/full).
GrB_Info GB_unop_apply__log10_fp64_fp64
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every one of the anz entries is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = log10 (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries not present in the bitmap
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = log10 (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unop_transpose.c, parameterized by the
// GB_* macros defined above.
GrB_Info GB_unop_tran__log10_fp64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
omp_getEnvInfo.c
/****************************************************************************** * FILE: omp_getEnvInfo.c * DESCRIPTION: * OpenMP Example - Get Environment Information - C/C++ Version * The master thread queries and prints selected environment information. * AUTHOR: Blaise Barney 7/06 * LAST REVISED: 05/18/16 ******************************************************************************/ #include <omp.h> #include <stdio.h> #include <stdlib.h> int main (int argc, char *argv[]) { int nthreads, tid, procs, maxt, inpar, dynamic, nested; /* Start parallel region */ #pragma omp parallel private(nthreads, tid) { /* Obtain thread number */ tid = omp_get_thread_num(); /* Only master thread does this */ if (tid == 0) { printf("Thread %d getting environment info...\n", tid); /* Get environment information */ procs = omp_get_num_procs(); nthreads = omp_get_num_threads(); maxt = omp_get_max_threads(); inpar = omp_in_parallel(); dynamic = omp_get_dynamic(); nested = omp_get_nested(); /* Print environment information */ printf("Number of processors = %d\n", procs); printf("Number of threads = %d\n", nthreads); printf("Max threads = %d\n", maxt); printf("In parallel? = %d\n", inpar); printf("Dynamic threads enabled? = %d\n", dynamic); printf("Nested parallelism enabled? = %d\n", nested); } } /* Done */ }
affinuma.c
#include "affinuma.h"

/* Global state for the NUMA bandwidth survey.  The linked list of
 * numa_node_bw records is kept sorted by descending triad bandwidth
 * (owtr_avg); numa_list_head is the fastest node. */
struct numa_node_bw * numa_node_list = NULL;
struct numa_node_bw * numa_list_head = NULL;
int mem_types;
int max_node;
int numt;
int total_numa_nodes = 0;
int * numa_node_ids;            /* ids of the nodes this process may bind to */
struct bitmask * numa_nodes;
char ** mem_tech;
long double * means;
int * cluster_sizes;
char classes[3][8] = {"fast", "slow", "slowest"};   /* speed-class labels */
char * cpu_range;               /* CPU range string taken from argv[1] */

/* Walks the bandwidth-sorted list and assigns each node a speed class.
 * A node drops to the next class when its triad bandwidth is more than 20%
 * below the previous node's; at most 3 classes are used.
 * Precondition: the list is non-empty and sorted (built by numatest). */
void label_mem(){
    struct numa_node_bw * bw_it = numa_list_head;
    struct numa_node_bw * next_bw_it = bw_it->next;
    int i = 0;
    bw_it->mem_type = classes[i];
    while(next_bw_it != NULL){
        long double diff = bw_it->owtr_avg - next_bw_it->owtr_avg;
        long double perct = 0.2*bw_it->owtr_avg;    /* 20% drop threshold */
        if((diff > perct)&&((i+1)<3)){
            i++;
        }
        next_bw_it->mem_type = classes[i];
        bw_it = next_bw_it;
        next_bw_it= bw_it->next;
    }
}

/* Inserts new_node into the global list, keeping it sorted by descending
 * owtr_avg (insertion sort).  Precondition: the list already has at least
 * one element (numatest seeds it before calling this). */
void sort_list(struct numa_node_bw * new_node){
    struct numa_node_bw * bw_it = numa_list_head;
    struct numa_node_bw * prev_bw_it = NULL;
    while(bw_it != NULL){
        if((bw_it->owtr_avg < new_node->owtr_avg)){
            if(prev_bw_it == NULL){
                /* new fastest node: becomes the head */
                new_node->next = bw_it;
                numa_list_head = new_node;
            }else{
                prev_bw_it->next = new_node;
                new_node->next = bw_it;
            }
            return;
        }
        prev_bw_it = bw_it;
        bw_it = bw_it->next;
    }
    /* slowest so far: append at the tail */
    prev_bw_it->next = new_node;
    return;
}

/* Writes the per-node classification to the "numa_class" file (one
 * "<node> <class> <triad bw>" line per node) and echoes a human-readable
 * table to stdout. */
void write_config_file(){
    FILE * conf;
    char fname[50];
    strcpy(fname, "numa_class");
    conf = fopen(fname, "w");
    struct numa_node_bw * bw_it = numa_list_head;
    printf("CPU ID\tNUMA ID\tType\tInit(Mb/s)\tTriad(Mb/s)\n");
    while(bw_it != NULL){
        fprintf(conf, "%d %s %Lf\n", bw_it->numa_id, bw_it->mem_type, bw_it->owtr_avg);
        printf("%s\t%d\t%s\t%LF\t%Lf\n", cpu_range, bw_it->numa_id, bw_it->mem_type, bw_it->wr_only_avg, bw_it->owtr_avg);
        bw_it = bw_it->next;
    }
    fclose(conf);
}

/* Benchmarks every NUMA node this process may allocate on: for each node it
 * times 10 iterations of a parallel write-only pass (init) and a parallel
 * read-write pass (a = b + c, STREAM-add-like), converts the timings to
 * MB/s, and records the averages in the sorted list.  Finally labels the
 * nodes and writes the config file.
 * NOTE(review): assumes argv[1] is a CPU-range string — confirm against the
 * caller; argc is not checked. */
void numatest(int argc, char ** argv){
    cpu_range=argv[1];
    max_node = numa_max_node() + 1;
    int cpu_count = numa_num_possible_cpus();
    numa_node_ids = (int*)malloc(sizeof(int)*max_node);
    /* collect the ids of nodes in this process's memory-bind mask */
    struct bitmask * numa_nodes = numa_get_membind();
    int i = 0;
    while(i < numa_nodes->size){
        if(numa_bitmask_isbitset(numa_nodes, i)){
            numa_node_ids[total_numa_nodes] = i;
            total_numa_nodes++;
        }
        i++;
    }
    int mbs = 64;
    size_t size = mbs*1024*1024;    /* 64 MiB per buffer */
    int r_size = 16*32768;
    int c_size = 16*32768;
    double *a, *b, *c;
    clock_t start, end;
    struct timespec begin, stop;
    srand(clock());
    //sleep(5);
    i = 0;
    while(i < total_numa_nodes){
        int iters = 0;
        int stride;
        long double wr_only_avg=0.0;
        long double owtr_avg=0.0;
        long double accum;
        for( iters = 0; iters < 10; iters++){
            int j = 0;
            int k = 0;
            /* buffers are placed on the node under test */
            a = (double*)numa_alloc_onnode(size, numa_node_ids[i]);
            b = (double*)numa_alloc_onnode(size, numa_node_ids[i]);
            c = (double*)numa_alloc_onnode(size, numa_node_ids[i]);
            long double empty=0.0;
            long double empty2=0.0;
redo1:
            /* write-only pass: also faults the pages in on first touch */
            clock_gettime( CLOCK_MONOTONIC, &begin);
            #pragma omp parallel for
            for(j = 0;j < (size/sizeof(double));j++){
                a[j] = 1.0;
                b[j] = 2.0;
                c[j] = 3.0;
            }
            clock_gettime( CLOCK_MONOTONIC, &stop);
            accum = ( stop.tv_sec - begin.tv_sec ) + (long double)( stop.tv_nsec - begin.tv_nsec ) / (long double)BILLION;
            if(accum <= empty){
                /* retry on a non-positive (clock-glitch) measurement */
                goto redo1;
            }
            wr_only_avg += ((8*size*1.0E-06)/(long double)(accum - empty));
redo3:
            /* read-write pass: one write + two reads per element */
            clock_gettime( CLOCK_MONOTONIC, &begin);
            #pragma omp parallel for
            for(j =0; j < (size/sizeof(double)); j++){
                a[j] = c[j] + b[j];
            }
            clock_gettime( CLOCK_MONOTONIC, &stop);
            accum = ( stop.tv_sec - begin.tv_sec ) + (long double)( stop.tv_nsec - begin.tv_nsec ) / (long double)BILLION;
            if(accum <= empty){
                goto redo3;
            }
            owtr_avg += ((3*size*1.0E-06)/(long double)(accum - empty));
            numa_free(a, size);
            numa_free(b, size);
            numa_free(c, size);
        }
        /* record the 10-iteration averages for this node */
        struct numa_node_bw * node_bw = (struct numa_node_bw *)malloc(sizeof(struct numa_node_bw));
        node_bw->numa_id = numa_node_ids[i];
        node_bw->wr_only_avg = wr_only_avg/10;
        node_bw->owtr_avg = owtr_avg/10;
        node_bw->next = NULL;
        if(numa_node_list == NULL){
            /* first node seeds the list; later nodes are insertion-sorted */
            numa_node_list = node_bw;
            numa_list_head = numa_node_list;
        }
        else{
            sort_list(node_bw);
        }
        i++;
    }
    label_mem();
    write_config_file();
}
GB_binop__fmod_fp32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__fmod_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_01__fmod_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__fmod_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_03__fmod_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__fmod_fp32)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__fmod_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__fmod_fp32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__fmod_fp32)
// C=scalar+B                       GB (_bind1st__fmod_fp32)
// C=scalar+B'                      GB (_bind1st_tran__fmod_fp32)
// C=A+scalar                       GB (_bind2nd__fmod_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__fmod_fp32)

// C type:   float
// A type:   float
// B,b type: float
// BinaryOp: cij = fmodf (aij, bij)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    float aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    float bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = fmodf (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FMOD || GxB_NO_FP32 || GxB_NO_FMOD_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Not generated for fmod: the accum op must be one of the listed monoid-like
// operators, so this variant is compiled out.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__fmod_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__fmod_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__fmod_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// Not generated for fmod (colscale requires a semiring multiply op).
#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Not generated for fmod (rowscale requires a semiring multiply op).
#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__fmod_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__fmod_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__fmod_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__fmod_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__fmod_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = fmodf (x, Bx [p]) for all entries present in the bitmap Bb
// (Bb == NULL is handled by GBB, which then treats every entry as present).
GrB_Info GB (_bind1st__fmod_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = fmodf (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = fmodf (Ax [p], y) for all entries present in the bitmap Ab.
GrB_Info GB (_bind2nd__fmod_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = fmodf (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    float aij = GBX (Ax, pA, false) ;   \
    Cx [pC] = fmodf (x, aij) ;          \
}

GrB_Info GB (_bind1st_tran__fmod_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    float aij = GBX (Ax, pA, false) ;   \
    Cx [pC] = fmodf (aij, y) ;          \
}

GrB_Info GB (_bind2nd_tran__fmod_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
mask.c
// This program is free software: you can use, modify and/or redistribute it // under the terms of the simplified BSD License. You should have received a // copy of this license along this program. If not, see // <http://www.opensource.org/licenses/bsd-license.html>. // // Copyright (C) 2012, Javier Sánchez Pérez <jsanchez@dis.ulpgc.es> // All rights reserved. #ifndef MASK_C #define MASK_C #include <stdlib.h> #include <stdio.h> #include <math.h> #include "xmalloc.c" #define BOUNDARY_CONDITION_DIRICHLET 0 #define BOUNDARY_CONDITION_REFLECTING 1 #define BOUNDARY_CONDITION_PERIODIC 2 #define DEFAULT_GAUSSIAN_WINDOW_SIZE 5 #define DEFAULT_BOUNDARY_CONDITION BOUNDARY_CONDITION_REFLECTING /** * * Compute the gradient of an image using centered differences * */ void gradient( const float *input, // input image float *dx, // computed x derivative float *dy, // computed y derivative const int nx, // image width const int ny // image height ) { // compute gradient in the central body of the image #pragma omp parallel for for(int i = 1; i < ny-1; i++) { for(int j = 1; j < nx-1; j++) { const int k = i * nx + j; dx[k] = 0.5*(input[k+1] - input[k-1]); dy[k] = 0.5*(input[k+nx] - input[k-nx]); } } // compute gradient in the first and last rows for(int j = 1; j < nx-1; j++) { dx[j] = 0.5*(input[j+1] - input[j-1]); dy[j] = 0.5*(input[j+nx] - input[j]); const int k = (ny - 1) * nx + j; dx[k] = 0.5*(input[k+1] - input[k-1]); dy[k] = 0.5*(input[k] - input[k-nx]); } // compute gradient in the first and last columns for(int i = 1; i < ny-1; i++) { const int p = i * nx; dx[p] = 0.5*(input[p+1] - input[p]); dy[p] = 0.5*(input[p+nx] - input[p-nx]); const int k = (i+1) * nx - 1; dx[k] = 0.5*(input[k] - input[k-1]); dy[k] = 0.5*(input[k+nx] - input[k-nx]); } // compute the gradient in the corners dx[0] = 0.5*(input[1] - input[0]); dy[0] = 0.5*(input[nx] - input[0]); dx[nx-1] = 0.5*(input[nx-1] - input[nx-2]); dy[nx-1] = 0.5*(input[2*nx-1] - input[nx-1]); dx[(ny-1)*nx] = 0.5*(input[(ny-1)*nx + 
1] - input[(ny-1)*nx]); dy[(ny-1)*nx] = 0.5*(input[(ny-1)*nx] - input[(ny-2)*nx]); dx[ny*nx-1] = 0.5*(input[ny*nx-1] - input[ny*nx-1-1]); dy[ny*nx-1] = 0.5*(input[ny*nx-1] - input[(ny-1)*nx-1]); } /** * * In-place Gaussian smoothing of an image * */ void gaussian( float *I, // input/output image const int xdim, // image width const int ydim, // image height const double sigma // Gaussian sigma ) { const int boundary_condition = DEFAULT_BOUNDARY_CONDITION; const int window_size = DEFAULT_GAUSSIAN_WINDOW_SIZE; const double den = 2*sigma*sigma; const int size = (int) (window_size * sigma) + 1 ; const int bdx = xdim + size; const int bdy = ydim + size; if (boundary_condition && size > xdim) { fprintf(stderr, "GaussianSmooth: sigma too large\n"); abort(); } // compute the coefficients of the 1D convolution kernel double B[size]; for(int i = 0; i < size; i++) B[i] = 1 / (sigma * sqrt(2.0 * 3.1415926)) * exp(-i * i / den); // normalize the 1D convolution kernel double norm = 0; for(int i = 0; i < size; i++) norm += B[i]; norm *= 2; norm -= B[0]; for(int i = 0; i < size; i++) B[i] /= norm; // convolution of each line of the input image double *R = xmalloc((size + xdim + size)*sizeof*R); for (int k = 0; k < ydim; k++) { int i, j; for (i = size; i < bdx; i++) R[i] = I[k * xdim + i - size]; switch (boundary_condition) { case BOUNDARY_CONDITION_DIRICHLET: for(i = 0, j = bdx; i < size; i++, j++) R[i] = R[j] = 0; break; case BOUNDARY_CONDITION_REFLECTING: for(i = 0, j = bdx; i < size; i++, j++) { R[i] = I[k * xdim + size-i]; R[j] = I[k * xdim + xdim-i-1]; } break; case BOUNDARY_CONDITION_PERIODIC: for(i = 0, j = bdx; i < size; i++, j++) { R[i] = I[k * xdim + xdim-size+i]; R[j] = I[k * xdim + i]; } break; } for (i = size; i < bdx; i++) { double sum = B[0] * R[i]; for (j = 1; j < size; j++ ) sum += B[j] * ( R[i-j] + R[i+j] ); I[k * xdim + i - size] = sum; } } // convolution of each column of the input image double *T = xmalloc((size + ydim + size)*sizeof*T); for (int k = 0; k < 
xdim; k++) { int i, j; for (i = size; i < bdy; i++) T[i] = I[(i - size) * xdim + k]; switch (boundary_condition) { case BOUNDARY_CONDITION_DIRICHLET: for (i = 0, j = bdy; i < size; i++, j++) T[i] = T[j] = 0; break; case BOUNDARY_CONDITION_REFLECTING: for (i = 0, j = bdy; i < size; i++, j++) { T[i] = I[(size-i) * xdim + k]; T[j] = I[(ydim-i-1) * xdim + k]; } break; case BOUNDARY_CONDITION_PERIODIC: for( i = 0, j = bdx; i < size; i++, j++) { T[i] = I[(ydim-size+i) * xdim + k]; T[j] = I[i * xdim + k]; } break; } for (i = size; i < bdy; i++) { double sum = B[0] * T[i]; for (j = 1; j < size; j++ ) sum += B[j] * (T[i-j] + T[i+j]); I[(i - size) * xdim + k] = sum; } } free(R); free(T); } #endif//MASK_C
backward_euler_monolithic_ale_scheme.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
// License:         BSD License
//                  Kratos default license: kratos/license.txt
//
// Main authors:    Miguel Maso
//

#ifndef KRATOS_BACKWARD_EULER_MONOLITHIC_ALE_SCHEME
#define KRATOS_BACKWARD_EULER_MONOLITHIC_ALE_SCHEME

/* System includes */

/* External includes */
#include "boost/smart_ptr.hpp"

/* Project includes */
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "solving_strategies/schemes/scheme.h"
#include "includes/variables.h"
#include "includes/cfd_variables.h"
#include "containers/array_1d.h"
#include "utilities/openmp_utils.h"
#include "utilities/dof_updater.h"
#include "utilities/coordinate_transformation_utilities.h"
#include "processes/process.h"
#include "../../FluidDynamicsApplication/custom_strategies/strategies/residualbased_predictorcorrector_velocity_bossak_scheme_turbulent.h"

namespace Kratos {

//@name Kratos Globals
//@{
//@}
//@name Type Definitions
//@{
//@}
//@name Enum's
//@{
//@}
//@name Functions
//@{
//@}
//@name Kratos Classes
//@{

/**
 * @class BackwardEulerMonolithicAleScheme
 * @ingroup PFEM2Application
 * @brief A first order scheme for testing purpose
 *
 * Derives from the Bossak predictor-corrector scheme with alpha = 0 and
 * gamma = 1 (set in the constructor), i.e. a backward-Euler time
 * integration.  After every Update/Predict it can copy VELOCITY into
 * MESH_VELOCITY so the mesh follows the fluid (Lagrangian/ALE mode).
 */
template<class TSparseSpace, class TDenseSpace >
class BackwardEulerMonolithicAleScheme
    : public ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent<TSparseSpace, TDenseSpace>
{
public:
    //@name Type Definitions
    //@{

    KRATOS_CLASS_POINTER_DEFINITION(BackwardEulerMonolithicAleScheme);

    typedef Scheme<TSparseSpace, TDenseSpace> BaseType;

    typedef ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent<TSparseSpace, TDenseSpace> BossakType;

    typedef typename BaseType::TDataType TDataType;

    typedef typename BaseType::DofsArrayType DofsArrayType;

    typedef typename Element::DofsVectorType DofsVectorType;

    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;

    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;

    typedef Element::GeometryType GeometryType;

    //@}
    //@name Life Cycle
    //@{

    /**
     * @brief Constructor.
     * @param DomainSize spatial dimension, forwarded to the Bossak base
     * @param IsLagrangian if true, MESH_VELOCITY is synchronized with
     *        VELOCITY after each update/prediction
     * The base scheme is constructed with alpha = 0 and a zero relaxation
     * (first two arguments), and gamma is forced to 1 (backward Euler).
     */
    BackwardEulerMonolithicAleScheme(unsigned int DomainSize, bool IsLagrangian = true)
        : ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent<TSparseSpace, TDenseSpace>(0.0, 0.0, DomainSize)
    {
        mIsLagrangian = IsLagrangian;
        BossakType::mGammaNewmark = 1.0;
    }

    /// Destructor.
    ~BackwardEulerMonolithicAleScheme() override {}

    //@}
    //@name Operations
    //@{

    /**
     * @brief Performs the Bossak update and then the ALE mesh-velocity sync.
     */
    void Update(ModelPart& rModelPart,
                DofsArrayType& rDofSet,
                TSystemMatrixType& A,
                TSystemVectorType& Dv,
                TSystemVectorType& b) override
    {
        BossakType::Update(rModelPart, rDofSet, A, Dv, b);
        this->Pfem2AdditionalUpdateOperations(rModelPart, rDofSet, A, Dv, b);
    }

    /**
     * @brief Performs the Bossak prediction and then the ALE mesh-velocity sync.
     */
    void Predict(ModelPart& rModelPart,
                 DofsArrayType& rDofSet,
                 TSystemMatrixType& A,
                 TSystemVectorType& Dv,
                 TSystemVectorType& b) override
    {
        BossakType::Predict(rModelPart, rDofSet, A, Dv, b);
        this->Pfem2AdditionalUpdateOperations(rModelPart, rDofSet, A, Dv, b);
    }

    /**
     * @brief If the scheme is Lagrangian, copies VELOCITY into
     *        MESH_VELOCITY on every node (in parallel); otherwise a no-op.
     *        A, Dv and b are unused here and kept for signature symmetry
     *        with Update/Predict.
     */
    void Pfem2AdditionalUpdateOperations(ModelPart& rModelPart,
                                         DofsArrayType& rDofSet,
                                         TSystemMatrixType& A,
                                         TSystemVectorType& Dv,
                                         TSystemVectorType& b)
    {
        if (mIsLagrangian)
        {
            #pragma omp parallel for
            for (int i = 0; i < static_cast<int>(rModelPart.NumberOfNodes()); ++i)
            {
                auto it_node = rModelPart.NodesBegin() + i;
                noalias(it_node->FastGetSolutionStepValue(MESH_VELOCITY)) = it_node->FastGetSolutionStepValue(VELOCITY);
            }
        }
    }

    //@}

protected:

    ///@name Protected member Variables
    ///@{

    bool mIsLagrangian;  // true => mesh follows the fluid (ALE/Lagrangian)

    //@}

}; // Class BackwardEulerMonolithicAleScheme

//@}
//@name Type Definitions
//@{
//@}

} // namespace Kratos

#endif // KRATOS_BACKWARD_EULER_MONOLITHIC_ALE_SCHEME
ssyrk.c
#include "blas.h"
#include "error.h"
#include <stdio.h>
#include "handle.h"
#include "config.h"
#include "ssyrk.fatbin.c"

// size_t min/max helpers (not provided by the headers above)
static inline size_t min(size_t a, size_t b) { return (a < b) ? a : b; }
static inline size_t max(size_t a, size_t b) { return (a > b) ? a : b; }

// Async 2D host->device copy of an m x n sub-block: B(bi:bi+m, bj:bj+n)
// (host, leading dim ldb) into A(ai:ai+m, aj:aj+n) (device, leading dim lda).
// All offsets/dims are in elements; elemSize converts to bytes.
static inline CUresult cuMemcpyHtoD2DAsync(CUdeviceptr A, size_t lda, size_t ai, size_t aj,
                                           const void * B, size_t ldb, size_t bi, size_t bj,
                                           size_t m, size_t n, size_t elemSize, CUstream stream) {
  CUDA_MEMCPY2D copy = {
    bi * elemSize, bj, CU_MEMORYTYPE_HOST, B, 0, 0, ldb * elemSize,
    ai * elemSize, aj, CU_MEMORYTYPE_DEVICE, NULL, A, 0, lda * elemSize,
    m * elemSize, n };
  return cuMemcpy2DAsync(&copy, stream);
}

// Async 2D device->host copy, mirror of the helper above.
static inline CUresult cuMemcpyDtoH2DAsync(void * A, size_t lda, size_t ai, size_t aj,
                                           CUdeviceptr B, size_t ldb, size_t bi, size_t bj,
                                           size_t m, size_t n, size_t elemSize, CUstream stream) {
  CUDA_MEMCPY2D copy = {
    bi * elemSize, bj, CU_MEMORYTYPE_DEVICE, NULL, B, 0, ldb * elemSize,
    ai * elemSize, aj, CU_MEMORYTYPE_HOST, A, 0, 0, lda * elemSize,
    m * elemSize, n };
  return cuMemcpy2DAsync(&copy, stream);
}

static const float zero = 0.0f;
static const float one = 1.0f;

// CPU reference SSYRK: symmetric rank-k update of the uplo triangle of C,
//   trans == CBlasNoTrans : C := alpha*A*A' + beta*C   (A is n x k)
//   otherwise             : C := alpha*A'*A + beta*C   (A is k x n)
// Column-major storage; only the uplo triangle of C is written.
// Argument numbers passed to XERBLA follow the reference-BLAS convention.
void ssyrk(CBlasUplo uplo, CBlasTranspose trans,
           size_t n, size_t k,
           float alpha, const float * restrict A, size_t lda,
           float beta, float * restrict C, size_t ldc) {
  const size_t nRowA = (trans == CBlasNoTrans) ? n : k;

  int info = 0;
  if (lda < nRowA)
    info = 7;
  else if (ldc < n)
    info = 10;
  if (info != 0) {
    XERBLA(info);
    return;
  }

  // quick return: nothing to do
  if (n == 0 || ((alpha == zero || k == 0) && beta == one))
    return;

  // alpha == 0: only scale (or clear) the uplo triangle of C
  if (alpha == zero) {
    if (uplo == CBlasUpper) {
      if (beta == zero) {
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          for (size_t i = 0; i <= j; i++)
            C[j * ldc + i] = zero;
        }
      }
      else {
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          for (size_t i = 0; i <= j; i++)
            C[j * ldc + i] *= beta;
        }
      }
    }
    else {
      if (beta == zero) {
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          for (size_t i = j; i < n; i++)
            C[j * ldc + i] = zero;
        }
      }
      else {
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          for (size_t i = j; i < n; i++)
            C[j * ldc + i] *= beta;
        }
      }
    }
    return;
  }

  if (trans == CBlasNoTrans) {
    // C := alpha*A*A' + beta*C, accumulating one column of C at a time
    if (uplo == CBlasUpper) {
#pragma omp parallel for
      for (size_t j = 0; j < n; j++) {
        if (beta == zero) {
          for (size_t i = 0; i <= j; i++)
            C[j * ldc + i] = zero;
        }
        else if (beta != one) {
          for (size_t i = 0; i <= j; i++)
            C[j * ldc + i] *= beta;
        }
        for (size_t l = 0; l < k; l++) {
          // skip the whole saxpy when the multiplier is zero
          if (A[l * lda + j] != zero) {
            register float temp = alpha * A[l * lda + j];
            for (size_t i = 0; i <= j; i++)
              C[j * ldc + i] += temp * A[l * lda + i];
          }
        }
      }
    }
    else {
#pragma omp parallel for
      for (size_t j = 0; j < n; j++) {
        if (beta == zero) {
          for (size_t i = j; i < n; i++)
            C[j * ldc + i] = zero;
        }
        else if (beta != one) {
          for (size_t i = j; i < n; i++)
            C[j * ldc + i] *= beta;
        }
        for (size_t l = 0; l < k; l++) {
          if (A[l * lda + j] != zero) {
            register float temp = alpha * A[l * lda + j];
            for (size_t i = j; i < n; i++)
              C[j * ldc + i] += temp * A[l * lda + i];
          }
        }
      }
    }
  }
  else {
    // C := alpha*A'*A + beta*C, dot-product formulation
    if (uplo == CBlasUpper) {
#pragma omp parallel for
      for (size_t j = 0; j < n; j++) {
        for (size_t i = 0; i <= j; i++) {
          register float temp = zero;
          for (size_t l = 0; l < k; l++)
            temp += A[i * lda + l] * A[j * lda + l];
          if (beta == zero)
            C[j * ldc + i] = alpha * temp;
          else
            C[j * ldc + i] = alpha * temp + beta * C[j * ldc + i];
        }
      }
    }
    else {
#pragma omp parallel for
      for (size_t j = 0; j < n; j++) {
        for (size_t i = j; i < n; i++) {
          register float temp = zero;
          for (size_t l = 0; l < k; l++)
            temp += A[i * lda + l] * A[j * lda + l];
          if (beta == zero)
            C[j * ldc + i] = alpha * temp;
          else
            C[j * ldc + i] = alpha * temp + beta * C[j * ldc + i];
        }
      }
    }
  }
}

// Single-GPU SSYRK: launches the templated ssyrk kernel from the fatbin
// (loaded lazily into handle->ssyrk).  A and C are device pointers; the
// launch is asynchronous on `stream`.
CUresult cuSsyrk(CUBLAShandle handle,
                 CBlasUplo uplo, CBlasTranspose trans,
                 size_t n, size_t k,
                 float alpha, CUdeviceptr A, size_t lda,
                 float beta, CUdeviceptr C, size_t ldc,
                 CUstream stream) {
  const size_t nRowA = (trans == CBlasNoTrans) ? n : k;

  int info = 0;
  if (lda < nRowA)
    info = 7;
  else if (ldc < n)
    info = 10;
  if (info != 0) {
    XERBLA(info);
    return CUDA_ERROR_INVALID_VALUE;
  }

  if (n == 0 || ((alpha == zero || k == 0) && beta == one))
    return CUDA_SUCCESS;

  CU_ERROR_CHECK(cuCtxPushCurrent(handle->context));

  // lazily load the compiled kernel module on first use
  if (handle->ssyrk == NULL)
    CU_ERROR_CHECK(cuModuleLoadData(&handle->ssyrk, imageBytes));

  // tile/block shape: the kernel is compiled for two configurations,
  // selected by the transpose mode
  const unsigned int mb = (trans == CBlasNoTrans) ? 64 : 32;
  const unsigned int nb = (trans == CBlasNoTrans) ? 16 : 32;
  const unsigned int kb = (trans == CBlasNoTrans) ? 16 :  8;
  const unsigned int bx = (trans == CBlasNoTrans) ? 16 :  8;
  const unsigned int by = (trans == CBlasNoTrans) ?  4 :  8;

  // mangled name of the template instantiation to fetch from the module
  char name[82];
  snprintf(name, 82,
           "_Z5ssyrkIL9CBlasUplo%dEL14CBlasTranspose%dELj%uELj%uELj%uELj%uELj%uEEvPKfPfffiiii",
           uplo, trans, mb, nb, kb, bx, by);

  CUfunction function;
  CU_ERROR_CHECK(cuModuleGetFunction(&function, handle->ssyrk, name));

  void * params[] = { &A, &C, &alpha, &beta, &lda, &ldc, &n, &k };

  // NOTE(review): the grid covers the full n x n matrix of tiles, not just
  // the uplo triangle (a triangular launch is sketched in the commented-out
  // lines below) — presumably the kernel discards out-of-triangle tiles.
  // unsigned int blocks = (unsigned int)(n + nb - 1) / nb;
  // blocks = (blocks * (blocks + 1)) / 2;
  CU_ERROR_CHECK(cuLaunchKernel(function,
                                (unsigned int)(n + mb - 1) / mb,
                                (unsigned int)(n + nb - 1) / nb, 1,
                                bx, by, 1,
                                0, stream, params, NULL));

  CU_ERROR_CHECK(cuCtxPopCurrent(&handle->context));

  return CUDA_SUCCESS;
}

// Multi-GPU blocked SSYRK on host data: the off-diagonal panels are
// delegated to cuMultiGPUSgemm, and the nb x nb diagonal blocks are done
// with the CPU ssyrk above (a GEMM cannot produce them symmetrically).
CUresult cuMultiGPUSsyrk(CUmultiGPUBLAShandle handle,
                         CBlasUplo uplo, CBlasTranspose trans,
                         size_t n, size_t k,
                         float alpha, const float * restrict A, size_t lda,
                         float beta, float * restrict C, size_t ldc) {
  const size_t nRowA = (trans == CBlasNoTrans) ? n : k;

  int info = 0;
  if (lda < nRowA)
    info = 7;
  else if (ldc < n)
    info = 10;
  if (info != 0) {
    XERBLA(info);
    return CUDA_ERROR_INVALID_VALUE;
  }

  if (n == 0 || ((alpha == zero || k == 0) && beta == one))
    return CUDA_SUCCESS;

  // alpha == 0: pure scaling of the uplo triangle, done on the host
  if (alpha == zero) {
    if (uplo == CBlasUpper) {
      if (beta == zero) {
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          for (size_t i = 0; i <= j; i++)
            C[j * ldc + i] = zero;
        }
      }
      else {
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          for (size_t i = 0; i <= j; i++)
            C[j * ldc + i] *= beta;
        }
      }
    }
    else {
      if (beta == zero) {
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          for (size_t i = j; i < n; i++)
            C[j * ldc + i] = zero;
        }
      }
      else {
#pragma omp parallel for
        for (size_t j = 0; j < n; j++) {
          for (size_t i = j; i < n; i++)
            C[j * ldc + i] *= beta;
        }
      }
    }
    return CUDA_SUCCESS;
  }

  // panel width, tuned per transpose mode
  const size_t nb = (trans == CBlasNoTrans) ? SGEMM_N_MB : SGEMM_T_NB;

  // small problems are not worth distributing
  if (n < nb) {
    ssyrk(uplo, trans, n, k, alpha, A, lda, beta, C, ldc);
    return CUDA_SUCCESS;
  }

  if (trans == CBlasNoTrans) {
    if (uplo == CBlasUpper) {
      // strictly-upper panels: C(0:j, j:j+jb) += alpha * A(0:j,:) * A(j:j+jb,:)'
      for (size_t j = nb; j < n; j += nb)
        CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasNoTrans, CBlasTrans, j, min(n - j, nb), k, alpha, A, lda, &A[j], lda, beta, &C[j * ldc], ldc));
    }
    else {
      // strictly-lower panels below each diagonal block
      const size_t m = n - nb;
      for (size_t j = 0; j < m; j += nb) {
        const size_t jb = min(n - j, nb);
        CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasNoTrans, CBlasTrans, n - j - jb, jb, k, alpha, &A[j + jb], lda, &A[j], lda, beta, &C[j * ldc + j + jb], ldc));
      }
    }
    // diagonal blocks on the CPU
    for (size_t j = 0; j < n; j += nb)
      ssyrk(uplo, trans, min(n - j, nb), k, alpha, &A[j], lda, beta, &C[j * ldc + j], ldc);
  }
  else {
    if (uplo == CBlasUpper) {
      for (size_t j = nb; j < n; j += nb)
        CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasTrans, CBlasNoTrans, j, min(n - j, nb), k, alpha, A, lda, &A[j * lda], lda, beta, &C[j * ldc], ldc));
    }
    else {
      const size_t m = n - nb;
      for (size_t j = 0; j < m; j += nb) {
        const size_t jb = min(n - j, nb);
        CU_ERROR_CHECK(cuMultiGPUSgemm(handle, CBlasTrans, CBlasNoTrans, n - j - jb, jb, k, alpha, &A[(j + jb) * lda], lda, &A[j * lda], lda, beta, &C[j * ldc + j + jb], ldc));
      }
    }
    // diagonal blocks on the CPU
    for (size_t j = 0; j < n; j += nb)
      ssyrk(uplo, trans, min(n - j, nb), k, alpha, &A[j * lda], lda, beta, &C[j * ldc + j], ldc);
  }

  return CUDA_SUCCESS;
}
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 24; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/

    // CLooG/PLUTO-generated tiled loop nest for the 7-point stencil.
    // t1..t4 enumerate tiles, t5 is the (skewed) time step, and t6/t7/t8
    // are the z/y/x point coordinates shifted by t5 (hence the (-t5+t6)
    // style indices).  Do not hand-edit the bounds.
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;

    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,4);t1++) {
        lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
        ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
        // parallelize over the second tile dimension; each t2 is independent
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-5,6)),ceild(8*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(4*t1+Ny+5,24)),floord(8*t2+Ny+4,24)),floord(8*t1-8*t2+Nz+Ny+3,24));t3++) {
            for (t4=max(max(max(0,ceild(t1-127,128)),ceild(8*t2-Nz-508,512)),ceild(24*t3-Ny-508,512));t4<=min(min(min(min(floord(Nt+Nx-4,512),floord(4*t1+Nx+5,512)),floord(8*t2+Nx+4,512)),floord(24*t3+Nx+20,512)),floord(8*t1-8*t2+Nz+Nx+3,512));t4++) {
              for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),24*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),24*t3+22),512*t4+510),8*t1-8*t2+Nz+5);t5++) {
                for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
                    lbv=max(512*t4,t5+1);
                    ubv=min(512*t4+511,t5+Nx-2);
                    // innermost x loop; vectorization hints for icc
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      // 7-point update: A[next] = alpha*center + beta*(6 neighbors)
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);

    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /*  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
*/
  return 0;
}
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 32; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/

    // CLooG/PLUTO-generated tiled loop nest for the 7-point stencil
    // (16 x 16 x 32 x 64 tiling).  t1..t4 enumerate tiles, t5 is the
    // (skewed) time step, and t6/t7/t8 are the z/y/x point coordinates
    // shifted by t5 (hence the (-t5+t6) style indices).  Do not hand-edit
    // the bounds.
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;

    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,8);t1++) {
        lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
        ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
        // parallelize over the second tile dimension; each t2 is independent
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(8*t1+Ny+13,32)),floord(16*t2+Ny+12,32)),floord(16*t1-16*t2+Nz+Ny+11,32));t3++) {
            for (t4=max(max(max(0,ceild(t1-7,8)),ceild(16*t2-Nz-60,64)),ceild(32*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(8*t1+Nx+13,64)),floord(16*t2+Nx+12,64)),floord(32*t3+Nx+28,64)),floord(16*t1-16*t2+Nz+Nx+11,64));t4++) {
              for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),32*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),32*t3+30),64*t4+62),16*t1-16*t2+Nz+13);t5++) {
                for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
                    lbv=max(64*t4,t5+1);
                    ubv=min(64*t4+63,t5+Nx-2);
                    // innermost x loop; vectorization hints for icc
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      // 7-point update: A[next] = alpha*center + beta*(6 neighbors)
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);

    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /*  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
*/
  return 0;
}
many_transfers.c
#include <stdio.h> #define N 17 #define M 100 void init(int* array, int size, int scale) { for (int i = 0; i < size; i++) array[i] = i*scale; } int main() { int tab[N][M]; /* The schedule(static, 1) enforces each iteration to be executed in a different thread, whatever the number of CPU is: */ #pragma omp parallel for schedule(static, 1) for (int i = 0; i < N; i++) { // Map on STHORM in a cyclic way on the cluster first: #pragma smecy map(STHORM, i%2, (i/4)%2) \ arg(1,out,[N][M],/[i][]) \ arg(2,in) \ arg(3,in) init(&tab[i][0], M, i+1); } for (int i = 0; i < N; i++) { printf("Line %d :", i); for (int j = 0; j < M; j += M/10) printf("tab[%d][%d] = %d ", i, j, tab[i][j]); puts(""); } return 0; }
distort.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT % % D D I SS T O O R R T % % D D I SSS T O O RRRR T % % D D I SS T O O R R T % % DDDD IIIII SSSSS T OOO R R T % % % % % % MagickCore Image Distortion Methods % % % % Software Design % % Cristy % % Anthony Thyssen % % June 2007 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distort.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/image.h" #include "MagickCore/linked-list.h" #include "MagickCore/list.h" #include "MagickCore/matrix.h" #include "MagickCore/matrix-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/shear.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" /* Numerous internal routines for image distortions. 
*/
static inline void AffineArgsToCoefficients(double *affine)
{
  /* Reorder the external affine arguments sx,ry,rx,sy,tx,ty into the
     internal coefficient order c0,c2,c4,c1,c3,c5.  Slots 0 and 5 are
     already in place; only slots 1..4 are permuted. */
  const double
    a1 = affine[1],
    a2 = affine[2],
    a3 = affine[3],
    a4 = affine[4];

  affine[1]=a2;
  affine[2]=a4;
  affine[3]=a1;
  affine[4]=a3;
}

static inline void CoefficientsToAffineArgs(double *coeff)
{
  /* Reorder the internal coefficients c0,c1,c2,c3,c4,c5 back into the
     external affine argument order sx,ry,rx,sy,tx,ty — the inverse of
     the permutation applied by AffineArgsToCoefficients().  Slots 0 and
     5 are left untouched. */
  const double
    c1 = coeff[1],
    c2 = coeff[2],
    c3 = coeff[3],
    c4 = coeff[4];

  coeff[1]=c3;
  coeff[2]=c1;
  coeff[3]=c4;
  coeff[4]=c2;
}

static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
  /* Invert a 6-coefficient affine mapping.
     From "Digital Image Warping" by George Wolberg, page 50.
     PerceptibleReciprocal() guards against a vanishing determinant. */
  double
    det;

  det=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]);
  /* 2x2 linear part (adjugate over determinant)... */
  inverse[0]=det*coeff[4];
  inverse[3]=det*(-coeff[3]);
  inverse[1]=det*(-coeff[1]);
  inverse[4]=det*coeff[0];
  /* ...then the translation terms. */
  inverse[2]=det*(coeff[1]*coeff[5]-coeff[2]*coeff[4]);
  inverse[5]=det*(coeff[2]*coeff[3]-coeff[0]*coeff[5]);
}

static void InvertPerspectiveCoefficients(const double *coeff,
  double *inverse)
{
  /* Invert an 8-coefficient perspective (homography) mapping.
     From "Digital Image Warping" by George Wolberg, page 53.
     PerceptibleReciprocal() guards against a vanishing determinant. */
  double
    det;

  det=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]);
  inverse[0]=det*(coeff[4]-coeff[7]*coeff[5]);
  inverse[1]=det*(coeff[7]*coeff[2]-coeff[1]);
  inverse[2]=det*(coeff[1]*coeff[5]-coeff[4]*coeff[2]);
  inverse[3]=det*(coeff[6]*coeff[5]-coeff[3]);
  inverse[4]=det*(coeff[0]-coeff[6]*coeff[2]);
  inverse[5]=det*(coeff[3]*coeff[2]-coeff[0]*coeff[5]);
  inverse[6]=det*(coeff[3]*coeff[7]-coeff[6]*coeff[4]);
  inverse[7]=det*(coeff[6]*coeff[1]-coeff[0]*coeff[7]);
}
/*
 * Polynomial Term Defining Functions
 *
 * Order must either be an integer, or 1.5 to produce
 * the 2 dimensional polynomial function...
* affine 1 (3) u = c0 + c1*x + c2*y * bilinear 1.5 (4) u = '' + c3*x*y * quadratic 2 (6) u = '' + c4*x*x + c5*y*y * cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3 * quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4 * quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5 * number in parenthesis minimum number of points needed. * Anything beyond quintic, has not been implemented until * a more automated way of determining terms is found. * Note the slight re-ordering of the terms for a quadratic polynomial * which is to allow the use of a bi-linear (order=1.5) polynomial. * All the later polynomials are ordered simply from x^N to y^N */ static size_t poly_number_terms(double order) { /* Return the number of terms for a 2d polynomial */ if ( order < 1 || order > 5 || ( order != floor(order) && (order-1.5) > MagickEpsilon) ) return 0; /* invalid polynomial order */ return((size_t) floor((order+1)*(order+2)/2)); } static double poly_basis_fn(ssize_t n, double x, double y) { /* Return the result for this polynomial term */ switch(n) { case 0: return( 1.0 ); /* constant */ case 1: return( x ); case 2: return( y ); /* affine order = 1 terms = 3 */ case 3: return( x*y ); /* bilinear order = 1.5 terms = 4 */ case 4: return( x*x ); case 5: return( y*y ); /* quadratic order = 2 terms = 6 */ case 6: return( x*x*x ); case 7: return( x*x*y ); case 8: return( x*y*y ); case 9: return( y*y*y ); /* cubic order = 3 terms = 10 */ case 10: return( x*x*x*x ); case 11: return( x*x*x*y ); case 12: return( x*x*y*y ); case 13: return( x*y*y*y ); case 14: return( y*y*y*y ); /* quartic order = 4 terms = 15 */ case 15: return( x*x*x*x*x ); case 16: return( x*x*x*x*y ); case 17: return( x*x*x*y*y ); case 18: return( x*x*y*y*y ); case 19: return( x*y*y*y*y ); case 20: return( y*y*y*y*y ); /* quintic order = 5 terms = 21 */ } return( 0 ); /* should never happen */ } static const char *poly_basis_str(ssize_t n) { /* return the result for this polynomial term */ switch(n) { case 0: return(""); 
/* constant */ case 1: return("*ii"); case 2: return("*jj"); /* affine order = 1 terms = 3 */ case 3: return("*ii*jj"); /* bilinear order = 1.5 terms = 4 */ case 4: return("*ii*ii"); case 5: return("*jj*jj"); /* quadratic order = 2 terms = 6 */ case 6: return("*ii*ii*ii"); case 7: return("*ii*ii*jj"); case 8: return("*ii*jj*jj"); case 9: return("*jj*jj*jj"); /* cubic order = 3 terms = 10 */ case 10: return("*ii*ii*ii*ii"); case 11: return("*ii*ii*ii*jj"); case 12: return("*ii*ii*jj*jj"); case 13: return("*ii*jj*jj*jj"); case 14: return("*jj*jj*jj*jj"); /* quartic order = 4 terms = 15 */ case 15: return("*ii*ii*ii*ii*ii"); case 16: return("*ii*ii*ii*ii*jj"); case 17: return("*ii*ii*ii*jj*jj"); case 18: return("*ii*ii*jj*jj*jj"); case 19: return("*ii*jj*jj*jj*jj"); case 20: return("*jj*jj*jj*jj*jj"); /* quintic order = 5 terms = 21 */ } return( "UNKNOWN" ); /* should never happen */ } static double poly_basis_dx(ssize_t n, double x, double y) { /* polynomial term for x derivative */ switch(n) { case 0: return( 0.0 ); /* constant */ case 1: return( 1.0 ); case 2: return( 0.0 ); /* affine order = 1 terms = 3 */ case 3: return( y ); /* bilinear order = 1.5 terms = 4 */ case 4: return( x ); case 5: return( 0.0 ); /* quadratic order = 2 terms = 6 */ case 6: return( x*x ); case 7: return( x*y ); case 8: return( y*y ); case 9: return( 0.0 ); /* cubic order = 3 terms = 10 */ case 10: return( x*x*x ); case 11: return( x*x*y ); case 12: return( x*y*y ); case 13: return( y*y*y ); case 14: return( 0.0 ); /* quartic order = 4 terms = 15 */ case 15: return( x*x*x*x ); case 16: return( x*x*x*y ); case 17: return( x*x*y*y ); case 18: return( x*y*y*y ); case 19: return( y*y*y*y ); case 20: return( 0.0 ); /* quintic order = 5 terms = 21 */ } return( 0.0 ); /* should never happen */ } static double poly_basis_dy(ssize_t n, double x, double y) { /* polynomial term for y derivative */ switch(n) { case 0: return( 0.0 ); /* constant */ case 1: return( 0.0 ); case 2: return( 1.0 ); /* 
affine order = 1 terms = 3 */ case 3: return( x ); /* bilinear order = 1.5 terms = 4 */ case 4: return( 0.0 ); case 5: return( y ); /* quadratic order = 2 terms = 6 */ default: return( poly_basis_dx(n-1,x,y) ); /* weird but true */ } /* NOTE: the only reason that last is not true for 'quadratic' is due to the re-arrangement of terms to allow for 'bilinear' */ } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A f f i n e T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AffineTransformImage() transforms an image as dictated by the affine matrix. % It allocates the memory necessary for the new Image structure and returns % a pointer to the new image. % % The format of the AffineTransformImage method is: % % Image *AffineTransformImage(const Image *image, % AffineMatrix *affine_matrix,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o affine_matrix: the affine matrix. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AffineTransformImage(const Image *image, const AffineMatrix *affine_matrix,ExceptionInfo *exception) { double distort[6]; Image *deskew_image; /* Affine transform image. 
*/ assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(affine_matrix != (AffineMatrix *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); distort[0]=affine_matrix->sx; distort[1]=affine_matrix->rx; distort[2]=affine_matrix->ry; distort[3]=affine_matrix->sy; distort[4]=affine_matrix->tx; distort[5]=affine_matrix->ty; deskew_image=DistortImage(image,AffineProjectionDistortion,6,distort, MagickTrue,exception); return(deskew_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e n e r a t e C o e f f i c i e n t s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GenerateCoefficients() takes user provided input arguments and generates % the coefficients, needed to apply the specific distortion for either % distorting images (generally using control points) or generating a color % gradient from sparsely separated color points. % % The format of the GenerateCoefficients() method is: % % Image *GenerateCoefficients(const Image *image,DistortMethod method, % const size_t number_arguments,const double *arguments, % size_t number_values, ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image to be distorted. % % o method: the method of image distortion/ sparse gradient % % o number_arguments: the number of arguments given. % % o arguments: the arguments for this distortion method. 
% % o number_values: the style and format of given control points, (caller type) % 0: 2 dimensional mapping of control points (Distort) % Format: u,v,x,y where u,v is the 'source' of the % the color to be plotted, for DistortImage() % N: Interpolation of control points with N values (usally r,g,b) % Format: x,y,r,g,b mapping x,y to color values r,g,b % IN future, variable number of values may be given (1 to N) % % o exception: return any errors or warnings in this structure % % Note that the returned array of double values must be freed by the % calling method using RelinquishMagickMemory(). This however may change in % the future to require a more 'method' specific method. % % Because of this this method should not be classed as stable or used % outside other MagickCore library methods. */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } static double *GenerateCoefficients(const Image *image, DistortMethod *method,const size_t number_arguments,const double *arguments, size_t number_values,ExceptionInfo *exception) { double *coeff; register size_t i; size_t number_coeff, /* number of coefficients to return (array size) */ cp_size, /* number floating point numbers per control point */ cp_x,cp_y, /* the x,y indexes for control point */ cp_values; /* index of values for this control point */ /* number_values Number of values given per control point */ if ( number_values == 0 ) { /* Image distortion using control points (or other distortion) That is generate a mapping so that x,y->u,v given u,v,x,y */ number_values = 2; /* special case: two values of u,v */ cp_values = 0; /* the values i,j are BEFORE the destination CP x,y */ cp_x = 2; /* location of x,y in input control values */ cp_y = 3; /* NOTE: cp_values, also used for later 'reverse map distort' tests */ } else { cp_x = 0; /* location of x,y in input control values */ cp_y = 1; cp_values = 2; /* and the 
other values are after x,y */ /* Typically in this case the values are R,G,B color values */ } cp_size = number_values+2; /* each CP defintion involves this many numbers */ /* If not enough control point pairs are found for specific distortions fall back to Affine distortion (allowing 0 to 3 point pairs) */ if ( number_arguments < 4*cp_size && ( *method == BilinearForwardDistortion || *method == BilinearReverseDistortion || *method == PerspectiveDistortion ) ) *method = AffineDistortion; number_coeff=0; switch (*method) { case AffineDistortion: /* also BarycentricColorInterpolate: */ number_coeff=3*number_values; break; case PolynomialDistortion: /* number of coefficents depend on the given polynomal 'order' */ i = poly_number_terms(arguments[0]); number_coeff = 2 + i*number_values; if ( i == 0 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","Polynomial", "Invalid order, should be interger 1 to 5, or 1.5"); return((double *) NULL); } if ( number_arguments < 1+i*cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", "Polynomial", (double) i); return((double *) NULL); } break; case BilinearReverseDistortion: number_coeff=4*number_values; break; /* The rest are constants as they are only used for image distorts */ case BilinearForwardDistortion: number_coeff=10; /* 2*4 coeff plus 2 constants */ cp_x = 0; /* Reverse src/dest coords for forward mapping */ cp_y = 1; cp_values = 2; break; #if 0 case QuadraterialDistortion: number_coeff=19; /* BilinearForward + BilinearReverse */ #endif break; case ShepardsDistortion: number_coeff=1; /* The power factor to use */ break; case ArcDistortion: number_coeff=5; break; case ScaleRotateTranslateDistortion: case AffineProjectionDistortion: case Plane2CylinderDistortion: case Cylinder2PlaneDistortion: number_coeff=6; break; case PolarDistortion: case DePolarDistortion: number_coeff=8; break; case 
PerspectiveDistortion: case PerspectiveProjectionDistortion: number_coeff=9; break; case BarrelDistortion: case BarrelInverseDistortion: number_coeff=10; break; default: perror("unknown method given"); /* just fail assertion */ } /* allocate the array of coefficients needed */ coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff)); if (coeff == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "GenerateCoefficients"); return((double *) NULL); } /* zero out coefficients array */ for (i=0; i < number_coeff; i++) coeff[i] = 0.0; switch (*method) { case AffineDistortion: { /* Affine Distortion v = c0*x + c1*y + c2 for each 'value' given Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... */ if ( number_arguments%cp_size != 0 || number_arguments < cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", "Affine", 1.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* handle special cases of not enough arguments */ if ( number_arguments == cp_size ) { /* Only 1 CP Set Given */ if ( cp_values == 0 ) { /* image distortion - translate the image */ coeff[0] = 1.0; coeff[2] = arguments[0] - arguments[2]; coeff[4] = 1.0; coeff[5] = arguments[1] - arguments[3]; } else { /* sparse gradient - use the values directly */ for (i=0; i<number_values; i++) coeff[i*3+2] = arguments[cp_values+i]; } } else { /* 2 or more points (usally 3) given. Solve a least squares simultaneous equation for coefficients. 
*/ double **matrix, **vectors, terms[3]; MagickBooleanType status; /* create matrix, and a fake vectors matrix */ matrix = AcquireMagickMatrix(3UL,3UL); vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors)); if (matrix == (double **) NULL || vectors == (double **) NULL) { matrix = RelinquishMagickMatrix(matrix, 3UL); vectors = (double **) RelinquishMagickMemory(vectors); coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* fake a number_values x3 vectors matrix from coefficients array */ for (i=0; i < number_values; i++) vectors[i] = &(coeff[i*3]); /* Add given control point pairs for least squares solving */ for (i=0; i < number_arguments; i+=cp_size) { terms[0] = arguments[i+cp_x]; /* x */ terms[1] = arguments[i+cp_y]; /* y */ terms[2] = 1; /* 1 */ LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[i+cp_values]),3UL,number_values); } if ( number_arguments == 2*cp_size ) { /* Only two pairs were given, but we need 3 to solve the affine. Fake extra coordinates by rotating p1 around p0 by 90 degrees. 
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0) */ terms[0] = arguments[cp_x] - ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */ terms[1] = arguments[cp_y] + + ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */ terms[2] = 1; /* 1 */ if ( cp_values == 0 ) { /* Image Distortion - rotate the u,v coordients too */ double uv2[2]; uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */ uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */ LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL); } else { /* Sparse Gradient - use values of p0 for linear gradient */ LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[cp_values]),3UL,number_values); } } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,3UL,number_values); matrix = RelinquishMagickMatrix(matrix, 3UL); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } } return(coeff); } case AffineProjectionDistortion: { /* Arguments: Affine Matrix (forward mapping) Arguments sx, rx, ry, sy, tx, ty Where u = sx*x + ry*y + tx v = rx*x + sy*y + ty Returns coefficients (in there inverse form) ordered as... sx ry tx rx sy ty AffineProjection Distortion Notes... 
+ Will only work with a 2 number_values for Image Distortion + Can not be used for generating a sparse gradient (interpolation) */ double inverse[8]; if (number_arguments != 6) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Needs 6 coeff values'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */ for(i=0; i<6UL; i++ ) inverse[i] = arguments[i]; AffineArgsToCoefficients(inverse); /* map into coefficents */ InvertAffineCoefficients(inverse, coeff); /* invert */ *method = AffineDistortion; return(coeff); } case ScaleRotateTranslateDistortion: { /* Scale, Rotate and Translate Distortion An alternative Affine Distortion Argument options, by number of arguments given: 7: x,y, sx,sy, a, nx,ny 6: x,y, s, a, nx,ny 5: x,y, sx,sy, a 4: x,y, s, a 3: x,y, a 2: s, a 1: a Where actions are (in order of application) x,y 'center' of transforms (default = image center) sx,sy scale image by this amount (default = 1) a angle of rotation (argument required) nx,ny move 'center' here (default = x,y or no movement) And convert to affine mapping coefficients ScaleRotateTranslate Distortion Notes... 
+ Does not use a set of CPs in any normal way + Will only work with a 2 number_valuesal Image Distortion + Cannot be used for generating a sparse gradient (interpolation) */ double cosine, sine, x,y,sx,sy,a,nx,ny; /* set default center, and default scale */ x = nx = (double)(image->columns)/2.0 + (double)image->page.x; y = ny = (double)(image->rows)/2.0 + (double)image->page.y; sx = sy = 1.0; switch ( number_arguments ) { case 0: coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Needs at least 1 argument'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); case 1: a = arguments[0]; break; case 2: sx = sy = arguments[0]; a = arguments[1]; break; default: x = nx = arguments[0]; y = ny = arguments[1]; switch ( number_arguments ) { case 3: a = arguments[2]; break; case 4: sx = sy = arguments[2]; a = arguments[3]; break; case 5: sx = arguments[2]; sy = arguments[3]; a = arguments[4]; break; case 6: sx = sy = arguments[2]; a = arguments[3]; nx = arguments[4]; ny = arguments[5]; break; case 7: sx = arguments[2]; sy = arguments[3]; a = arguments[4]; nx = arguments[5]; ny = arguments[6]; break; default: coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Too Many Arguments (7 or less)'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } break; } /* Trap if sx or sy == 0 -- image is scaled out of existance! 
*/ if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Zero Scale Given'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* Save the given arguments as an affine distortion */ a=DegreesToRadians(a); cosine=cos(a); sine=sin(a); *method = AffineDistortion; coeff[0]=cosine/sx; coeff[1]=sine/sx; coeff[2]=x-nx*coeff[0]-ny*coeff[1]; coeff[3]=(-sine)/sy; coeff[4]=cosine/sy; coeff[5]=y-nx*coeff[3]-ny*coeff[4]; return(coeff); } case PerspectiveDistortion: { /* Perspective Distortion (a ratio of affine distortions) p(x,y) c0*x + c1*y + c2 u = ------ = ------------------ r(x,y) c6*x + c7*y + 1 q(x,y) c3*x + c4*y + c5 v = ------ = ------------------ r(x,y) c6*x + c7*y + 1 c8 = Sign of 'r', or the denominator affine, for the actual image. This determines what part of the distorted image is 'ground' side of the horizon, the other part is 'sky' or invalid. Valid values are +1.0 or -1.0 only. Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... Perspective Distortion Notes... + Can be thought of as ratio of 3 affine transformations + Not separatable: r() or c6 and c7 are used by both equations + All 8 coefficients must be determined simultaniously + Will only work with a 2 number_valuesal Image Distortion + Can not be used for generating a sparse gradient (interpolation) + It is not linear, but is simple to generate an inverse + All lines within an image remain lines. + but distances between points may vary. 
*/ double **matrix, *vectors[1], terms[8]; size_t cp_u = cp_values, cp_v = cp_values+1; MagickBooleanType status; if ( number_arguments%cp_size != 0 || number_arguments < cp_size*4 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* fake 1x8 vectors matrix directly using the coefficients array */ vectors[0] = &(coeff[0]); /* 8x8 least-squares matrix (zeroed) */ matrix = AcquireMagickMatrix(8UL,8UL); if (matrix == (double **) NULL) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* Add control points for least squares solving */ for (i=0; i < number_arguments; i+=4) { terms[0]=arguments[i+cp_x]; /* c0*x */ terms[1]=arguments[i+cp_y]; /* c1*y */ terms[2]=1.0; /* c2*1 */ terms[3]=0.0; terms[4]=0.0; terms[5]=0.0; terms[6]=-terms[0]*arguments[i+cp_u]; /* 1/(c6*x) */ terms[7]=-terms[1]*arguments[i+cp_u]; /* 1/(c7*y) */ LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]), 8UL,1UL); terms[0]=0.0; terms[1]=0.0; terms[2]=0.0; terms[3]=arguments[i+cp_x]; /* c3*x */ terms[4]=arguments[i+cp_y]; /* c4*y */ terms[5]=1.0; /* c5*1 */ terms[6]=-terms[3]*arguments[i+cp_v]; /* 1/(c6*x) */ terms[7]=-terms[4]*arguments[i+cp_v]; /* 1/(c7*y) */ LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]), 8UL,1UL); } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,8UL,1UL); matrix = RelinquishMagickMatrix(matrix, 8UL); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) 
); return((double *) NULL); } /* Calculate 9'th coefficient! The ground-sky determination. What is sign of the 'ground' in r() denominator affine function? Just use any valid image coordinate (first control point) in destination for determination of what part of view is 'ground'. */ coeff[8] = coeff[6]*arguments[cp_x] + coeff[7]*arguments[cp_y] + 1.0; coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0; return(coeff); } case PerspectiveProjectionDistortion: { /* Arguments: Perspective Coefficents (forward mapping) */ if (number_arguments != 8) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'Needs 8 coefficient values'", CommandOptionToMnemonic(MagickDistortOptions, *method)); return((double *) NULL); } /* FUTURE: trap test c0*c4-c3*c1 == 0 (determinate = 0, no inverse) */ InvertPerspectiveCoefficients(arguments, coeff); /* Calculate 9'th coefficient! The ground-sky determination. What is sign of the 'ground' in r() denominator affine function? Just use any valid image cocodinate in destination for determination. For a forward mapped perspective the images 0,0 coord will map to c2,c5 in the distorted image, so set the sign of denominator of that. */ coeff[8] = coeff[6]*arguments[2] + coeff[7]*arguments[5] + 1.0; coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0; *method = PerspectiveDistortion; return(coeff); } case BilinearForwardDistortion: case BilinearReverseDistortion: { /* Bilinear Distortion (Forward mapping) v = c0*x + c1*y + c2*x*y + c3; for each 'value' given This is actually a simple polynomial Distortion! The difference however is when we need to reverse the above equation to generate a BilinearForwardDistortion (see below). Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... 
*/ double **matrix, **vectors, terms[4]; MagickBooleanType status; /* check the number of arguments */ if ( number_arguments%cp_size != 0 || number_arguments < cp_size*4 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* create matrix, and a fake vectors matrix */ matrix = AcquireMagickMatrix(4UL,4UL); vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors)); if (matrix == (double **) NULL || vectors == (double **) NULL) { matrix = RelinquishMagickMatrix(matrix, 4UL); vectors = (double **) RelinquishMagickMemory(vectors); coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* fake a number_values x4 vectors matrix from coefficients array */ for (i=0; i < number_values; i++) vectors[i] = &(coeff[i*4]); /* Add given control point pairs for least squares solving */ for (i=0; i < number_arguments; i+=cp_size) { terms[0] = arguments[i+cp_x]; /* x */ terms[1] = arguments[i+cp_y]; /* y */ terms[2] = terms[0]*terms[1]; /* x*y */ terms[3] = 1; /* 1 */ LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[i+cp_values]),4UL,number_values); } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,4UL,number_values); matrix = RelinquishMagickMatrix(matrix, 4UL); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } if ( *method == BilinearForwardDistortion ) { /* Bilinear Forward 
Mapped Distortion The above least-squares solved for coefficents but in the forward direction, due to changes to indexing constants. i = c0*x + c1*y + c2*x*y + c3; j = c4*x + c5*y + c6*x*y + c7; where i,j are in the destination image, NOT the source. Reverse Pixel mapping however needs to use reverse of these functions. It required a full page of algbra to work out the reversed mapping formula, but resolves down to the following... c8 = c0*c5-c1*c4; c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula i = i - c3; j = j - c7; b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0 c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a) r = b*b - c9*(c+c); if ( c9 != 0 ) y = ( -b + sqrt(r) ) / c9; else y = -c/b; x = ( i - c1*y) / ( c1 - c2*y ); NB: if 'r' is negative there is no solution! NB: the sign of the sqrt() should be negative if image becomes flipped or flopped, or crosses over itself. NB: techniqually coefficient c5 is not needed, anymore, but kept for completness. See Anthony Thyssen <A.Thyssen@griffith.edu.au> or Fred Weinhaus <fmw@alink.net> for more details. */ coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4]; coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]); } return(coeff); } #if 0 case QuadrilateralDistortion: { /* Map a Quadrilateral to a unit square using BilinearReverse Then map that unit square back to the final Quadrilateral using BilinearForward. Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... */ /* UNDER CONSTRUCTION */ return(coeff); } #endif case PolynomialDistortion: { /* Polynomial Distortion First two coefficents are used to hole global polynomal information c0 = Order of the polynimial being created c1 = number_of_terms in one polynomial equation Rest of the coefficients map to the equations.... v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ... for each 'value' (number_values of them) given. 
As such total coefficients = 2 + number_terms * number_values Input Arguments are sets of control points... For Distort Images order [u,v, x,y] ... For Sparse Gradients order [x,y, r,g,b] ... Polynomial Distortion Notes... + UNDER DEVELOPMENT -- Do not expect this to remain as is. + Currently polynomial is a reversed mapped distortion. + Order 1.5 is fudged to map into a bilinear distortion. though it is not the same order as that distortion. */ double **matrix, **vectors, *terms; size_t nterms; /* number of polynomial terms per number_values */ register ssize_t j; MagickBooleanType status; /* first two coefficients hold polynomial order information */ coeff[0] = arguments[0]; coeff[1] = (double) poly_number_terms(arguments[0]); nterms = (size_t) coeff[1]; /* create matrix, a fake vectors matrix, and least sqs terms */ matrix = AcquireMagickMatrix(nterms,nterms); vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors)); terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms)); if (matrix == (double **) NULL || vectors == (double **) NULL || terms == (double *) NULL ) { matrix = RelinquishMagickMatrix(matrix, nterms); vectors = (double **) RelinquishMagickMemory(vectors); terms = (double *) RelinquishMagickMemory(terms); coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* fake a number_values x3 vectors matrix from coefficients array */ for (i=0; i < number_values; i++) vectors[i] = &(coeff[2+i*nterms]); /* Add given control point pairs for least squares solving */ for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */ for (j=0; j < (ssize_t) nterms; j++) terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]); LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[i+cp_values]),nterms,number_values); } terms = (double *) RelinquishMagickMemory(terms); /* 
Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,nterms,number_values); matrix = RelinquishMagickMatrix(matrix, nterms); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } return(coeff); } case ArcDistortion: { /* Arc Distortion Args: arc_width rotate top_edge_radius bottom_edge_radius All but first argument are optional arc_width The angle over which to arc the image side-to-side rotate Angle to rotate image from vertical center top_radius Set top edge of source image at this radius bottom_radius Set bootom edge to this radius (radial scaling) By default, if the radii arguments are nor provided the image radius is calculated so the horizontal center-line is fits the given arc without scaling. The output image size is ALWAYS adjusted to contain the whole image, and an offset is given to position image relative to the 0,0 point of the origin, allowing users to use relative positioning onto larger background (via -flatten). The arguments are converted to these coefficients c0: angle for center of source image c1: angle scale for mapping to source image c2: radius for top of source image c3: radius scale for mapping source image c4: centerline of arc within source image Note the coefficients use a center angle, so asymptotic join is furthest from both sides of the source image. This also means that for arc angles greater than 360 the sides of the image will be trimmed equally. Arc Distortion Notes... 
+ Does not use a set of CPs + Will only work with Image Distortion + Can not be used for generating a sparse gradient (interpolation) */ if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Arc Angle Too Small'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Outer Radius Too Small'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } coeff[0] = -MagickPI2; /* -90, place at top! */ if ( number_arguments >= 1 ) coeff[1] = DegreesToRadians(arguments[0]); else coeff[1] = MagickPI2; /* zero arguments - center is at top */ if ( number_arguments >= 2 ) coeff[0] += DegreesToRadians(arguments[1]); coeff[0] /= Magick2PI; /* normalize radians */ coeff[0] -= MagickRound(coeff[0]); coeff[0] *= Magick2PI; /* de-normalize back to radians */ coeff[3] = (double)image->rows-1; coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0; if ( number_arguments >= 3 ) { if ( number_arguments >= 4 ) coeff[3] = arguments[2] - arguments[3]; else coeff[3] *= arguments[2]/coeff[2]; coeff[2] = arguments[2]; } coeff[4] = ((double)image->columns-1.0)/2.0; return(coeff); } case PolarDistortion: case DePolarDistortion: { /* (De)Polar Distortion (same set of arguments) Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato DePolar can also have the extra arguments of Width, Height Coefficients 0 to 5 is the sanatized version first 6 input args Coefficient 6 is the angle to coord ratio and visa-versa Coefficient 7 is the radius to coord ratio and visa-versa WARNING: It is possible for Radius max<min and/or Angle from>to */ if ( number_arguments == 3 || ( number_arguments > 6 && 
*method == PolarDistortion ) || number_arguments > 8 ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument", "%s : number of arguments", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* Rmax - if 0 calculate appropriate value */ if ( number_arguments >= 1 ) coeff[0] = arguments[0]; else coeff[0] = 0.0; /* Rmin - usally 0 */ coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0; /* Center X,Y */ if ( number_arguments >= 4 ) { coeff[2] = arguments[2]; coeff[3] = arguments[3]; } else { /* center of actual image */ coeff[2] = (double)(image->columns)/2.0+image->page.x; coeff[3] = (double)(image->rows)/2.0+image->page.y; } /* Angle from,to - about polar center 0 is downward */ coeff[4] = -MagickPI; if ( number_arguments >= 5 ) coeff[4] = DegreesToRadians(arguments[4]); coeff[5] = coeff[4]; if ( number_arguments >= 6 ) coeff[5] = DegreesToRadians(arguments[5]); if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon ) coeff[5] += Magick2PI; /* same angle is a full circle */ /* if radius 0 or negative, its a special value... 
*/ if ( coeff[0] < MagickEpsilon ) { /* Use closest edge if radius == 0 */ if ( fabs(coeff[0]) < MagickEpsilon ) { coeff[0]=MagickMin(fabs(coeff[2]-image->page.x), fabs(coeff[3]-image->page.y)); coeff[0]=MagickMin(coeff[0], fabs(coeff[2]-image->page.x-image->columns)); coeff[0]=MagickMin(coeff[0], fabs(coeff[3]-image->page.y-image->rows)); } /* furthest diagonal if radius == -1 */ if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) { double rx,ry; rx = coeff[2]-image->page.x; ry = coeff[3]-image->page.y; coeff[0] = rx*rx+ry*ry; ry = coeff[3]-image->page.y-image->rows; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); rx = coeff[2]-image->page.x-image->columns; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); ry = coeff[3]-image->page.y; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); coeff[0] = sqrt(coeff[0]); } } /* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */ if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon || (coeff[0]-coeff[1]) < MagickEpsilon ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : Invalid Radius", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* converstion ratios */ if ( *method == PolarDistortion ) { coeff[6]=(double) image->columns/(coeff[5]-coeff[4]); coeff[7]=(double) image->rows/(coeff[0]-coeff[1]); } else { /* *method == DePolarDistortion */ coeff[6]=(coeff[5]-coeff[4])/image->columns; coeff[7]=(coeff[0]-coeff[1])/image->rows; } return(coeff); } case Cylinder2PlaneDistortion: case Plane2CylinderDistortion: { /* 3D Cylinder to/from a Tangential Plane Projection between a clinder and flat plain from a point on the center line of the cylinder. The two surfaces coincide in 3D space at the given centers of distortion (perpendicular to projection point) on both images. 
Args: FOV_arc_width Coefficents: FOV(radians), Radius, center_x,y, dest_center_x,y FOV (Field Of View) the angular field of view of the distortion, across the width of the image, in degrees. The centers are the points of least distortion in the input and resulting images. These centers are however determined later. Coeff 0 is the FOV angle of view of image width in radians Coeff 1 is calculated radius of cylinder. Coeff 2,3 center of distortion of input image Coefficents 4,5 Center of Distortion of dest (determined later) */ if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : Invalid FOV Angle", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } coeff[0] = DegreesToRadians(arguments[0]); if ( *method == Cylinder2PlaneDistortion ) /* image is curved around cylinder, so FOV angle (in radians) * scales directly to image X coordinate, according to its radius. */ coeff[1] = (double) image->columns/coeff[0]; else /* radius is distance away from an image with this angular FOV */ coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) ); coeff[2] = (double)(image->columns)/2.0+image->page.x; coeff[3] = (double)(image->rows)/2.0+image->page.y; coeff[4] = coeff[2]; coeff[5] = coeff[3]; /* assuming image size is the same */ return(coeff); } case BarrelDistortion: case BarrelInverseDistortion: { /* Barrel Distortion Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd BarrelInv Distortion Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D) Where Rd is the normalized radius from corner to middle of image Input Arguments are one of the following forms (number of arguments)... 
3: A,B,C 4: A,B,C,D 5: A,B,C X,Y 6: A,B,C,D X,Y 8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy 10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y Returns 10 coefficent values, which are de-normalized (pixel scale) Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc */ /* Radius de-normalization scaling factor */ double rscale = 2.0/MagickMin((double) image->columns,(double) image->rows); /* sanity check number of args must = 3,4,5,6,8,10 or error */ if ( (number_arguments < 3) || (number_arguments == 7) || (number_arguments == 9) || (number_arguments > 10) ) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument", "%s : number of arguments", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* A,B,C,D coefficients */ coeff[0] = arguments[0]; coeff[1] = arguments[1]; coeff[2] = arguments[2]; if ((number_arguments == 3) || (number_arguments == 5) ) coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2]; else coeff[3] = arguments[3]; /* de-normalize the coefficients */ coeff[0] *= pow(rscale,3.0); coeff[1] *= rscale*rscale; coeff[2] *= rscale; /* Y coefficients: as given OR same as X coefficients */ if ( number_arguments >= 8 ) { coeff[4] = arguments[4] * pow(rscale,3.0); coeff[5] = arguments[5] * rscale*rscale; coeff[6] = arguments[6] * rscale; coeff[7] = arguments[7]; } else { coeff[4] = coeff[0]; coeff[5] = coeff[1]; coeff[6] = coeff[2]; coeff[7] = coeff[3]; } /* X,Y Center of Distortion (image coodinates) */ if ( number_arguments == 5 ) { coeff[8] = arguments[3]; coeff[9] = arguments[4]; } else if ( number_arguments == 6 ) { coeff[8] = arguments[4]; coeff[9] = arguments[5]; } else if ( number_arguments == 10 ) { coeff[8] = arguments[8]; coeff[9] = arguments[9]; } else { /* center of the image provided (image coodinates) */ coeff[8] = (double)image->columns/2.0 + image->page.x; coeff[9] = (double)image->rows/2.0 + image->page.y; } return(coeff); } case ShepardsDistortion: { /* Shepards Distortion input 
arguments are the coefficents! Just check the number of arguments is valid! Args: u1,v1, x1,y1, ... OR : u1,v1, r1,g1,c1, ... */ if ( number_arguments%cp_size != 0 || number_arguments < cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'requires CP's (4 numbers each)'", CommandOptionToMnemonic(MagickDistortOptions, *method)); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* User defined weighting power for Shepard's Method */ { const char *artifact=GetImageArtifact(image,"shepards:power"); if ( artifact != (const char *) NULL ) { coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0; if ( coeff[0] < MagickEpsilon ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument","%s", "-define shepards:power" ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } } else coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */ } return(coeff); } default: break; } /* you should never reach this point */ perror("no method handler"); /* just fail assertion */ return((double *) NULL); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D i s t o r t R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DistortResizeImage() resize image using the equivalent but slower image % distortion operator. The filter is applied using a EWA cylindrical % resampling. But like resize the final image size is limited to whole pixels % with no effects by virtual-pixels on the result. % % Note that images containing a transparency channel will be twice as slow to % resize as images one without transparency. % % The format of the DistortResizeImage method is: % % Image *DistortResizeImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *DistortResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag  "Distort/Image"

  Image
    *resize_image,
    *tmp_image;

  RectangleInfo
    crop_area;

  double
    distort_args[12];

  VirtualPixelMethod
    vp_save;

  /*
    Distort resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /*
    Do not short-circuit this resize if final image size is unchanged.

    The affine arguments map the source dimensions (args 4 and 9) onto the
    requested dimensions (args 6 and 11); all other arguments remain zero.
  */
  (void) memset(distort_args,0,sizeof(distort_args));
  distort_args[4]=(double) image->columns;
  distort_args[6]=(double) columns;
  distort_args[9]=(double) image->rows;
  distort_args[11]=(double) rows;
  /* remember the virtual pixel method so it can be restored at the end */
  vp_save=GetImageVirtualPixelMethod(image);
  tmp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tmp_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod,
    exception);
  if (image->alpha_trait == UndefinedPixelTrait)
    {
      /*
        Image has no transparency channel, so we are free to use it.
      */
      (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel,exception);
      /* NB: comma operator -- distort, then immediately free the clone */
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception),
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        return((Image *) NULL);
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel,
        exception);
    }
  else
    {
      /*
        Image has transparency so handle colors and alpha separately.

        Basically we need to separate Virtual-Pixel alpha in the resized
        image, so only the actual original images alpha channel is used.

        Distort alpha channel separately.
      */
      Image
        *resize_alpha;

      (void) SetImageAlphaChannel(tmp_image,ExtractAlphaChannel,exception);
      (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel,exception);
      resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception),
      tmp_image=DestroyImage(tmp_image);
      if (resize_alpha == (Image *) NULL)
        return((Image *) NULL);
      /*
        Distort the actual image containing alpha + VP alpha.
      */
      tmp_image=CloneImage(image,0,0,MagickTrue,exception);
      if (tmp_image == (Image *) NULL)
        return((Image *) NULL);
      (void) SetImageVirtualPixelMethod(tmp_image,
        TransparentVirtualPixelMethod,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception),
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      /*
        Replace resize images alpha with the separately distorted alpha.
      */
      (void) SetImageAlphaChannel(resize_image,OffAlphaChannel,exception);
      (void) SetImageAlphaChannel(resize_alpha,OffAlphaChannel,exception);
      (void) CompositeImage(resize_image,resize_alpha,CopyAlphaCompositeOp,
        MagickTrue,0,0,exception);
      resize_alpha=DestroyImage(resize_alpha);
    }
  (void) SetImageVirtualPixelMethod(resize_image,vp_save,exception);
  /*
    Clean up the results of the distortion: crop away any overshoot from
    the 'bestfit' distortion so the result is exactly columns x rows.
  */
  crop_area.width=columns;
  crop_area.height=rows;
  crop_area.x=0;
  crop_area.y=0;
  tmp_image=resize_image;
  resize_image=CropImage(tmp_image,&crop_area,exception);
  tmp_image=DestroyImage(tmp_image);
  if (resize_image != (Image *) NULL)
    {
      /* restore original alpha/compose traits and drop any page geometry */
      resize_image->alpha_trait=image->alpha_trait;
      resize_image->compose=image->compose;
      resize_image->page.width=0;
      resize_image->page.height=0;
    }
  return(resize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D i s t o r t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DistortImage() distorts an image using various distortion methods, by
%  mapping color lookups of the source image to a new destination image
%  usually of the same size as the source image, unless 'bestfit' is set to
%  true.
%
%  If 'bestfit' is enabled, and distortion allows it, the destination image is
%  adjusted to ensure the whole source 'image' will just fit within the final
%  destination image, which will be sized and offset accordingly.  Also in
%  many cases the virtual offset of the source image will be taken into
%  account in the mapping.
%
%  If the '-verbose' control option has been set print to standard error the
%  equivalent '-fx' formula with coefficients for the function, if practical.
%
%  The format of the DistortImage() method is:
%
%      Image *DistortImage(const Image *image,const DistortMethod method,
%        const size_t number_arguments,const double *arguments,
%        MagickBooleanType bestfit, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be distorted.
%
%    o method: the method of image distortion.
%
%        ArcDistortion always ignores source image offset, and always
%        'bestfit' the destination image with the top left corner offset
%        relative to the polar mapping center.
%
%        Affine, Perspective, and Bilinear, do least squares fitting of the
%        distortion when more than the minimum number of control point pairs
%        are provided.
%
%        Perspective, and Bilinear, fall back to an Affine distortion when less
%        than 4 control point pairs are provided. While Affine distortions
%        let you use any number of control point pairs, that is Zero pairs is
%        a No-Op (viewport only) distortion, one pair is a translation and
%        two pairs of control points do a scale-rotate-translate, without any
%        shearing.
%
%    o number_arguments: the number of arguments given.
%
%    o arguments: an array of floating point arguments for this method.
%
%    o bestfit: Attempt to 'bestfit' the size of the resulting image.
%      This also forces the resulting image to be a 'layered' virtual
%      canvas image.  Can be overridden using 'distort:viewport' setting.
%
%    o exception: return any errors or warnings in this structure
%
%  Extra Controls from Image meta-data (artifacts)...
%
%    o "verbose"
%        Output to stderr alternatives, internal coefficients, and FX
%        equivalents for the distortion operation (if feasible).
%        This forms an extra check of the distortion method, and allows users
%        access to the internal constants IM calculates for the distortion.
%
%    o "distort:viewport"
%        Directly set the output image canvas area and offset to use for the
%        resulting image, rather than use the original image's canvas, or a
%        calculated 'bestfit' canvas.
%
%    o "distort:scale"
%        Scale the size of the output canvas by this amount to provide a
%        method of Zooming, and for super-sampling the results.
%
%  Other settings that can affect results include
%
%    o 'interpolate' For source image lookups (scale enlargements)
%
%    o 'filter'      Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup % instead % */ MagickExport Image *DistortImage(const Image *image, DistortMethod method, const size_t number_arguments,const double *arguments, MagickBooleanType bestfit,ExceptionInfo *exception) { #define DistortImageTag "Distort/Image" double *coeff, output_scaling; Image *distort_image; RectangleInfo geometry; /* geometry of the distorted space viewport */ MagickBooleanType viewport_given; PixelInfo invalid; /* the color to assign when distort result is invalid */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Handle Special Compound Distortions */ if ( method == ResizeDistortion ) { if ( number_arguments != 2 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","Resize", "Invalid number of args: 2 only"); return((Image *) NULL); } distort_image=DistortResizeImage(image,(size_t)arguments[0], (size_t)arguments[1], exception); return(distort_image); } /* Convert input arguments (usually as control points for reverse mapping) into mapping coefficients to apply the distortion. Note that some distortions are mapped to other distortions, and as such do not require specific code after this point. */ coeff = GenerateCoefficients(image, &method, number_arguments, arguments, 0, exception); if ( coeff == (double *) NULL ) return((Image *) NULL); /* Determine the size and offset for a 'bestfit' destination. Usally the four corners of the source image is enough. 
*/ /* default output image bounds, when no 'bestfit' is requested */ geometry.width=image->columns; geometry.height=image->rows; geometry.x=0; geometry.y=0; if ( method == ArcDistortion ) { bestfit = MagickTrue; /* always calculate a 'best fit' viewport */ } /* Work out the 'best fit', (required for ArcDistortion) */ if ( bestfit ) { PointInfo s,d,min,max; /* source, dest coords --mapping--> min, max coords */ MagickBooleanType fix_bounds = MagickTrue; /* enlarge bounds for VP handling */ s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */ /* defines to figure out the bounds of the distorted image */ #define InitalBounds(p) \ { \ /* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \ min.x = max.x = p.x; \ min.y = max.y = p.y; \ } #define ExpandBounds(p) \ { \ /* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \ min.x = MagickMin(min.x,p.x); \ max.x = MagickMax(max.x,p.x); \ min.y = MagickMin(min.y,p.y); \ max.y = MagickMax(max.y,p.y); \ } switch (method) { case AffineDistortion: { double inverse[6]; InvertAffineCoefficients(coeff, inverse); s.x = (double) image->page.x; s.y = (double) image->page.y; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; InitalBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); s.x = (double) image->page.x; s.y = (double) image->page.y+image->rows; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y+image->rows; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); break; } case PerspectiveDistortion: { double inverse[8], scale; InvertPerspectiveCoefficients(coeff, inverse); s.x = (double) image->page.x; s.y = (double) image->page.y; 
scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); InitalBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); s.x = (double) image->page.x; s.y = (double) image->page.y+image->rows; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y+image->rows; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); break; } case ArcDistortion: { double a, ca, sa; /* Forward Map Corners */ a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; InitalBounds(d); d.x = (coeff[2]-coeff[3])*ca; d.y = (coeff[2]-coeff[3])*sa; ExpandBounds(d); a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; ExpandBounds(d); d.x = (coeff[2]-coeff[3])*ca; d.y = (coeff[2]-coeff[3])*sa; ExpandBounds(d); /* Orthogonal points along top of arc */ for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2); a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) { ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; ExpandBounds(d); } /* Convert the angle_to_width and radius_to_height to appropriate scaling factors, to allow faster processing in the mapping function. 
*/ coeff[1] = (double) (Magick2PI*image->columns/coeff[1]); coeff[3] = (double)image->rows/coeff[3]; break; } case PolarDistortion: { if (number_arguments < 2) coeff[2] = coeff[3] = 0.0; min.x = coeff[2]-coeff[0]; max.x = coeff[2]+coeff[0]; min.y = coeff[3]-coeff[0]; max.y = coeff[3]+coeff[0]; /* should be about 1.0 if Rmin = 0 */ coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]); break; } case DePolarDistortion: { /* direct calculation as it needs to tile correctly * for reversibility in a DePolar-Polar cycle */ fix_bounds = MagickFalse; geometry.x = geometry.y = 0; geometry.height = (size_t) ceil(coeff[0]-coeff[1]); geometry.width = (size_t) ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5); /* correct scaling factors relative to new size */ coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */ coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */ break; } case Cylinder2PlaneDistortion: { /* direct calculation so center of distortion is either a pixel * center, or pixel edge. 
This allows for reversibility of the * distortion */ geometry.x = geometry.y = 0; geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) ); geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) ); /* correct center of distortion relative to new size */ coeff[4] = (double) geometry.width/2.0; coeff[5] = (double) geometry.height/2.0; fix_bounds = MagickFalse; break; } case Plane2CylinderDistortion: { /* direct calculation center is either pixel center, or pixel edge * so as to allow reversibility of the image distortion */ geometry.x = geometry.y = 0; geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */ geometry.height = (size_t) (2*coeff[3]); /* input image height */ /* correct center of distortion relative to new size */ coeff[4] = (double) geometry.width/2.0; coeff[5] = (double) geometry.height/2.0; fix_bounds = MagickFalse; break; } case ShepardsDistortion: case BilinearForwardDistortion: case BilinearReverseDistortion: #if 0 case QuadrilateralDistortion: #endif case PolynomialDistortion: case BarrelDistortion: case BarrelInverseDistortion: default: /* no calculated bestfit available for these distortions */ bestfit = MagickFalse; fix_bounds = MagickFalse; break; } /* Set the output image geometry to calculated 'bestfit'. Yes this tends to 'over do' the file image size, ON PURPOSE! Do not do this for DePolar which needs to be exact for virtual tiling. */ if ( fix_bounds ) { geometry.x = (ssize_t) floor(min.x-0.5); geometry.y = (ssize_t) floor(min.y-0.5); geometry.width=(size_t) ceil(max.x-geometry.x+0.5); geometry.height=(size_t) ceil(max.y-geometry.y+0.5); } } /* end bestfit destination image calculations */ /* The user provided a 'viewport' expert option which may overrides some parts of the current output image geometry. This also overrides its default 'bestfit' setting. 
*/ { const char *artifact=GetImageArtifact(image,"distort:viewport"); viewport_given = MagickFalse; if ( artifact != (const char *) NULL ) { MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry); if (flags==NoValue) (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"InvalidSetting","'%s' '%s'", "distort:viewport",artifact); else viewport_given = MagickTrue; } } /* Verbose output */ if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) { register ssize_t i; char image_gen[MagickPathExtent]; const char *lookup; /* Set destination image size and virtual offset */ if ( bestfit || viewport_given ) { (void) FormatLocaleString(image_gen, MagickPathExtent," -size %.20gx%.20g " "-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width, (double) geometry.height,(double) geometry.x,(double) geometry.y); lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }"; } else { image_gen[0] = '\0'; /* no destination to generate */ lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */ } switch (method) { case AffineDistortion: { double *inverse; inverse = (double *) AcquireQuantumMemory(6,sizeof(*inverse)); if (inverse == (double *) NULL) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortImages"); return((Image *) NULL); } InvertAffineCoefficients(coeff, inverse); CoefficientsToAffineArgs(inverse); (void) FormatLocaleFile(stderr, "Affine Projection:\n"); (void) FormatLocaleFile(stderr, " -distort AffineProjection \\\n '"); for (i=0; i < 5; i++) (void) FormatLocaleFile(stderr, "%lf,", inverse[i]); (void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]); inverse = (double *) RelinquishMagickMemory(inverse); (void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) 
FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf;\n", coeff[0], coeff[1], coeff[2]); (void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf;\n", coeff[3], coeff[4], coeff[5]); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; } case PerspectiveDistortion: { double *inverse; inverse = (double *) AcquireQuantumMemory(8,sizeof(*inverse)); if (inverse == (double *) NULL) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((Image *) NULL); } InvertPerspectiveCoefficients(coeff, inverse); (void) FormatLocaleFile(stderr, "Perspective Projection:\n"); (void) FormatLocaleFile(stderr, " -distort PerspectiveProjection \\\n '"); for (i=0; i<4; i++) (void) FormatLocaleFile(stderr, "%lf, ", inverse[i]); (void) FormatLocaleFile(stderr, "\n "); for (; i<7; i++) (void) FormatLocaleFile(stderr, "%lf, ", inverse[i]); (void) FormatLocaleFile(stderr, "%lf'\n", inverse[7]); inverse = (double *) RelinquishMagickMemory(inverse); (void) FormatLocaleFile(stderr, "Perspective Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " rr=%+lf*ii %+lf*jj + 1;\n", coeff[6], coeff[7]); (void) FormatLocaleFile(stderr, " xx=(%+lf*ii %+lf*jj %+lf)/rr;\n", coeff[0], coeff[1], coeff[2]); (void) FormatLocaleFile(stderr, " yy=(%+lf*ii %+lf*jj %+lf)/rr;\n", coeff[3], coeff[4], coeff[5]); (void) FormatLocaleFile(stderr, " rr%s0 ? %s : blue' \\\n", coeff[8] < 0 ? 
"<" : ">", lookup); break; } case BilinearForwardDistortion: (void) FormatLocaleFile(stderr, "BilinearForward Mapping Equations:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " i = %+lf*x %+lf*y %+lf*x*y %+lf;\n", coeff[0], coeff[1], coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " j = %+lf*x %+lf*y %+lf*x*y %+lf;\n", coeff[4], coeff[5], coeff[6], coeff[7]); #if 0 /* for debugging */ (void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n", coeff[8], coeff[9]); #endif (void) FormatLocaleFile(stderr, "BilinearForward Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n", 0.5-coeff[3], 0.5-coeff[7]); (void) FormatLocaleFile(stderr, " bb=%lf*ii %+lf*jj %+lf;\n", coeff[6], -coeff[2], coeff[8]); /* Handle Special degenerate (non-quadratic) or trapezoidal case */ if ( coeff[9] != 0 ) { (void) FormatLocaleFile(stderr, " rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n", -2*coeff[9], coeff[4], -coeff[0]); (void) FormatLocaleFile(stderr, " yy=( -bb + sqrt(rt) ) / %lf;\n", coeff[9]); } else (void) FormatLocaleFile(stderr, " yy=(%lf*ii%+lf*jj)/bb;\n", -coeff[4], coeff[0]); (void) FormatLocaleFile(stderr, " xx=(ii %+lf*yy)/(%lf %+lf*yy);\n", -coeff[1], coeff[0], coeff[2]); if ( coeff[9] != 0 ) (void) FormatLocaleFile(stderr, " (rt < 0 ) ? 
red : %s'\n", lookup); else (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; case BilinearReverseDistortion: #if 0 (void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n"); (void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n"); (void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n", coeff[3], coeff[0], coeff[1], coeff[2]); (void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n", coeff[7], coeff[4], coeff[5], coeff[6]); #endif (void) FormatLocaleFile(stderr, "BilinearReverse Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n", coeff[0], coeff[1], coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n", coeff[4], coeff[5], coeff[6], coeff[7]); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; case PolynomialDistortion: { size_t nterms = (size_t) coeff[1]; (void) FormatLocaleFile(stderr, "Polynomial (order %lg, terms %lu), FX Equivelent\n", coeff[0],(unsigned long) nterms); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " xx ="); for (i=0; i<(ssize_t) nterms; i++) { if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n "); (void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i], poly_basis_str(i)); } (void) FormatLocaleFile(stderr, ";\n yy ="); for (i=0; i<(ssize_t) nterms; i++) { if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n "); (void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i+nterms], poly_basis_str(i)); } (void) FormatLocaleFile(stderr, ";\n %s' \\\n", lookup); break; } case ArcDistortion: { (void) FormatLocaleFile(stderr, "Arc Distort, Internal Coefficients:\n"); for ( i=0; i<5; i++ ) (void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, 
coeff[i]); (void) FormatLocaleFile(stderr, "Arc Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x; jj=j+page.y;\n"); (void) FormatLocaleFile(stderr, " xx=(atan2(jj,ii)%+lf)/(2*pi);\n", -coeff[0]); (void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n"); (void) FormatLocaleFile(stderr, " xx=xx*%lf %+lf;\n", coeff[1], coeff[4]); (void) FormatLocaleFile(stderr, " yy=(%lf - hypot(ii,jj)) * %lf;\n", coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n"); break; } case PolarDistortion: { (void) FormatLocaleFile(stderr, "Polar Distort, Internal Coefficents\n"); for ( i=0; i<8; i++ ) (void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]); (void) FormatLocaleFile(stderr, "Polar Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n", -coeff[2], -coeff[3]); (void) FormatLocaleFile(stderr, " xx=(atan2(ii,jj)%+lf)/(2*pi);\n", -(coeff[4]+coeff[5])/2 ); (void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n"); (void) FormatLocaleFile(stderr, " xx=xx*2*pi*%lf + v.w/2;\n", coeff[6] ); (void) FormatLocaleFile(stderr, " yy=(hypot(ii,jj)%+lf)*%lf;\n", -coeff[1], coeff[7] ); (void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n"); break; } case DePolarDistortion: { (void) FormatLocaleFile(stderr, "DePolar Distort, Internal Coefficents\n"); for ( i=0; i<8; i++ ) (void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]); (void) FormatLocaleFile(stderr, "DePolar Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'aa=(i+.5)*%lf %+lf;\n", coeff[6], +coeff[4] ); (void) FormatLocaleFile(stderr, " rr=(j+.5)*%lf %+lf;\n", coeff[7], +coeff[1] ); (void) FormatLocaleFile(stderr, " xx=rr*sin(aa) %+lf;\n", coeff[2] ); (void) FormatLocaleFile(stderr, " yy=rr*cos(aa) %+lf;\n", coeff[3] ); (void) 
FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n"); break; } case Cylinder2PlaneDistortion: { (void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, Internal Coefficents\n"); (void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]); (void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n", -coeff[4], -coeff[5]); (void) FormatLocaleFile(stderr, " aa=atan(ii/%+lf);\n", coeff[1] ); (void) FormatLocaleFile(stderr, " xx=%lf*aa%+lf;\n", coeff[1], coeff[2] ); (void) FormatLocaleFile(stderr, " yy=jj*cos(aa)%+lf;\n", coeff[3] ); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; } case Plane2CylinderDistortion: { (void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, Internal Coefficents\n"); (void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]); (void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n", -coeff[4], -coeff[5]); (void) FormatLocaleFile(stderr, " ii=ii/%+lf;\n", coeff[1] ); (void) FormatLocaleFile(stderr, " xx=%lf*tan(ii)%+lf;\n", coeff[1], coeff[2] ); (void) FormatLocaleFile(stderr, " yy=jj/cos(ii)%+lf;\n", coeff[3] ); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; } case BarrelDistortion: case BarrelInverseDistortion: { double xc,yc; /* NOTE: This does the barrel roll in pixel coords not image coords ** The internal distortion must do it in image coordinates, ** so that is what the center coeff (8,9) is given in. */ xc = ((double)image->columns-1.0)/2.0 + image->page.x; yc = ((double)image->rows-1.0)/2.0 + image->page.y; (void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n", method == BarrelDistortion ? 
"" : "Inv"); (void) FormatLocaleFile(stderr, "%s", image_gen); if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 ) (void) FormatLocaleFile(stderr, " -fx 'xc=(w-1)/2; yc=(h-1)/2;\n"); else (void) FormatLocaleFile(stderr, " -fx 'xc=%lf; yc=%lf;\n", coeff[8]-0.5, coeff[9]-0.5); (void) FormatLocaleFile(stderr, " ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n"); (void) FormatLocaleFile(stderr, " ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n", method == BarrelDistortion ? "*" : "/", coeff[0],coeff[1],coeff[2],coeff[3]); (void) FormatLocaleFile(stderr, " jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n", method == BarrelDistortion ? "*" : "/", coeff[4],coeff[5],coeff[6],coeff[7]); (void) FormatLocaleFile(stderr, " v.p{fx*ii+xc,fy*jj+yc}' \\\n"); } default: break; } } /* The user provided a 'scale' expert option will scale the output image size, by the factor given allowing for super-sampling of the distorted image space. Any scaling factors must naturally be halved as a result. */ { const char *artifact; artifact=GetImageArtifact(image,"distort:scale"); output_scaling = 1.0; if (artifact != (const char *) NULL) { output_scaling = fabs(StringToDouble(artifact,(char **) NULL)); geometry.width=(size_t) (output_scaling*geometry.width+0.5); geometry.height=(size_t) (output_scaling*geometry.height+0.5); geometry.x=(ssize_t) (output_scaling*geometry.x+0.5); geometry.y=(ssize_t) (output_scaling*geometry.y+0.5); if ( output_scaling < 0.1 ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s", "-set option:distort:scale" ); return((Image *) NULL); } output_scaling = 1/output_scaling; } } #define ScaleFilter(F,A,B,C,D) \ ScaleResampleFilter( (F), \ output_scaling*(A), output_scaling*(B), \ output_scaling*(C), output_scaling*(D) ) /* Initialize the distort image attributes. 
*/ distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue, exception); if (distort_image == (Image *) NULL) { coeff=(double *) RelinquishMagickMemory(coeff); return((Image *) NULL); } /* if image is ColorMapped - change it to DirectClass */ if (SetImageStorageClass(distort_image,DirectClass,exception) == MagickFalse) { coeff=(double *) RelinquishMagickMemory(coeff); distort_image=DestroyImage(distort_image); return((Image *) NULL); } if ((IsPixelInfoGray(&distort_image->background_color) == MagickFalse) && (IsGrayColorspace(distort_image->colorspace) != MagickFalse)) (void) SetImageColorspace(distort_image,sRGBColorspace,exception); if (distort_image->background_color.alpha_trait != UndefinedPixelTrait) distort_image->alpha_trait=BlendPixelTrait; distort_image->page.x=geometry.x; distort_image->page.y=geometry.y; ConformPixelInfo(distort_image,&distort_image->matte_color,&invalid, exception); { /* ----- MAIN CODE ----- Sample the source image to each pixel in the distort image. 
*/ CacheView *distort_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo zero; ResampleFilter **magick_restrict resample_filter; ssize_t j; status=MagickTrue; progress=0; GetPixelInfo(distort_image,&zero); resample_filter=AcquireResampleFilterThreadSet(image, UndefinedVirtualPixelMethod,MagickFalse,exception); distort_view=AcquireAuthenticCacheView(distort_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,distort_image,distort_image->rows,1) #endif for (j=0; j < (ssize_t) distort_image->rows; j++) { const int id = GetOpenMPThreadId(); double validity; /* how mathematically valid is this the mapping */ MagickBooleanType sync; PixelInfo pixel; /* pixel color to assign to distorted image */ PointInfo d, s; /* transform destination image x,y to source image x,y */ register ssize_t i; register Quantum *magick_restrict q; q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; /* Define constant scaling vectors for Affine Distortions Other methods are either variable, or use interpolated lookup */ switch (method) { case AffineDistortion: ScaleFilter( resample_filter[id], coeff[0], coeff[1], coeff[3], coeff[4] ); break; default: break; } /* Initialize default pixel validity * negative: pixel is invalid output 'matte_color' * 0.0 to 1.0: antialiased, mix with resample output * 1.0 or greater: use resampled output. 
*/ validity = 1.0; for (i=0; i < (ssize_t) distort_image->columns; i++) { /* map pixel coordinate to distortion space coordinate */ d.x = (double) (geometry.x+i+0.5)*output_scaling; d.y = (double) (geometry.y+j+0.5)*output_scaling; s = d; /* default is a no-op mapping */ switch (method) { case AffineDistortion: { s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]; s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5]; /* Affine partial derivitives are constant -- set above */ break; } case PerspectiveDistortion: { double p,q,r,abs_r,abs_c6,abs_c7,scale; /* perspective is a ratio of affines */ p=coeff[0]*d.x+coeff[1]*d.y+coeff[2]; q=coeff[3]*d.x+coeff[4]*d.y+coeff[5]; r=coeff[6]*d.x+coeff[7]*d.y+1.0; /* Pixel Validity -- is it a 'sky' or 'ground' pixel */ validity = (r*coeff[8] < 0.0) ? 0.0 : 1.0; /* Determine horizon anti-alias blending */ abs_r = fabs(r)*2; abs_c6 = fabs(coeff[6]); abs_c7 = fabs(coeff[7]); if ( abs_c6 > abs_c7 ) { if ( abs_r < abs_c6*output_scaling ) validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling); } else if ( abs_r < abs_c7*output_scaling ) validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling); /* Perspective Sampling Point (if valid) */ if ( validity > 0.0 ) { /* divide by r affine, for perspective scaling */ scale = 1.0/r; s.x = p*scale; s.y = q*scale; /* Perspective Partial Derivatives or Scaling Vectors */ scale *= scale; ScaleFilter( resample_filter[id], (r*coeff[0] - p*coeff[6])*scale, (r*coeff[1] - p*coeff[7])*scale, (r*coeff[3] - q*coeff[6])*scale, (r*coeff[4] - q*coeff[7])*scale ); } break; } case BilinearReverseDistortion: { /* Reversed Mapped is just a simple polynomial */ s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3]; s.y=coeff[4]*d.x+coeff[5]*d.y +coeff[6]*d.x*d.y+coeff[7]; /* Bilinear partial derivitives of scaling vectors */ ScaleFilter( resample_filter[id], coeff[0] + coeff[2]*d.y, coeff[1] + coeff[2]*d.x, coeff[4] + coeff[6]*d.y, coeff[5] + coeff[6]*d.x ); break; } case BilinearForwardDistortion: { /* Forward mapped needs reversed 
polynomial equations * which unfortunatally requires a square root! */ double b,c; d.x -= coeff[3]; d.y -= coeff[7]; b = coeff[6]*d.x - coeff[2]*d.y + coeff[8]; c = coeff[4]*d.x - coeff[0]*d.y; validity = 1.0; /* Handle Special degenerate (non-quadratic) case * Currently without horizon anti-alising */ if ( fabs(coeff[9]) < MagickEpsilon ) s.y = -c/b; else { c = b*b - 2*coeff[9]*c; if ( c < 0.0 ) validity = 0.0; else s.y = ( -b + sqrt(c) )/coeff[9]; } if ( validity > 0.0 ) s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y ); /* NOTE: the sign of the square root should be -ve for parts where the source image becomes 'flipped' or 'mirrored'. FUTURE: Horizon handling FUTURE: Scaling factors or Deritives (how?) */ break; } #if 0 case BilinearDistortion: /* Bilinear mapping of any Quadrilateral to any Quadrilateral */ /* UNDER DEVELOPMENT */ break; #endif case PolynomialDistortion: { /* multi-ordered polynomial */ register ssize_t k; ssize_t nterms=(ssize_t)coeff[1]; PointInfo du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */ s.x=s.y=du.x=du.y=dv.x=dv.y=0.0; for(k=0; k < nterms; k++) { s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k]; du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k]; du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k]; s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms]; dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms]; dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms]; } ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y ); break; } case ArcDistortion: { /* what is the angle and radius in the destination image */ s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI); s.x -= MagickRound(s.x); /* angle */ s.y = hypot(d.x,d.y); /* radius */ /* Arc Distortion Partial Scaling Vectors Are derived by mapping the perpendicular unit vectors dR and dA*R*2PI rather than trying to map dx and dy The results is a very simple orthogonal aligned ellipse. 
*/ if ( s.y > MagickEpsilon ) ScaleFilter( resample_filter[id], (double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] ); else ScaleFilter( resample_filter[id], distort_image->columns*2, 0, 0, coeff[3] ); /* now scale the angle and radius for source image lookup point */ s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5; s.y = (coeff[2] - s.y) * coeff[3] + image->page.y; break; } case PolarDistortion: { /* 2D Cartesain to Polar View */ d.x -= coeff[2]; d.y -= coeff[3]; s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2; s.x /= Magick2PI; s.x -= MagickRound(s.x); s.x *= Magick2PI; /* angle - relative to centerline */ s.y = hypot(d.x,d.y); /* radius */ /* Polar Scaling vectors are based on mapping dR and dA vectors This results in very simple orthogonal scaling vectors */ if ( s.y > MagickEpsilon ) ScaleFilter( resample_filter[id], (double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] ); else ScaleFilter( resample_filter[id], distort_image->columns*2, 0, 0, coeff[7] ); /* now finish mapping radius/angle to source x,y coords */ s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x; s.y = (s.y-coeff[1])*coeff[7] + image->page.y; break; } case DePolarDistortion: { /* @D Polar to Carteasain */ /* ignore all destination virtual offsets */ d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4]; d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1]; s.x = d.y*sin(d.x) + coeff[2]; s.y = d.y*cos(d.x) + coeff[3]; /* derivatives are usless - better to use SuperSampling */ break; } case Cylinder2PlaneDistortion: { /* 3D Cylinder to Tangential Plane */ double ax, cx; /* relative to center of distortion */ d.x -= coeff[4]; d.y -= coeff[5]; d.x /= coeff[1]; /* x' = x/r */ ax=atan(d.x); /* aa = atan(x/r) = u/r */ cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */ s.x = coeff[1]*ax; /* u = r*atan(x/r) */ s.y = d.y*cx; /* v = y*cos(u/r) */ /* derivatives... 
(see personnal notes) */ ScaleFilter( resample_filter[id], 1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y ); #if 0 if ( i == 0 && j == 0 ) { fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y); fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) ); fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n", 1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y ); fflush(stderr); } #endif /* add center of distortion in source */ s.x += coeff[2]; s.y += coeff[3]; break; } case Plane2CylinderDistortion: { /* 3D Cylinder to Tangential Plane */ /* relative to center of distortion */ d.x -= coeff[4]; d.y -= coeff[5]; /* is pixel valid - horizon of a infinite Virtual-Pixel Plane * (see Anthony Thyssen's personal note) */ validity = (double) (coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5; if ( validity > 0.0 ) { double cx,tx; d.x /= coeff[1]; /* x'= x/r */ cx = 1/cos(d.x); /* cx = 1/cos(x/r) */ tx = tan(d.x); /* tx = tan(x/r) */ s.x = coeff[1]*tx; /* u = r * tan(x/r) */ s.y = d.y*cx; /* v = y / cos(x/r) */ /* derivatives... 
(see Anthony Thyssen's personal notes) */ ScaleFilter( resample_filter[id], cx*cx, 0.0, s.y*cx/coeff[1], cx ); #if 0 /*if ( i == 0 && j == 0 )*/ if ( d.x == 0.5 && d.y == 0.5 ) { fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y); fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n", coeff[1], (double)(d.x * 180.0/MagickPI), validity ); fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n", cx*cx, 0.0, s.y*cx/coeff[1], cx); fflush(stderr); } #endif } /* add center of distortion in source */ s.x += coeff[2]; s.y += coeff[3]; break; } case BarrelDistortion: case BarrelInverseDistortion: { /* Lens Barrel Distionion Correction */ double r,fx,fy,gx,gy; /* Radial Polynomial Distortion (de-normalized) */ d.x -= coeff[8]; d.y -= coeff[9]; r = sqrt(d.x*d.x+d.y*d.y); if ( r > MagickEpsilon ) { fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3]; fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7]; gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r; gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r; /* adjust functions and scaling for 'inverse' form */ if ( method == BarrelInverseDistortion ) { fx = 1/fx; fy = 1/fy; gx *= -fx*fx; gy *= -fy*fy; } /* Set the source pixel to lookup and EWA derivative vectors */ s.x = d.x*fx + coeff[8]; s.y = d.y*fy + coeff[9]; ScaleFilter( resample_filter[id], gx*d.x*d.x + fx, gx*d.x*d.y, gy*d.x*d.y, gy*d.y*d.y + fy ); } else { /* Special handling to avoid divide by zero when r==0 ** ** The source and destination pixels match in this case ** which was set at the top of the loop using s = d; ** otherwise... 
s.x=coeff[8]; s.y=coeff[9]; */ if ( method == BarrelDistortion ) ScaleFilter( resample_filter[id], coeff[3], 0, 0, coeff[7] ); else /* method == BarrelInverseDistortion */ /* FUTURE, trap for D==0 causing division by zero */ ScaleFilter( resample_filter[id], 1.0/coeff[3], 0, 0, 1.0/coeff[7] ); } break; } case ShepardsDistortion: { /* Shepards Method, or Inverse Weighted Distance for displacement around the destination image control points The input arguments are the coefficents to the function. This is more of a 'displacement' function rather than an absolute distortion function. Note: We can not determine derivatives using shepards method so only a point sample interpolatation can be used. */ size_t i; double denominator; denominator = s.x = s.y = 0; for(i=0; i<number_arguments; i+=4) { double weight = ((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2]) + ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]); weight = pow(weight,coeff[0]); /* shepards power factor */ weight = ( weight < 1.0 ) ? 
1.0 : 1.0/weight; s.x += (arguments[ i ]-arguments[i+2])*weight; s.y += (arguments[i+1]-arguments[i+3])*weight; denominator += weight; } s.x /= denominator; s.y /= denominator; s.x += d.x; /* make it as relative displacement */ s.y += d.y; break; } default: break; /* use the default no-op given above */ } /* map virtual canvas location back to real image coordinate */ if ( bestfit && method != ArcDistortion ) { s.x -= image->page.x; s.y -= image->page.y; } s.x -= 0.5; s.y -= 0.5; if ( validity <= 0.0 ) { /* result of distortion is an invalid pixel - don't resample */ SetPixelViaPixelInfo(distort_image,&invalid,q); } else { /* resample the source image to find its correct color */ (void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel, exception); /* if validity between 0.0 and 1.0 mix result with invalid pixel */ if ( validity < 1.0 ) { /* Do a blend of sample color and invalid pixel */ /* should this be a 'Blend', or an 'Over' compose */ CompositePixelInfoBlend(&pixel,validity,&invalid,(1.0-validity), &pixel); } SetPixelViaPixelInfo(distort_image,&pixel,q); } q+=GetPixelChannels(distort_image); } sync=SyncCacheViewAuthenticPixels(distort_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_DistortImage) #endif proceed=SetImageProgress(image,DistortImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } distort_view=DestroyCacheView(distort_view); resample_filter=DestroyResampleFilterThreadSet(resample_filter); if (status == MagickFalse) distort_image=DestroyImage(distort_image); } /* Arc does not return an offset unless 'bestfit' is in effect And the user has not provided an overriding 'viewport'. 
*/
  if ( method == ArcDistortion && !bestfit && !viewport_given ) {
    distort_image->page.x = 0;
    distort_image->page.y = 0;
  }
  coeff=(double *) RelinquishMagickMemory(coeff);
  return(distort_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%   R o t a t e I m a g e                                                     %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotateImage() creates a new image that is a rotated copy of an existing
%  one.  Positive angles rotate counter-clockwise (right-hand rule), while
%  negative angles rotate clockwise.  Rotated images are usually larger than
%  the originals and have 'empty' triangular corners.  Empty triangles left
%  over from shearing the image are filled with the background color defined
%  by member 'background_color' of the image.  RotateImage allocates the
%  memory necessary for the new Image structure and returns a pointer to the
%  new image.
%
%  The format of the RotateImage method is:
%
%      Image *RotateImage(const Image *image,const double degrees,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: Specifies the number of degrees to rotate the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *distort_image,
    *rotate_image;

  double
    angle;

  PointInfo
    shear;

  size_t
    rotations;

  /*
    Adjust rotation angle: fold 'degrees' into the residual range (-45,45]
    while counting whole 90-degree turns in 'rotations', so the bulk of the
    rotation can be done losslessly by IntegralRotateImage and only the
    residual angle (if any) needs a resampled distortion.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  angle=fmod(degrees,360.0);
  while (angle < -45.0)
    angle+=360.0;
  for (rotations=0; angle > 45.0; rotations++)
    angle-=90.0;
  rotations%=4;
  /*
    The shear factors are computed only to detect a residual angle that is
    numerically zero: in that case the rotation is an exact multiple of 90
    degrees and the fast, exact integral rotation suffices (no resampling).
  */
  shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
  shear.y=sin((double) DegreesToRadians(angle));
  if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
    return(IntegralRotateImage(image,rotations,exception));
  /*
    General case: rotate via a ScaleRotateTranslate distortion of a clone
    whose virtual pixels resolve to the background color (this is what fills
    the 'empty' triangular corners).  Note the full 'degrees' angle is passed
    to DistortImage; bestfit=MagickTrue lets the canvas grow to contain the
    whole rotated result.
  */
  distort_image=CloneImage(image,0,0,MagickTrue,exception);
  if (distort_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(distort_image,BackgroundVirtualPixelMethod,
    exception);
  rotate_image=DistortImage(distort_image,ScaleRotateTranslateDistortion,1,
    &degrees,MagickTrue,exception);
  distort_image=DestroyImage(distort_image);
  return(rotate_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%   S p a r s e C o l o r I m a g e                                           %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SparseColorImage(), given a set of coordinates, interpolates the colors
%  found at those coordinates, across the whole image, using various methods.
%
%  The format of the SparseColorImage() method is:
%
%      Image *SparseColorImage(const Image *image,
%        const SparseColorMethod method,const size_t number_arguments,
%        const double *arguments,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be filled in.
%
%    o method: the method to fill in the gradient between the control points.
% % The methods used for SparseColor() are often simular to methods % used for DistortImage(), and even share the same code for determination % of the function coefficents, though with more dimensions (or resulting % values). % % o number_arguments: the number of arguments given. % % o arguments: array of floating point arguments for this method-- % x,y,color_values-- with color_values given as normalized values. % % o exception: return any errors or warnings in this structure % */ MagickExport Image *SparseColorImage(const Image *image, const SparseColorMethod method,const size_t number_arguments, const double *arguments,ExceptionInfo *exception) { #define SparseColorTag "Distort/SparseColor" SparseColorMethod sparse_method; double *coeff; Image *sparse_image; size_t number_colors; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Determine number of color values needed per control point */ number_colors=0; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) number_colors++; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) number_colors++; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) number_colors++; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) number_colors++; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) number_colors++; /* Convert input arguments into mapping coefficients, this this case we are mapping (distorting) colors, rather than coordinates. 
*/ { DistortMethod distort_method; distort_method=(DistortMethod) method; if ( distort_method >= SentinelDistortion ) distort_method = ShepardsDistortion; /* Pretend to be Shepards */ coeff = GenerateCoefficients(image, &distort_method, number_arguments, arguments, number_colors, exception); if ( coeff == (double *) NULL ) return((Image *) NULL); /* Note some Distort Methods may fall back to other simpler methods, Currently the only fallback of concern is Bilinear to Affine (Barycentric), which is alaso sparse_colr method. This also ensures correct two and one color Barycentric handling. */ sparse_method = (SparseColorMethod) distort_method; if ( distort_method == ShepardsDistortion ) sparse_method = method; /* return non-distort methods to normal */ if ( sparse_method == InverseColorInterpolate ) coeff[0]=0.5; /* sqrt() the squared distance for inverse */ } /* Verbose output */ if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) { switch (sparse_method) { case BarycentricColorInterpolate: { register ssize_t x=0; (void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n"); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) (void) FormatLocaleFile(stderr, " -channel 
A -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; break; } case BilinearColorInterpolate: { register ssize_t x=0; (void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n"); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; break; } default: /* sparse color method is too complex for FX emulation */ break; } } /* Generate new image for generated interpolated gradient. * ASIDE: Actually we could have just replaced the colors of the original * image, but IM Core policy, is if storage class could change then clone * the image. 
*/ sparse_image=CloneImage(image,0,0,MagickTrue,exception); if (sparse_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(sparse_image,DirectClass,exception) == MagickFalse) { /* if image is ColorMapped - change it to DirectClass */ sparse_image=DestroyImage(sparse_image); return((Image *) NULL); } { /* ----- MAIN CODE ----- */ CacheView *sparse_view; MagickBooleanType status; MagickOffsetType progress; ssize_t j; status=MagickTrue; progress=0; sparse_view=AcquireAuthenticCacheView(sparse_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,sparse_image,sparse_image->rows,1) #endif for (j=0; j < (ssize_t) sparse_image->rows; j++) { MagickBooleanType sync; PixelInfo pixel; /* pixel to assign to distorted image */ register ssize_t i; register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns, 1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(sparse_image,&pixel); for (i=0; i < (ssize_t) image->columns; i++) { GetPixelInfoPixel(image,q,&pixel); switch (sparse_method) { case BarycentricColorInterpolate: { register ssize_t x=0; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; break; } case BilinearColorInterpolate: { register ssize_t x=0; if 
((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; break; } case InverseColorInterpolate: case ShepardsColorInterpolate: { /* Inverse (Squared) Distance weights average (IDW) */ size_t k; double denominator; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red=0.0; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green=0.0; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue=0.0; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black=0.0; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha=0.0; denominator = 0.0; for(k=0; k<number_arguments; k+=2+number_colors) { register ssize_t x=(ssize_t) k+2; double weight = ((double)i-arguments[ k ])*((double)i-arguments[ k ]) + ((double)j-arguments[k+1])*((double)j-arguments[k+1]); weight = pow(weight,coeff[0]); /* inverse of power factor */ weight = ( weight < 1.0 ) ? 
1.0 : 1.0/weight; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red += arguments[x++]*weight; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green += arguments[x++]*weight; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue += arguments[x++]*weight; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black += arguments[x++]*weight; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha += arguments[x++]*weight; denominator += weight; } if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red/=denominator; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green/=denominator; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue/=denominator; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black/=denominator; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha/=denominator; break; } case ManhattanColorInterpolate: { size_t k; double minimum = MagickMaximumValue; /* Just use the closest control point you can find! 
*/ for(k=0; k<number_arguments; k+=2+number_colors) { double distance = fabs((double)i-arguments[ k ]) + fabs((double)j-arguments[k+1]); if ( distance < minimum ) { register ssize_t x=(ssize_t) k+2; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red=arguments[x++]; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green=arguments[x++]; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue=arguments[x++]; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black=arguments[x++]; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha=arguments[x++]; minimum = distance; } } break; } case VoronoiColorInterpolate: default: { size_t k; double minimum = MagickMaximumValue; /* Just use the closest control point you can find! */ for (k=0; k<number_arguments; k+=2+number_colors) { double distance = ((double)i-arguments[ k ])*((double)i-arguments[ k ]) + ((double)j-arguments[k+1])*((double)j-arguments[k+1]); if ( distance < minimum ) { register ssize_t x=(ssize_t) k+2; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red=arguments[x++]; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green=arguments[x++]; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue=arguments[x++]; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black=arguments[x++]; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha=arguments[x++]; minimum = distance; } } break; } } /* set the color directly back into the source image */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red=(MagickRealType) ClampPixel(QuantumRange*pixel.red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green=(MagickRealType) ClampPixel(QuantumRange*pixel.green); if 
((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue=(MagickRealType) ClampPixel(QuantumRange*pixel.blue); if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black=(MagickRealType) ClampPixel(QuantumRange*pixel.black); if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha=(MagickRealType) ClampPixel(QuantumRange*pixel.alpha); SetPixelViaPixelInfo(sparse_image,&pixel,q); q+=GetPixelChannels(sparse_image); } sync=SyncCacheViewAuthenticPixels(sparse_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SparseColorImage) #endif proceed=SetImageProgress(image,SparseColorTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } sparse_view=DestroyCacheView(sparse_view); if (status == MagickFalse) sparse_image=DestroyImage(sparse_image); } coeff = (double *) RelinquishMagickMemory(coeff); return(sparse_image); }
psd.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/channel.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/registry.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "coders/coders-private.h" #ifdef MAGICKCORE_ZLIB_DELEGATE #include <zlib.h> #endif #include "psd-private.h" /* Define declaractions. */ #define MaxPSDChannels 56 #define PSDQuantum(x) (((ssize_t) (x)+1) & -2) /* Enumerated declaractions. */ typedef enum { Raw = 0, RLE = 1, ZipWithoutPrediction = 2, ZipWithPrediction = 3 } PSDCompressionType; typedef enum { BitmapMode = 0, GrayscaleMode = 1, IndexedMode = 2, RGBMode = 3, CMYKMode = 4, MultichannelMode = 7, DuotoneMode = 8, LabMode = 9 } PSDImageType; /* Typedef declaractions. 
*/ typedef struct _ChannelInfo { MagickBooleanType supported; PixelChannel channel; size_t size; } ChannelInfo; typedef struct _MaskInfo { Image *image; RectangleInfo page; unsigned char background, flags; } MaskInfo; typedef struct _LayerInfo { ChannelInfo channel_info[MaxPSDChannels]; char blendkey[4]; Image *image; MaskInfo mask; Quantum opacity; RectangleInfo page; size_t offset_x, offset_y; unsigned char clipping, flags, name[257], visible; unsigned short channels; StringInfo *info; } LayerInfo; /* Forward declarations. */ static MagickBooleanType WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P S D % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPSD()() returns MagickTrue if the image format type, identified by the % magick string, is PSD. % % The format of the IsPSD method is: % % MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((const char *) magick,"8BPS",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPSDImage() reads an Adobe Photoshop image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. 
% % The format of the ReadPSDImage method is: % % Image *ReadPSDImage(image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static const char *CompositeOperatorToPSDBlendMode(Image *image) { switch (image->compose) { case ColorBurnCompositeOp: return(image->endian == LSBEndian ? "vidi" : "idiv"); case ColorDodgeCompositeOp: return(image->endian == LSBEndian ? " vid" : "div "); case ColorizeCompositeOp: return(image->endian == LSBEndian ? "rloc" : "colr"); case DarkenCompositeOp: return(image->endian == LSBEndian ? "krad" : "dark"); case DifferenceCompositeOp: return(image->endian == LSBEndian ? "ffid" : "diff"); case DissolveCompositeOp: return(image->endian == LSBEndian ? "ssid" : "diss"); case ExclusionCompositeOp: return(image->endian == LSBEndian ? "dums" : "smud"); case HardLightCompositeOp: return(image->endian == LSBEndian ? "tiLh" : "hLit"); case HardMixCompositeOp: return(image->endian == LSBEndian ? "xiMh" : "hMix"); case HueCompositeOp: return(image->endian == LSBEndian ? " euh" : "hue "); case LightenCompositeOp: return(image->endian == LSBEndian ? "etil" : "lite"); case LinearBurnCompositeOp: return(image->endian == LSBEndian ? "nrbl" : "lbrn"); case LinearDodgeCompositeOp: return(image->endian == LSBEndian ? "gddl" : "lddg"); case LinearLightCompositeOp: return(image->endian == LSBEndian ? "tiLl" : "lLit"); case LuminizeCompositeOp: return(image->endian == LSBEndian ? " mul" : "lum "); case MultiplyCompositeOp: return(image->endian == LSBEndian ? " lum" : "mul "); case OverlayCompositeOp: return(image->endian == LSBEndian ? "revo" : "over"); case PinLightCompositeOp: return(image->endian == LSBEndian ? "tiLp" : "pLit"); case SaturateCompositeOp: return(image->endian == LSBEndian ? " tas" : "sat "); case ScreenCompositeOp: return(image->endian == LSBEndian ? 
"nrcs" : "scrn"); case SoftLightCompositeOp: return(image->endian == LSBEndian ? "tiLs" : "sLit"); case VividLightCompositeOp: return(image->endian == LSBEndian ? "tiLv" : "vLit"); case OverCompositeOp: default: return(image->endian == LSBEndian ? "mron" : "norm"); } } /* For some reason Photoshop seems to blend semi-transparent pixels with white. This method reverts the blending. This can be disabled by setting the option 'psd:alpha-unblend' to off. */ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info, Image *image,ExceptionInfo* exception) { const char *option; MagickBooleanType status; ssize_t y; if ((image->alpha_trait != BlendPixelTrait) || (image->colorspace != sRGBColorspace)) return(MagickTrue); option=GetImageOption(image_info,"psd:alpha-unblend"); if (IsStringFalse(option) != MagickFalse) return(MagickTrue); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; ssize_t i; gamma=QuantumScale*GetPixelAlpha(image, q); if (gamma != 0.0 && gamma != 1.0) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); if (channel != AlphaPixelChannel) q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma); } } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return 
NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity, MagickBooleanType revert,ExceptionInfo *exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying layer opacity %.20g", (double) opacity); if (opacity == OpaqueAlpha) return(MagickTrue); if (image->alpha_trait != BlendPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(image,ClampToQuantum(QuantumScale* GetPixelAlpha(image,q)*opacity),q); else if (opacity > 0) SetPixelAlpha(image,ClampToQuantum((double) QuantumRange* GetPixelAlpha(image,q)/(MagickRealType) opacity),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask, Quantum background,MagickBooleanType revert,ExceptionInfo *exception) { Image *complete_mask; MagickBooleanType status; PixelInfo color; ssize_t y; if (image->alpha_trait == UndefinedPixelTrait) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying opacity mask"); complete_mask=CloneImage(image,0,0,MagickTrue,exception); if (complete_mask == (Image *) NULL) return(MagickFalse); complete_mask->alpha_trait=BlendPixelTrait; GetPixelInfo(complete_mask,&color); color.red=(MagickRealType) background; (void) 
SetImageColor(complete_mask,&color,exception); status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue, mask->page.x-image->page.x,mask->page.y-image->page.y,exception); if (status == MagickFalse) { complete_mask=DestroyImage(complete_mask); return(status); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; Quantum *p; ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception); if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha=(MagickRealType) GetPixelAlpha(image,q); intensity=GetPixelIntensity(complete_mask,p); if (revert == MagickFalse) SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q); else if (intensity > 0) SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q); q+=GetPixelChannels(image); p+=GetPixelChannels(complete_mask); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } complete_mask=DestroyImage(complete_mask); return(status); } static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info, ExceptionInfo *exception) { char *key; RandomInfo *random_info; StringInfo *key_info; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " preserving opacity mask"); random_info=AcquireRandomInfo(); key_info=GetRandomKey(random_info,2+1); key=(char *) GetStringInfoDatum(key_info); key[8]=(char) layer_info->mask.background; key[9]='\0'; layer_info->mask.image->page.x+=layer_info->page.x; layer_info->mask.image->page.y+=layer_info->page.y; (void) SetImageRegistry(ImageRegistryType,(const char *) key, layer_info->mask.image,exception); 
(void) SetImageArtifact(layer_info->image,"psd:opacity-mask", (const char *) key); key_info=DestroyStringInfo(key_info); random_info=DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 
0U : 255U;
          *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
          break;
        }
        case 2:
        {
          /* 2-bit samples: unpack four pixels per byte, high bits first. */
          CheckNumberPixels(4);
          *pixels++=(*compact_pixels >> 6) & 0x03;
          *pixels++=(*compact_pixels >> 4) & 0x03;
          *pixels++=(*compact_pixels >> 2) & 0x03;
          *pixels++=(*compact_pixels & 0x03) & 0x03;
          break;
        }
        case 4:
        {
          /* 4-bit samples: unpack two pixels per byte. */
          CheckNumberPixels(2);
          *pixels++=(*compact_pixels >> 4) & 0xff;
          *pixels++=(*compact_pixels & 0x0f) & 0xff;
          break;
        }
        default:
        {
          /* 8-bit (and wider) samples are copied through unchanged. */
          CheckNumberPixels(1);
          *pixels++=(*compact_pixels);
          break;
        }
      }
      compact_pixels++;
    }
  }
  /* Number of pixels actually decoded (may be short on truncated input). */
  return(i);
}

/*
  DestroyLayerInfo() releases the layer image, the mask image, and the
  additional-info blob of every entry in a LayerInfo array, then frees the
  array itself and returns the freed pointer for assignment by the caller.
*/
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    i;

  for (i=0; i<number_layers; i++)
  {
    if (layer_info[i].image != (Image *) NULL)
      layer_info[i].image=DestroyImage(layer_info[i].image);
    if (layer_info[i].mask.image != (Image *) NULL)
      layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image);
    if (layer_info[i].info != (StringInfo *) NULL)
      layer_info[i].info=DestroyStringInfo(layer_info[i].info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}

/*
  GetPSDPacketSize() returns the number of bytes per channel sample:
  2 for colormapped images with more than 256 colors, otherwise 4 for
  depth > 16, 2 for depth > 8, and 1 for 8 bits or less.
*/
static inline size_t GetPSDPacketSize(const Image *image)
{
  if (image->storage_class == PseudoClass)
    {
      if (image->colors > 256)
        return(2);
    }
  if (image->depth > 16)
    return(4);
  if (image->depth > 8)
    return(2);
  return(1);
}

/*
  GetPSDSize() reads a length field from the blob: 32 bits for PSD
  (version 1) files, 64 bits for PSB (version 2) files.
*/
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  if (psd_info->version == 1)
    return((MagickSizeType) ReadBlobLong(image));
  return((MagickSizeType) ReadBlobLongLong(image));
}

/*
  GetPSDRowSize() returns the byte length of one uncompressed scanline;
  1-bit images pack eight pixels per byte.
*/
static inline size_t GetPSDRowSize(Image *image)
{
  if (image->depth == 1)
    return(((image->columns+7)/8)*GetPSDPacketSize(image));
  else
    return(image->columns*GetPSDPacketSize(image));
}

/*
  ModeToString() maps a PSD color-mode enum to a human-readable name for
  verbose logging.
*/
static const char *ModeToString(PSDImageType type)
{
  switch (type)
  {
    case BitmapMode:
return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception) { ChannelType channel_mask; MagickBooleanType status; channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~ AlphaChannel)); status=NegateImage(image,MagickFalse,exception); (void) SetImageChannelMask(image,channel_mask); return(status); } static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image, const unsigned char *blocks,size_t length) { const unsigned char *p; ssize_t offset; StringInfo *profile; unsigned char name_length; unsigned int count; unsigned short id, short_sans; if (length < 16) return((StringInfo *) NULL); profile=BlobToStringInfo((const unsigned char *) NULL,length); SetStringInfoDatum(profile,blocks); SetStringInfoName(profile,"8bim"); for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); ) { if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p+=4; p=PushShortPixel(MSBEndian,p,&id); p=PushCharPixel(p,&name_length); if ((name_length % 2) == 0) name_length++; p+=name_length; if (p > (blocks+length-4)) break; p=PushLongPixel(MSBEndian,p,&count); offset=(ssize_t) count; if (((p+offset) < blocks) || ((p+offset) > (blocks+length))) break; switch (id) { case 0x03ed: { unsigned short resolution; /* Resolution info. 
*/ if (offset < 16) break; p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.x=(double) resolution; (void) FormatImageProperty(image,"tiff:XResolution","%*g", GetMagickPrecision(),image->resolution.x); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.y=(double) resolution; (void) FormatImageProperty(image,"tiff:YResolution","%*g", GetMagickPrecision(),image->resolution.y); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); image->units=PixelsPerInchResolution; break; } case 0x0421: { if ((offset > 4) && (*(p+4) == 0)) psd_info->has_merged_image=MagickFalse; p+=offset; break; } default: { p+=offset; break; } } if ((offset & 0x01) != 0) p++; } return(profile); } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char *) NULL) return(OverCompositeOp); if (LocaleNCompare(mode,"norm",4) == 0) return(OverCompositeOp); if (LocaleNCompare(mode,"mul ",4) == 0) return(MultiplyCompositeOp); if (LocaleNCompare(mode,"diss",4) == 0) return(DissolveCompositeOp); if (LocaleNCompare(mode,"diff",4) == 0) return(DifferenceCompositeOp); if (LocaleNCompare(mode,"dark",4) == 0) return(DarkenCompositeOp); if (LocaleNCompare(mode,"lite",4) == 0) return(LightenCompositeOp); if (LocaleNCompare(mode,"hue ",4) == 0) return(HueCompositeOp); if (LocaleNCompare(mode,"sat ",4) == 0) return(SaturateCompositeOp); if (LocaleNCompare(mode,"colr",4) == 0) return(ColorizeCompositeOp); if (LocaleNCompare(mode,"lum ",4) == 0) return(LuminizeCompositeOp); if (LocaleNCompare(mode,"scrn",4) == 0) return(ScreenCompositeOp); if (LocaleNCompare(mode,"over",4) == 0) return(OverlayCompositeOp); if (LocaleNCompare(mode,"hLit",4) == 0) return(HardLightCompositeOp); if (LocaleNCompare(mode,"sLit",4) == 0) return(SoftLightCompositeOp); if 
(LocaleNCompare(mode,"smud",4) == 0)
    return(ExclusionCompositeOp);
  if (LocaleNCompare(mode,"div ",4) == 0)
    return(ColorDodgeCompositeOp);
  if (LocaleNCompare(mode,"idiv",4) == 0)
    return(ColorBurnCompositeOp);
  if (LocaleNCompare(mode,"lbrn",4) == 0)
    return(LinearBurnCompositeOp);
  if (LocaleNCompare(mode,"lddg",4) == 0)
    return(LinearDodgeCompositeOp);
  if (LocaleNCompare(mode,"lLit",4) == 0)
    return(LinearLightCompositeOp);
  if (LocaleNCompare(mode,"vLit",4) == 0)
    return(VividLightCompositeOp);
  if (LocaleNCompare(mode,"pLit",4) == 0)
    return(PinLightCompositeOp);
  if (LocaleNCompare(mode,"hMix",4) == 0)
    return(HardMixCompositeOp);
  /* Unknown blend keys fall back to normal compositing. */
  return(OverCompositeOp);
}

/*
  ReadPSDString() reads length bytes from the blob into p and, when the
  image endian is not MSB, reverses the buffer in place (blend-mode keys are
  stored reversed on little-endian readers; see
  CompositeOperatorToPSDBlendMode).  Returns the number of bytes read.
*/
static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length)
{
  ssize_t
    count;

  count=ReadBlob(image,length,(unsigned char *) p);
  if ((count == (ssize_t) length) && (image->endian != MSBEndian))
    {
      char
        *q;

      q=p+length;
      /* In-place reversal using XOR swaps (no temporary). */
      for(--q; p < q; ++p, --q)
      {
        *p = *p ^ *q,
        *q = *p ^ *q,
        *p = *p ^ *q;
      }
    }
  return(count);
}

/*
  SetPSDPixel() stores one decoded channel sample into the pixel q.  For
  PseudoClass images the gray channel is treated as a colormap index (scaled
  down from Quantum when samples are one byte) and the resolved colormap
  entry is written to the pixel; the alpha channel updates the colormap
  entry's alpha.  DirectClass images write the channel value directly.
*/
static inline void SetPSDPixel(Image *image,const PixelChannel channel,
  const size_t packet_size,const Quantum pixel,Quantum *q,
  ExceptionInfo *exception)
{
  if (image->storage_class == PseudoClass)
    {
      PixelInfo
        *color;

      ssize_t
        index;

      if (channel == GrayPixelChannel)
        {
          index=(ssize_t) pixel;
          if (packet_size == 1)
            index=(ssize_t) ScaleQuantumToChar((Quantum) index);
          index=ConstrainColormapIndex(image,index,exception);
          SetPixelIndex(image,(Quantum) index,q);
        }
      else
        {
          /* Non-gray channel: reuse the index already stored in the pixel. */
          index=(ssize_t) GetPixelIndex(image,q);
          index=ConstrainColormapIndex(image,index,exception);
        }
      color=image->colormap+index;
      if (channel == AlphaPixelChannel)
        color->alpha=(MagickRealType) pixel;
      SetPixelViaPixelInfo(image,color,q);
    }
  else
    SetPixelChannel(image,channel,pixel,q);
}

/*
  ReadPSDChannelPixels() converts one decoded scanline (pixels) of a single
  channel into row `row` of the image, honoring the per-sample packet size
  (1/2/4 bytes) and expanding 1-bit rows eight pixels per byte.
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,const ssize_t row,
  const PixelChannel channel,const unsigned char *pixels,
  ExceptionInfo *exception)
{
  Quantum
    pixel;

  const unsigned char
    *p;

  Quantum
    *q;

  ssize_t
    x;

  size_t
    packet_size;

  p=pixels;
q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return MagickFalse;
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      if (packet_size == 2)
        {
          unsigned short
            nibble;

          p=PushShortPixel(MSBEndian,p,&nibble);
          pixel=ScaleShortToQuantum(nibble);
        }
      else
        {
          /* 4-byte samples are stored as big-endian floats in [0,1]. */
          MagickFloatType
            nibble;

          p=PushFloatPixel(MSBEndian,p,&nibble);
          pixel=ClampToQuantum(((MagickRealType) QuantumRange)*nibble);
        }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channel,packet_size,pixel,q,exception);
        q+=GetPixelChannels(image);
      }
    else
      {
        /* 1-bit rows: expand each byte into up to 8 pixels (set bit = 0,
           i.e. black-on-white), clamped at the row end. */
        ssize_t
          bit,
          number_bits;

        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit = 0; bit < (ssize_t) number_bits; bit++)
        {
          SetPSDPixel(image,channel,packet_size,
            (((unsigned char)((ssize_t) pixel)) & (0x01 << (7-bit))) != 0 ?
            0 : QuantumRange,q,exception);
          q+=GetPixelChannels(image);
          x++;
        }
        /* Compensate for the extra x++ of the outer loop unless the row
           ended exactly on this byte. */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}

/*
  ReadPSDChannelRaw() reads an uncompressed (RAW) channel, one scanline at a
  time, and stores it into the image via ReadPSDChannelPixels().  Returns
  MagickFalse on a short read or pixel-store failure.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const PixelChannel channel,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(pixels,0,row_size*sizeof(*pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    /* Assume failure until both the read and the pixel store succeed. */
    status=MagickFalse;
    count=ReadBlob(image,row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,y,channel,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}

/*
  ReadPSDRLESizes() reads the per-scanline compressed-byte counts that
  precede RLE data: 16-bit counts for PSD (version 1), 32-bit for PSB.
  Returns NULL when the allocation fails.
*/
static inline MagickOffsetType
*ReadPSDRLESizes(Image *image, const PSDInfo *psd_info,const size_t size) { MagickOffsetType *sizes; ssize_t y; sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes)); if(sizes != (MagickOffsetType *) NULL) { for (y=0; y < (ssize_t) size; y++) { if (psd_info->version == 1) sizes[y]=(MagickOffsetType) ReadBlobShort(image); else sizes[y]=(MagickOffsetType) ReadBlobLong(image); } } return sizes; } static MagickBooleanType ReadPSDChannelRLE(Image *image, const PixelChannel channel,MagickOffsetType *sizes, ExceptionInfo *exception) { MagickBooleanType status; size_t length, row_size; ssize_t count, y; unsigned char *compact_pixels, *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RLE compressed"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); length=0; for (y=0; y < (ssize_t) image->rows; y++) if ((MagickOffsetType) length < sizes[y]) length=(size_t) sizes[y]; if (length > (row_size+2048)) /* arbitrary number */ { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename); } compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels)); if (compact_pixels == (unsigned char *) NULL) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(compact_pixels,0,length*sizeof(*compact_pixels)); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,(size_t) sizes[y],compact_pixels); if (count != (ssize_t) sizes[y]) break; count=DecodePSDPixels((size_t) sizes[y],compact_pixels, (ssize_t) (image->depth == 1 ? 
123456 : image->depth),row_size,pixels); if (count != (ssize_t) row_size) break; status=ReadPSDChannelPixels(image,y,channel,pixels,exception); if (status == MagickFalse) break; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #ifdef MAGICKCORE_ZLIB_DELEGATE static void Unpredict8Bit(const Image *image,unsigned char *pixels, const size_t count,const size_t row_size) { unsigned char *p; size_t length, remaining; p=pixels; remaining=count; while (remaining > 0) { length=image->columns; while (--length) { *(p+1)+=*p; p++; } p++; remaining-=row_size; } } static void Unpredict16Bit(const Image *image,unsigned char *pixels, const size_t count,const size_t row_size) { unsigned char *p; size_t length, remaining; p=pixels; remaining=count; while (remaining > 0) { length=image->columns; while (--length) { p[2]+=p[0]+((p[1]+p[3]) >> 8); p[3]+=p[1]; p+=2; } p+=2; remaining-=row_size; } } static void Unpredict32Bit(const Image *image,unsigned char *pixels, unsigned char *output_pixels,const size_t row_size) { unsigned char *p, *q; ssize_t y; size_t offset1, offset2, offset3, remaining; unsigned char *start; offset1=image->columns; offset2=2*offset1; offset3=3*offset1; p=pixels; q=output_pixels; for (y=0; y < (ssize_t) image->rows; y++) { start=p; remaining=row_size; while (--remaining) { *(p+1)+=*p; p++; } p=start; remaining=image->columns; while (remaining--) { *(q++)=*p; *(q++)=*(p+offset1); *(q++)=*(p+offset2); *(q++)=*(p+offset3); p++; } p=start+row_size; } } static MagickBooleanType ReadPSDChannelZip(Image *image, const PixelChannel channel,const PSDCompressionType compression, const size_t compact_size,ExceptionInfo *exception) { MagickBooleanType status; unsigned char *p; size_t count, packet_size, row_size; ssize_t y; unsigned char *compact_pixels, *pixels; z_stream stream; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data 
is ZIP compressed"); if ((MagickSizeType) compact_size > GetBlobSize(image)) ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size, sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); packet_size=GetPSDPacketSize(image); row_size=image->columns*packet_size; count=image->rows*row_size; pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) { compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; stream.next_in=(Bytef *)compact_pixels; stream.avail_in=(uInt) compact_size; stream.next_out=(Bytef *)pixels; stream.avail_out=(uInt) count; if (inflateInit(&stream) == Z_OK) { int ret; while (stream.avail_out > 0) { ret=inflate(&stream,Z_SYNC_FLUSH); if ((ret != Z_OK) && (ret != Z_STREAM_END)) { (void) inflateEnd(&stream); compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(MagickFalse); } if (ret == Z_STREAM_END) break; } (void) inflateEnd(&stream); } if (compression == ZipWithPrediction) { if (packet_size == 1) Unpredict8Bit(image,pixels,count,row_size); else if (packet_size == 2) Unpredict16Bit(image,pixels,count,row_size); else if (packet_size == 4) { unsigned char *output_pixels; output_pixels=(unsigned char *) AcquireQuantumMemory(count, sizeof(*output_pixels)); if (pixels == (unsigned char *) 
NULL) { compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed",image->filename); } Unpredict32Bit(image,pixels,output_pixels,row_size); pixels=(unsigned char *) RelinquishMagickMemory(pixels); pixels=output_pixels; } } status=MagickTrue; p=pixels; for (y=0; y < (ssize_t) image->rows; y++) { status=ReadPSDChannelPixels(image,y,channel,p,exception); if (status == MagickFalse) break; p+=row_size; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #endif static MagickBooleanType ReadPSDChannel(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info, const size_t channel_index,const PSDCompressionType compression, ExceptionInfo *exception) { Image *channel_image, *mask; MagickOffsetType end_offset, offset; MagickBooleanType status; PixelChannel channel; end_offset=(MagickOffsetType) layer_info->channel_info[channel_index].size-2; if (layer_info->channel_info[channel_index].supported == MagickFalse) { (void) SeekBlob(image,end_offset,SEEK_CUR); return(MagickTrue); } channel_image=image; channel=layer_info->channel_info[channel_index].channel; mask=(Image *) NULL; if (channel == ReadMaskPixelChannel) { const char *option; /* Ignore mask that is not a user supplied layer mask, if the mask is disabled or if the flags have unsupported values. 
*/ option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if ((layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) && (IsStringTrue(option) == MagickFalse)) || (layer_info->mask.page.width < 1) || (layer_info->mask.page.height < 1)) { (void) SeekBlob(image,end_offset,SEEK_CUR); return(MagickTrue); } mask=CloneImage(image,layer_info->mask.page.width, layer_info->mask.page.height,MagickFalse,exception); if (mask != (Image *) NULL) { (void) ResetImagePixels(mask,exception); (void) SetImageType(mask,GrayscaleType,exception); channel_image=mask; channel=GrayPixelChannel; } } offset=TellBlob(image); status=MagickFalse; switch(compression) { case Raw: status=ReadPSDChannelRaw(channel_image,channel,exception); break; case RLE: { MagickOffsetType *sizes; sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ReadPSDChannelRLE(channel_image,channel,sizes,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); } break; case ZipWithPrediction: case ZipWithoutPrediction: #ifdef MAGICKCORE_ZLIB_DELEGATE status=ReadPSDChannelZip(channel_image,channel,compression, (const size_t) end_offset,exception); #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (ZLIB)",image->filename); #endif break; default: (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning, "CompressionNotSupported","'%.20g'",(double) compression); break; } (void) SeekBlob(image,offset+end_offset,SEEK_SET); if (status == MagickFalse) { if (mask != (Image *) NULL) (void) DestroyImage(mask); ThrowBinaryException(CoderError,"UnableToDecompressImage", image->filename); } if (mask != (Image *) NULL) { if (layer_info->mask.image != (Image *) NULL) layer_info->mask.image=DestroyImage(layer_info->mask.image); layer_info->mask.image=mask; } return(status); } 
static MagickBooleanType GetPixelChannelFromPsdIndex(const PSDInfo *psd_info,
  ssize_t index,PixelChannel *channel)
{
  /*
    Map a PSD channel index to a PixelChannel, based on the image mode.
    The first min_channels indices map directly to color channels; the
    next index becomes the alpha channel (remapped to -1 below), and any
    further indices become meta channels.  Negative input indices pass
    through: -1 selects alpha, -2 selects the read (layer) mask.
    Returns MagickFalse when the resulting index is out of range.
  */
  *channel=RedPixelChannel;
  switch (psd_info->mode)
  {
    case BitmapMode:
    case IndexedMode:
    case GrayscaleMode:
    {
      /* One color channel: index 1 is alpha, 2+ are meta channels. */
      if (index == 1)
        index=-1;
      else if (index > 1)
        index=StartMetaPixelChannel+index-2;
      break;
    }
    case LabMode:
    case MultichannelMode:
    case RGBMode:
    {
      /* Three color channels: index 3 is alpha, 4+ are meta channels. */
      if (index == 3)
        index=-1;
      else if (index > 3)
        index=StartMetaPixelChannel+index-4;
      break;
    }
    case CMYKMode:
    {
      /* Four color channels: index 4 is alpha, 5+ are meta channels. */
      if (index == 4)
        index=-1;
      else if (index > 4)
        index=StartMetaPixelChannel+index-5;
      break;
    }
  }
  if ((index < -2) || (index >= MaxPixelChannels))
    return(MagickFalse);
  if (index == -1)
    *channel=AlphaPixelChannel;
  else if (index == -2)
    *channel=ReadMaskPixelChannel;
  else
    *channel=(PixelChannel) index;
  return(MagickTrue);
}

static void SetPsdMetaChannels(Image *image,const PSDInfo *psd_info,
  const unsigned short channels,ExceptionInfo *exception)
{
  /*
    Register any channels beyond the colorspace minimum (and beyond an
    alpha channel, when present) as meta channels on the image.
  */
  ssize_t
    number_meta_channels;

  number_meta_channels=(ssize_t) channels-psd_info->min_channels;
  if (image->alpha_trait == BlendPixelTrait)
    number_meta_channels--;
  if (number_meta_channels > 0)
    (void) SetPixelMetaChannels(image,(size_t) number_meta_channels,exception);
}

static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  /*
    Populate one layer's image: initialize its background and compose
    operator, record layer attributes as artifacts, then read every
    channel's data (each channel is preceded by its own compression
    marker).  Continues on this and the following source lines.
  */
  char
    message[MagickPathExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "    setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  /* Hidden layers are attached but must not take part in compositing. */
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;
  /*
    Set up some hidden attributes for folks that need them.
*/ (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.x); (void) SetImageArtifact(layer_info->image,"psd:layer.x",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.y); (void) SetImageArtifact(layer_info->image,"psd:layer.y",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double) layer_info->opacity); (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message); (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name, exception); SetPsdMetaChannels(layer_info->image,psd_info,layer_info->channels,exception); status=MagickTrue; for (j=0; j < (ssize_t) layer_info->channels; j++) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for channel %.20g",(double) j); compression=(PSDCompressionType) ReadBlobShort(layer_info->image); layer_info->image->compression=ConvertPSDCompression(compression); status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info, (size_t) j,compression,exception); if (status == MagickFalse) break; } if (status != MagickFalse) status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity, MagickFalse,exception); if ((status != MagickFalse) && (layer_info->image->colorspace == CMYKColorspace)) status=NegateCMYK(layer_info->image,exception); if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL)) { const char *option; layer_info->mask.image->page.x=layer_info->mask.page.x; layer_info->mask.image->page.y=layer_info->mask.page.y; /* Do not composite the mask when it is disabled */ if ((layer_info->mask.flags & 0x02) == 0x02) layer_info->mask.image->compose=NoCompositeOp; else status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image, layer_info->mask.background == 0 ? 
0 : QuantumRange,MagickFalse, exception); option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if (IsStringTrue(option) != MagickFalse) PreservePSDOpacityMask(image,layer_info,exception); layer_info->mask.image=DestroyImage(layer_info->mask.image); } return(status); } static MagickBooleanType CheckPSDChannels(const Image *image, const PSDInfo *psd_info,LayerInfo *layer_info) { int channel_type; size_t blob_size; ssize_t i; if (layer_info->channels < psd_info->min_channels) return(MagickFalse); channel_type=RedChannel; if (psd_info->min_channels >= 3) channel_type|=(GreenChannel | BlueChannel); if (psd_info->min_channels >= 4) channel_type|=BlackChannel; blob_size=(size_t) GetBlobSize(image); for (i=0; i < (ssize_t) layer_info->channels; i++) { PixelChannel channel; if (layer_info->channel_info[i].size >= blob_size) return(MagickFalse); if (layer_info->channel_info[i].supported == MagickFalse) continue; channel=layer_info->channel_info[i].channel; if ((i == 0) && (psd_info->mode == IndexedMode) && (channel != RedPixelChannel)) return(MagickFalse); if (channel == AlphaPixelChannel) { channel_type|=AlphaChannel; continue; } if (channel == RedPixelChannel) channel_type&=~RedChannel; else if (channel == GreenPixelChannel) channel_type&=~GreenChannel; else if (channel == BluePixelChannel) channel_type&=~BlueChannel; else if (channel == BlackPixelChannel) channel_type&=~BlackChannel; } if (channel_type == 0) return(MagickTrue); if ((channel_type == AlphaChannel) && (layer_info->channels >= psd_info->min_channels + 1)) return(MagickTrue); return(MagickFalse); } static void AttachPSDLayers(Image *image,LayerInfo *layer_info, ssize_t number_layers) { ssize_t i; ssize_t j; for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=i; j < number_layers - 1; j++) layer_info[j] = layer_info[j+1]; number_layers--; i--; } } if (number_layers == 0) { layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); return; } for (i=0; i < 
number_layers; i++) { if (i > 0) layer_info[i].image->previous=layer_info[i-1].image; if (i < (number_layers-1)) layer_info[i].image->next=layer_info[i+1].image; layer_info[i].image->page=layer_info[i].page; } image->next=layer_info[0].image; layer_info[0].image->previous=image; layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); } static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info, const ImageInfo *image_info,const size_t index) { if (psd_info->has_merged_image == MagickFalse) return(MagickFalse); if (image_info->number_scenes == 0) return(MagickFalse); if (index < image_info->scene) return(MagickTrue); if (index > image_info->scene+image_info->number_scenes-1) return(MagickTrue); return(MagickFalse); } static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image) { /* The number of layers cannot be used to determine if the merged image contains an alpha channel. So we enable it when we think we should. */ if (((psd_info->mode == GrayscaleMode) && (psd_info->channels > 1)) || ((psd_info->mode == RGBMode) && (psd_info->channels > 3)) || ((psd_info->mode == CMYKMode) && (psd_info->channels > 4))) image->alpha_trait=BlendPixelTrait; } static void ParseAdditionalInfo(LayerInfo *layer_info) { char key[5]; size_t remaining_length; unsigned char *p; unsigned int size; p=GetStringInfoDatum(layer_info->info); remaining_length=GetStringInfoLength(layer_info->info); while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(char) (*p++); key[1]=(char) (*p++); key[2]=(char) (*p++); key[3]=(char) (*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) break; if (LocaleNCompare(key,"luni",sizeof(key)) == 0) { unsigned char *name; unsigned int length; length=(unsigned int) (*p++) << 24; length|=(unsigned int) (*p++) << 16; length|=(unsigned int) 
(*p++) << 8; length|=(unsigned int) (*p++); if (length * 2 > size - 4) break; if (sizeof(layer_info->name) <= length) break; name=layer_info->name; while (length > 0) { /* Only ASCII strings are supported */ if (*p++ != '\0') break; *name++=*p++; length--; } if (length == 0) *name='\0'; break; } else p+=size; remaining_length-=(size_t) size; } } static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image) { char type[4]; MagickSizeType size; ssize_t count; size=GetPSDSize(psd_info,image); if (size != 0) return(size); (void) ReadBlobLong(image); count=ReadPSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) return(0); count=ReadPSDString(image,type,4); if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) || (LocaleNCompare(type,"Mt32",4) == 0) || (LocaleNCompare(type,"Mtrn",4) == 0))) { size=GetPSDSize(psd_info,image); if (size != 0) return(0); image->alpha_trait=BlendPixelTrait; count=ReadPSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) return(0); count=ReadPSDString(image,type,4); } if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) || (LocaleNCompare(type,"Lr32",4) == 0))) size=GetPSDSize(psd_info,image); return(size); } static MagickBooleanType ReadPSDLayersInternal(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info, const MagickBooleanType skip_layers,ExceptionInfo *exception) { char type[4]; LayerInfo *layer_info; MagickSizeType size; MagickBooleanType status; ssize_t count, index, i, j, number_layers; size=GetLayerInfoSize(psd_info,image); if (size == 0) { CheckMergedImageAlpha(psd_info,image); return(MagickTrue); } layer_info=(LayerInfo *) NULL; number_layers=(ssize_t) ReadBlobSignedShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. 
*/ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->alpha_trait=BlendPixelTrait; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t top, left, bottom, right; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); top=(ssize_t) ReadBlobSignedLong(image); left=(ssize_t) ReadBlobSignedLong(image); bottom=(ssize_t) ReadBlobSignedLong(image); right=(ssize_t) ReadBlobSignedLong(image); if ((right < left) || (bottom < top)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } layer_info[i].page.y=top; layer_info[i].page.x=left; layer_info[i].page.width=(size_t) (right-left); layer_info[i].page.height=(size_t) (bottom-top); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " 
offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) layer_info[i].channels); for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].supported=GetPixelChannelFromPsdIndex( psd_info,(ssize_t) ReadBlobSignedShort(image), &layer_info[i].channel_info[j].channel); layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].channel, (double) layer_info[i].channel_info[j].size); } if (CheckPSDChannels(image,psd_info,&layer_info[i]) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadPSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadPSDString(image,layer_info[i].blendkey,4); if (count != 4) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping=(unsigned char) ReadBlobByte(image); layer_info[i].flags=(unsigned char) ReadBlobByte(image); layer_info[i].visible=!(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey,(double) layer_info[i].opacity, 
layer_info[i].clipping ? "true" : "false",layer_info[i].flags, layer_info[i].visible ? "true" : "false"); (void) ReadBlobByte(image); /* filler */ size=ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer contains additional info"); length=ReadBlobLong(image); combined_length=length+4; if (length != 0) { /* Layer mask info. */ layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.height=(size_t) (ReadBlobSignedLong(image)-layer_info[i].mask.page.y); layer_info[i].mask.page.width=(size_t) ( ReadBlobSignedLong(image)-layer_info[i].mask.page.x); layer_info[i].mask.background=(unsigned char) ReadBlobByte( image); layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y=layer_info[i].mask.page.y- layer_info[i].page.y; layer_info[i].mask.page.x=layer_info[i].mask.page.x- layer_info[i].page.x; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double) layer_info[i].mask.page.x,(double) layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,(double) layer_info[i].mask.page.height,(double) ((MagickOffsetType) length)-18); /* Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=ReadBlobLong(image); combined_length+=length+4; if (length != 0) { /* Layer blending ranges info. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer blending ranges: length=%.20g",(double) ((MagickOffsetType) length)); if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } /* Layer name. */ length=(MagickSizeType) (unsigned char) ReadBlobByte(image); combined_length+=length+1; if (length > 0) (void) ReadBlob(image,(size_t) length++,layer_info[i].name); layer_info[i].name[length]='\0'; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer name: %s",layer_info[i].name); if ((length % 4) != 0) { length=4-(length % 4); combined_length+=length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=(MagickSizeType) size-combined_length; if (length > 0) { unsigned char *info; if (length > GetBlobSize(image)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "InsufficientImageDataInFile",image->filename); } layer_info[i].info=AcquireStringInfo((const size_t) length); info=GetStringInfoDatum(layer_info[i].info); (void) ReadBlob(image,(const size_t) length,info); ParseAdditionalInfo(&layer_info[i]); } } } for (i=0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); continue; } /* Allocate layered image. 
*/ layer_info[i].image=CloneImage(image,layer_info[i].page.width, layer_info[i].page.height,MagickFalse,exception); if (layer_info[i].image == (Image *) NULL) { layer_info=DestroyLayerInfo(layer_info,number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of image for layer %.20g failed",(double) i); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } for (j=0; j < (ssize_t) layer_info[i].channels; j++) { if (layer_info[i].channel_info[j].channel == AlphaPixelChannel) { layer_info[i].image->alpha_trait=BlendPixelTrait; break; } } if (layer_info[i].info != (StringInfo *) NULL) { (void) SetImageProfile(layer_info[i].image,"psd:additional-info", layer_info[i].info,exception); layer_info[i].info=DestroyStringInfo(layer_info[i].info); } } if (image_info->ping != MagickFalse) { AttachPSDLayers(image,layer_info,number_layers); return(MagickTrue); } status=MagickTrue; index=0; for (i=0; i < number_layers; i++) { if ((layer_info[i].image == (Image *) NULL) || (PSDSkipImage(psd_info, image_info,++index) != MagickFalse)) { for (j=0; j < (ssize_t) layer_info[i].channels; j++) { if (DiscardBlobBytes(image,(MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } continue; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for layer %.20g",(double) i); status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i], exception); if (status == MagickFalse) break; status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i, (MagickSizeType) number_layers); if (status == MagickFalse) break; } if (status != MagickFalse) AttachPSDLayers(image,layer_info,number_layers); else layer_info=DestroyLayerInfo(layer_info,number_layers); return(status); } ModuleExport MagickBooleanType 
ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Public entry point for reading the layer section.  When the security
    policy denies PSD read rights this deliberately returns MagickTrue
    (skip layers, not a hard failure); otherwise it delegates to
    ReadPSDLayersInternal() with skip_layers disabled.
  */
  MagickBooleanType
    status;

  status=IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD");
  if (status == MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}

static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Read the flattened (merged) composite image that follows the layer
    section.  Channels are stored planar; only Raw and RLE compression
    are supported here.  For RLE, all rows*channels scanline byte counts
    precede the data and are read up front.
  */
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    i;

  /* When specific scenes beyond the first are requested, skip the merge. */
  if ((image_info->number_scenes != 0) && (image_info->scene != 0))
    return(MagickTrue);
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  SetPsdMetaChannels(image,psd_info,psd_info->channels,exception);
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    PixelChannel
      channel;

    status=GetPixelChannelFromPsdIndex(psd_info,i,&channel);
    if (status == MagickFalse)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          CorruptImageError,"MaximumChannelsExceeded","'%.20g'",(double) i);
        break;
      }
    if (compression == RLE)
      /* Each channel owns image->rows entries of the size table. */
      status=ReadPSDChannelRLE(image,channel,sizes+(i*image->rows),exception);
    else
      status=ReadPSDChannelRaw(image,channel,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  /* Statement continues on the next source line. */
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
status=NegateCMYK(image,exception); if (status != MagickFalse) status=CorrectPSDAlphaBlend(image_info,image,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); return(status); } static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType skip_layers; MagickOffsetType offset; MagickSizeType length; MagickBooleanType status; PSDInfo psd_info; ssize_t i; size_t image_list_length; ssize_t count; StringInfo *profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read image header. 
*/ image->endian=MSBEndian; count=ReadBlob(image,4,(unsigned char *) psd_info.signature); psd_info.version=ReadBlobMSBShort(image); if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) || ((psd_info.version != 1) && (psd_info.version != 2))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlob(image,6,psd_info.reserved); psd_info.channels=ReadBlobMSBShort(image); if (psd_info.channels < 1) ThrowReaderException(CorruptImageError,"MissingImageChannel"); if (psd_info.channels > MaxPSDChannels) ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded"); psd_info.rows=ReadBlobMSBLong(image); psd_info.columns=ReadBlobMSBLong(image); if ((psd_info.version == 1) && ((psd_info.rows > 30000) || (psd_info.columns > 30000))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.depth=ReadBlobMSBShort(image); if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16) && (psd_info.depth != 32)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.mode=ReadBlobMSBShort(image); if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s", (double) psd_info.columns,(double) psd_info.rows,(double) psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType) psd_info.mode)); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize image. 
*/ image->depth=psd_info.depth; image->columns=psd_info.columns; image->rows=psd_info.rows; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); status=ResetImagePixels(image,exception); if (status == MagickFalse) return(DestroyImageList(image)); psd_info.min_channels=3; switch (psd_info.mode) { case LabMode: { (void) SetImageColorspace(image,LabColorspace,exception); break; } case CMYKMode: { psd_info.min_channels=4; (void) SetImageColorspace(image,CMYKColorspace,exception); break; } case BitmapMode: case GrayscaleMode: case DuotoneMode: { if (psd_info.depth != 32) { status=AcquireImageColormap(image,MagickMin((size_t) (psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image colormap allocated"); } psd_info.min_channels=1; (void) SetImageColorspace(image,GRAYColorspace,exception); break; } case IndexedMode: { psd_info.min_channels=1; break; } case MultichannelMode: { if ((psd_info.channels > 0) && (psd_info.channels < 3)) { psd_info.min_channels=psd_info.channels; (void) SetImageColorspace(image,GRAYColorspace,exception); } break; } } if (psd_info.channels < psd_info.min_channels) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Read PSD raster colormap only present for indexed and duotone images. */ length=ReadBlobMSBLong(image); if ((psd_info.mode == IndexedMode) && (length < 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (length != 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading colormap"); if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32)) { /* Duotone image data; the format of this data is undocumented. 32 bits per pixel; the colormap is ignored. 
*/ (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR); } else { size_t number_colors; /* Read PSD raster colormap. */ number_colors=(size_t) length/3; if (number_colors > 65536) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireImageColormap(image,number_colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].red=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].green=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); image->alpha_trait=UndefinedPixelTrait; } } if ((image->depth == 1) && (image->storage_class != PseudoClass)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); psd_info.has_merged_image=MagickTrue; profile=(StringInfo *) NULL; length=ReadBlobMSBLong(image); if (length != 0) { unsigned char *blocks; /* Image resources block. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading image resource blocks - %.20g bytes",(double) ((MagickOffsetType) length)); if (length > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); blocks=(unsigned char *) AcquireQuantumMemory((size_t) length, sizeof(*blocks)); if (blocks == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,(size_t) length,blocks); if ((count != (ssize_t) length) || (length < 4) || (LocaleNCompare((char *) blocks,"8BIM",4) != 0)) { blocks=(unsigned char *) RelinquishMagickMemory(blocks); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length); blocks=(unsigned char *) RelinquishMagickMemory(blocks); } /* Layer and mask block. */ length=GetPSDSize(&psd_info,image); if (length == 8) { length=ReadBlobMSBLong(image); length=ReadBlobMSBLong(image); } offset=TellBlob(image); skip_layers=MagickFalse; if ((image_info->number_scenes == 1) && (image_info->scene == 0) && (psd_info.has_merged_image != MagickFalse)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " read composite only"); skip_layers=MagickTrue; } if (length == 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has no layers"); } else { if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers, exception) != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } /* Skip the rest of the layer and mask information. */ (void) SeekBlob(image,offset+length,SEEK_SET); } /* If we are only "pinging" the image, then we're done - so return. 
*/ if (EOFBlob(image) != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); } if (image_info->ping != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* Read the precombined layer, present for PSD < 4 compatibility. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading the precombined layer"); image_list_length=GetImageListLength(image); if ((psd_info.has_merged_image != MagickFalse) || (image_list_length == 1)) psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage( image_info,image,&psd_info,exception); if ((psd_info.has_merged_image == MagickFalse) && (image_list_length == 1) && (length != 0)) { (void) SeekBlob(image,offset,SEEK_SET); status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse, exception); if (status != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } image_list_length=GetImageListLength(image); } if (psd_info.has_merged_image == MagickFalse) { Image *merged; if (image_list_length == 1) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); } image->background_color.alpha=(MagickRealType) TransparentAlpha; image->background_color.alpha_trait=BlendPixelTrait; (void) SetImageBackgroundColor(image,exception); merged=MergeImageLayers(image,FlattenLayer,exception); if (merged == (Image *) NULL) { (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } ReplaceImageInList(&image,merged); } if (profile != (StringInfo *) NULL) { const char *option; Image *next; MagickBooleanType replicate_profile; option=GetImageOption(image_info,"psd:replicate-profile"); 
      /*
        Attach the profile to the frames that were kept: with the
        "psd:replicate-profile" option enabled the profile is copied onto
        every non-skipped frame, otherwise only onto the first one.
      */
      replicate_profile=IsStringTrue(option);
      i=0;
      next=image;
      while (next != (Image *) NULL)
      {
        if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse)
          {
            (void) SetImageProfile(next,GetStringInfoName(profile),profile,
              exception);
            if (replicate_profile == MagickFalse)
              break;
          }
        next=next->next;
      }
      profile=DestroyStringInfo(profile);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e g i s t e r P S D I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RegisterPSDImage() adds properties for the PSD image format to
%  the list of supported formats.  The properties include the image format
%  tag, a method to read and/or write the format, whether the format
%  supports the saving of more than one frame to the same file or blob,
%  whether the format supports native in-memory I/O, and a brief
%  description of the format.
%
%  The format of the RegisterPSDImage method is:
%
%      size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  MagickInfo
    *entry;

  /*
    Register both format tags served by this module; both need a seekable
    stream because the coder rewrites size/offset fields after the fact.
  */
  entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  entry->flags|=CoderEncoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  entry->flags|=CoderEncoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r P S D I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterPSDImage() removes format registrations made by the
%  PSD module from the list of supported formats.
%
%  The format of the UnregisterPSDImage method is:
%
%      UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
  /* Drop both tags registered by RegisterPSDImage(). */
  (void) UnregisterMagickInfo("PSB");
  (void) UnregisterMagickInfo("PSD");
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e P S D I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
%  The format of the WritePSDImage method is:
%
%      MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image: The image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Write a row-offset field sized per the header version: 16-bit for PSD
  (version 1), 32-bit for PSB (version 2).  Returns the byte count written.
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBShort(image,(unsigned short) offset));
  return(WriteBlobMSBLong(image,(unsigned int) offset));
}

/*
  Seek back to a previously reserved offset slot, overwrite it with the now
  known size, and restore the current stream position.
*/
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBShort(image,(unsigned short) size);
  else
    result=WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

/*
  Write a section-length field: 32-bit for PSD (version 1), 64-bit for PSB.
*/
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  if (psd_info->version == 1)
    return(WriteBlobLong(image,(unsigned int) size));
  return(WriteBlobLongLong(image,size));
}

/*
  Back-patch a section-length field at a known offset, preserving the
  current stream position (same pattern as WritePSDOffset above).
*/
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  result=SetPSDSize(psd_info,image,size);
  (void) SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

/*
  RLE-compress one scanline into compact_pixels and return the compressed
  byte count.  compact_pixels must be large enough for worst-case growth
  (see AcquireCompactPixels).
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  ssize_t
    i,
    j;

  unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /* Scratch buffer for building one literal run (max 127 bytes + header). */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  /* i counts remaining input bytes; short tails get dedicated cases. */
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* Three identical bytes: emit as a packed run of 3. */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            /* Run header is the two's-complement count, per PackBits. */
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        /* Extend the literal until three identical bytes start a run. */
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}

/*
  Emit the 2-byte compression tag and, for RLE, reserve one offset slot per
  channel row (filled in later via WritePSDOffset).  Returns bytes written.
*/
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  size_t
    length;

  ssize_t
    i,
    y;

  if (compression == RLECompression)
    {
      length=(size_t) WriteBlobShort(image,RLE);
      for (i=0; i < channels; i++)
        for (y=0; y < (ssize_t) next_image->rows; y++)
          length+=SetPSDOffset(psd_info,image,0);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  else
    if (compression == ZipCompression)
      length=(size_t) WriteBlobShort(image,ZipWithoutPrediction);
#endif
  else
    length=(size_t) WriteBlobShort(image,Raw);
  return(length);
}

/*
  Write the pixel data for one channel of next_image using the requested
  compression; when separate is set the channel carries its own compression
  tag and size_offset is re-derived from the current position.
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  const CompressionType compression,ExceptionInfo *exception)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  const Quantum
    *p;

  ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE
  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
    }
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(
        MagickMinBufferExtent,sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* Quality 1-9 maps directly onto the zlib compression level. */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    /* Bitmap-mode PSD stores 0 as white, so invert 1-bit data. */
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        /* Back-patch this row's byte count into the offset table. */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else
      if (compression == ZipCompression)
        {
          stream.avail_in=(uInt) length;
          stream.next_in=(Bytef *) pixels;
          if (y == (ssize_t) next_image->rows-1)
            flush=Z_FINISH;
          do
          {
            stream.avail_out=(uInt) MagickMinBufferExtent;
            stream.next_out=(Bytef *) compressed_pixels;
            if (deflate(&stream,flush) == Z_STREAM_ERROR)
              break;
            length=(size_t) MagickMinBufferExtent-stream.avail_out;
            if (length > 0)
              count+=WriteBlob(image,length,compressed_pixels);
          } while (stream.avail_out == 0);
        }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}

/*
  Allocate the worst-case RLE output buffer for one scanline; returns NULL
  (with an exception recorded) on allocation failure.
*/
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    packet_size;

  unsigned char
    *compact_pixels;

  packet_size=image->depth > 8UL ? 2UL : 1UL;
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*
    image->columns)+1,packet_size*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
    }
  return(compact_pixels);
}

/*
  Write all channels of next_image.  When separate is false the channels
  share one compression header (merged-image layout); when true each channel
  is written with its own header and its size is back-patched at size_offset.
*/
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  CompressionType
    compression;

  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  compression=next_image->compression;
  if (image_info->compression != UndefinedCompression)
    compression=image_info->compression;
  if (compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      if ((next_image->storage_class != PseudoClass) ||
          (IsImageGray(next_image) != MagickFalse))
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,
        (ssize_t) channels);
      /* Per-channel stride of the RLE row-offset table reserved above. */
      offset_length=(next_image->rows*(psd_info->version == 1 ?
        2 : 4));
    }
  size_offset+=2;
  if ((next_image->storage_class == PseudoClass) &&
      (IsImageGray(next_image) == MagickFalse))
    {
      /* Indexed image: a single channel of colormap indexes. */
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* PSD stores CMYK inverted; undo after writing (see below). */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,compression,
                exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if
         (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  /* Restore the CMYK channels inverted before writing. */
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /*
        A layer may carry an opacity mask registered under the
        "psd:opacity-mask" artifact; write it as an extra channel.
      */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
                exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}

/*
  Write a Pascal string (length byte + text, truncated to 255 characters),
  zero-padded so the total length is a multiple of `padding`.
*/
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    count,
    length;

  ssize_t
    i;

  /*
    Max length is 255.
  */
  count=0;
  length=(strlen(value) > 255UL ) ?
    255UL : strlen(value);
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  length++;  /* account for the length byte itself */
  if ((length % padding) == 0)
    return(count);
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}

/*
  Emit the 0x03ED "8BIM" resolution resource block; resolutions are stored
  as 16.16 fixed point in pixels per inch (converted from cm when needed).
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16);  /* resource size */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units);  /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units);  /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units);  /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units);  /* height unit */
}

/*
  Write a channel id followed by a zero size placeholder (patched later via
  WritePSDSize); returns the number of bytes written.
*/
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    count;

  count=(size_t) WriteBlobShort(image,(const unsigned short) channel);
  count+=SetPSDSize(psd_info,image,0);
  return(count);
}

/*
  Scan an "8BIM" resource block profile and splice out the embedded ICC
  profile resource (id 0x040f), shrinking the profile in place.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *)
      p,"8BIM",4) != 0)
      break;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* Resource payloads are padded to even length: header is 12 bytes. */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}

/*
  Companion to the routine above: drop the resolution resource (id 0x03ED)
  from an "8BIM" profile so WriteResolutionResourceBlock can emit a fresh one.
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    cnt=PSDQuantum(count);
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}

/*
  Filter the "psd:additional-info" profile according to the image option of
  the same name: "all" keeps everything, "selective" keeps only whitelisted
  keys, anything else discards the profile entirely.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post",
      "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;
  /* Each record: 4-byte signature, 4-byte key, 4-byte size, payload. */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* Unlisted key: compact the buffer over this record. */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  SetStringInfoLength(profile,(const size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}

/*
  Write the layer records and layer pixel data for all frames after the
  first; on success the total section size is stored in *layers_size (when
  non-NULL) and back-patched into the stream.
*/
static MagickBooleanType WritePSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
  ExceptionInfo *exception)
{
  char
    layer_name[MagickPathExtent];

  const char
    *property;

  const StringInfo
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,
    size_offset;

  ssize_t
    i;

  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    rounded_size,
    size;

  status=MagickTrue;
  base_image=GetNextImageInList(image);
  if (base_image == (Image *) NULL)
    base_image=image;
  size=0;
  /* Reserve the section-size field; patched at the end of this routine. */
  size_offset=TellBlob(image);
  (void) SetPSDSize(psd_info,image,0);
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* A negative layer count signals that the first alpha is transparency. */
  if (image->alpha_trait != UndefinedPixelTrait)
    size+=WriteBlobShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobShort(image,(unsigned short) layer_count);
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  layer_index=0;
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;

    unsigned char
      default_color;

    unsigned short
      channels,
      total_channels;

    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
        default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
      }
    /* Layer bounding box: top, left, bottom, right. */
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
      next_image->columns));
    channels=1;
    if ((next_image->storage_class != PseudoClass) &&
        (IsImageGray(next_image) == MagickFalse))
      channels=(unsigned short) (next_image->colorspace == CMYKColorspace ?
        4 : 3);
    total_channels=channels;
    if (next_image->alpha_trait != UndefinedPixelTrait)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobShort(image,total_channels);
    /* Remember where this layer's channel-size slots start for patching. */
    layer_size_offsets[layer_index++]=TellBlob(image);
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(psd_info,image,(signed short) i);
    /* Channel id -1 is the alpha channel, -2 the layer (user) mask. */
    if (next_image->alpha_trait != UndefinedPixelTrait)
      size+=WriteChannelSize(psd_info,image,-1);
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(psd_info,image,-2);
    /* Blend-mode signature, byte-swapped on little-endian blobs. */
    size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
    size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;

        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
      }
    else
      size+=WriteBlobByte(image,255);
    size+=WriteBlobByte(image,0);  /* clipping */
    size+=WriteBlobByte(image,(const unsigned char)
      (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc.
      */
    size+=WriteBlobByte(image,0);  /* filler */
    info=GetAdditionalInformation(image_info,next_image,exception);
    property=(const char *) GetImageProperty(next_image,"label",exception);
    if (property == (const char *) NULL)
      {
        /* Unlabeled layers get a synthetic "L<index>" name. */
        (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    /* Extra-data length: padded Pascal name + mask record + info blocks. */
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    size+=WriteBlobLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobLong(image,0);
    else
      {
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,exception);
        /* Mask coordinates are absolute, so add the canvas offset. */
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        size+=WriteBlobLong(image,20);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.y);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.x);
        size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+
          mask->page.y));
        size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+
          mask->page.x));
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,(const unsigned char)
          (mask->compose == NoCompositeOp ? 2 : 0));
        size+=WriteBlobMSBShort(image,0);  /* padding */
      }
    size+=WriteBlobLong(image,0);  /* blending-ranges length (none) */
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),
        GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Now the image data!
  */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    length=WritePSDChannels(psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue,exception);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write the total size
  */
  if (layers_size != (size_t*) NULL)
    *layers_size=size;
  /* The layer section size must be rounded up to an even byte count. */
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      (void) DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  return(status);
}

/*
  Public entry point: write the layer section after a coder-policy rights
  check; a denied policy is reported as success without writing anything.
*/
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=IsRightsAuthorized(CoderPolicyDomain,WritePolicyRights,"PSD");
  if (status == MagickFalse)
    return(MagickTrue);
  return WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL,
    exception);
}

static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const StringInfo
    *icc_profile;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  ssize_t
    i;

  size_t
    length,
    num_channels;

  StringInfo
    *bim_profile;

  /*
    Open image file.
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); psd_info.version=1; if ((LocaleCompare(image_info->magick,"PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version=2; (void) WriteBlob(image,4,(const unsigned char *) "8BPS"); (void) WriteBlobMSBShort(image,psd_info.version); /* version */ for (i=1; i <= 6; i++) (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */ if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) && (SetImageGray(image,exception) != MagickFalse)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorAlphaType) && (image->storage_class == PseudoClass)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void) SetImageStorageClass(image,DirectClass,exception); if (image->colorspace != CMYKColorspace) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL); else num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsImageGray(image) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. */ monochrome=IsImageMonochrome(image) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 
16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace,exception); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace,exception); (void) WriteBlobMSBShort(image,CMYKMode); } } if ((IsImageGray(image) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. */ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].red))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].green))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].blue))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. 
*/ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); } if (status != MagickFalse) { const char *option; CompressionType compression; MagickOffsetType size_offset; size_t size; size_offset=TellBlob(image); (void) SetPSDSize(&psd_info,image,0); option=GetImageOption(image_info,"psd:write-layers"); if (IsStringFalse(option) != MagickTrue) { status=WritePSDLayersInternal(image,image_info,&psd_info,&size, exception); (void) WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 8 : 12),size_offset); (void) WriteBlobMSBLong(image,0); /* user mask data */ } /* Write composite image. 
*/ compression=image->compression; if (image_info->compression != UndefinedCompression) image->compression=image_info->compression; if (image->compression == ZipCompression) image->compression=RLECompression; if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse, exception) == 0) status=MagickFalse; image->compression=compression; } (void) CloseBlob(image); return(status); }
vect-simd-clone-11.c
/* { dg-require-effective-target vect_simd_clones } */
/* { dg-additional-options "-fopenmp-simd" } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */

/* GCC vectorizer runtime test: checks that calls to a SIMD-clone with
   linear clauses are vectorized correctly, including misaligned stores
   (bar(a + 7)).  The exact code shape is what the test exercises; do not
   restructure it.  */

#include "tree-vect.h"

#ifndef N
#define N 1024
#endif

int a[N] __attribute__((aligned (32)));

/* SIMD clone: 'a' advances by 1 per lane, 'b' by 3, 'c' by 6.  */
#pragma omp declare simd linear(a) linear(b:3) linear(c:6) notinbranch
__attribute__((noinline)) int
foo (int a, int b, int c)
{
  /* Pack the three arguments into disjoint bit ranges so the checker in
     main() can verify each one independently.  */
  return a ^ (b * 512) ^ (c * 512 * 512);
}

__attribute__((noinline, noclone)) void
bar (int *d)
{
  int i, j, k;

  for (i = 0, j = 0, k = 0; i < N / 2; i++, j++, k += 3)
    d[i] = foo (j, i * 3, 2 * k + 2);
}

#if 0
/* Disabled variant using long induction variables with int casts.  */
__attribute__((noinline, noclone)) void
baz (int *d)
{
  long int i, j, k;

  for (i = 0, j = 0, k = 0; i < N / 2;
       i = (int) i + 1, j = (int) j + 1, k = (int) k + 3)
    d[i] = foo (j, i * 3, 2 * k + 2);
}
#endif

int
main ()
{
  int i;

  check_vect ();
  if (sizeof (int) * __CHAR_BIT__ < 32)
    return 0;  /* results would not fit in a narrow int */
  bar (a + 7);  /* misaligned destination */
  for (i = 0; i < N / 2; i++)
    if (a[i + 7] != (i ^ (i * 3 * 512) ^ (((i * 6) + 2) * 512 * 512)))
      abort ();
  bar (a);  /* aligned destination */
  for (i = 0; i < N / 2; i++)
    if (a[i] != (i ^ (i * 3 * 512) ^ (((i * 6) + 2) * 512 * 512)))
      abort ();
#if 0
  baz (a + 7);
  for (i = 0; i < N / 2; i++)
    if (a[i + 7] != (i ^ (i * 3 * 512) ^ (((i * 6) + 2) * 512 * 512)))
      abort ();
  baz (a);
  for (i = 0; i < N / 2; i++)
    if (a[i] != (i ^ (i * 3 * 512) ^ (((i * 6) + 2) * 512 * 512)))
      abort ();
#endif
  return 0;
}
GB_unop__log_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__log_fc64_fc64)
// op(A') function:  GB (_unop_tran__log_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = clog (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (complex natural log from <complex.h>)
#define GB_OP(z, x) \
    z = clog (x) ;

// casting (no-op: A and C share the same type)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = clog (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOG || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__log_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // dense/sparse case: apply the op to every entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = clog (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries not present
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = clog (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__log_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // shared transpose kernel, parameterized by the GB_* macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
hmacSHA256_fmt_plug.c
/* * This software is Copyright (c) 2012 magnum, and it is hereby released to the * general public under the following terms: Redistribution and use in source * and binary forms, with or without modification, are permitted. * * Based on hmac-md5 by Bartavelle * * SIMD added Feb, 2015, JimF. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_hmacSHA224; extern struct fmt_main fmt_hmacSHA256; #elif FMT_REGISTERS_H john_register_one(&fmt_hmacSHA224); john_register_one(&fmt_hmacSHA256); #else #include "sha2.h" #include "arch.h" #include "misc.h" #include "common.h" #include "base64_convert.h" #include "formats.h" #include "johnswap.h" #include "simd-intrinsics.h" #ifdef _OPENMP #include <omp.h> #ifdef SIMD_COEF_32 #ifndef OMP_SCALE #define OMP_SCALE 2048 // scaled on core i7-quad HT #endif #else #ifndef OMP_SCALE #define OMP_SCALE 512 // scaled K8-dual HT #endif #endif #endif #include "memdbg.h" #define FORMAT_LABEL "HMAC-SHA256" #define FORMAT_LABEL_224 "HMAC-SHA224" #define FORMAT_NAME "" #define ALGORITHM_NAME "password is key, SHA256 " SHA256_ALGORITHM_NAME #define ALGORITHM_NAME_224 "password is key, SHA224 " SHA256_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 125 #define PAD_SIZE 64 #define PAD_SIZE_W (PAD_SIZE/4) #define BINARY_SIZE (256/8) #define BINARY_SIZE_224 (224/8) #define BINARY_ALIGN 4 #ifndef SIMD_COEF_32 #define SALT_LENGTH 1023 #define SALT_ALIGN 1 #else #define SALT_LIMBS 5 /* 5 limbs, 311 bytes */ #define SALT_LENGTH (SALT_LIMBS * PAD_SIZE - 9) #define SALT_ALIGN MEM_ALIGN_SIMD #endif #define CIPHERTEXT_LENGTH (SALT_LENGTH + 1 + BINARY_SIZE * 2) #define CIPHERTEXT_LENGTH_224 (SALT_LENGTH + 1 + BINARY_SIZE_224 * 2) #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256) #define MAX_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256) #define GETPOS(i, index) ((index & (SIMD_COEF_32 - 1)) * 4 + ((i&63) & (0xffffffff - 3)) * SIMD_COEF_32 + (3 - ((i&63) & 3)) + (unsigned 
int)index/SIMD_COEF_32 * PAD_SIZE * SIMD_COEF_32) #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests tests[] = { {"The quick brown fox jumps over the lazy dog#f7bc83f430538424b13298e6aa6fb143ef4d59a14946175997479dbc2d1a3cd8", "key"}, {"#b613679a0814d9ec772f95d778c35fc5ff1697c493715653c6c712144292c5ad", ""}, {"Beppe#Grillo#14651BA87C7F7DA88BCE0DF1F89C223975AC0FDF9C35378CB0857A81DFD5C408", "Io credo nella reincarnazione e sono di Genova; per cui ho fatto testamento e mi sono lasciato tutto a me."}, {"jquYnUyWT5NsbvjQDZXyCxMJB6PryALZdYOZ1bEuagcUmYcbqpx5vOvpxj7VEhqW7OIzHR2O9JLDKrhuDfZxQk9jOENQb4OzEkRZmN8czdGdo7nshdYU1zcdoDGVb3YTCbjeZvazi#c8b4b8a7888787eebca16099fd076092269919bb032bfec48eed7f41d42eba9a", "magnum"}, // JWM hash. {"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOjEyMzQ1Njc4OTAsIm5hbWUiOiJKb2huIERvZSIsImFkbWluIjp0cnVlfQ.eoaDVGTClRdfxUZXiPs3f8FmJDkDE_VCQFXqKxpLsts", "secret" }, #ifndef SIMD_COEF_32 {"12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012#ff504b06ee64f
3ba7fe503496b451cf46ee34109a62d55cd4bf4f38077ee8145","1234567890" }, {"012345678901234567890123456789012345678901234567890123456789#6ec69f97e81e58b4a28ee13537c84df316cf8a6250e932de1d375e72843b8f9c", "123456"}, {"123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123#389c4d8db62dea4c108cf12662da3c9440149800cd1e74f3738ba804024343b7","1234567890" }, {"0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789#090487f586965594ae55d366cc9bc96d9f0ce44e253e975a1ed004c8a5edcf24", "123456"}, #endif {NULL} }; static struct fmt_tests tests_224[] = { {"what do ya want for nothing?#a30e01098bc6dbbf45690f3a7e9e6d0f8bbea2a39e6148008fd05e44", "Jefe"}, {"Beppe#Grillo#926E4A97B401242EF674CEE4C60D9FC6FF73007F871008D4C11F5B95", "Io credo nella reincarnazione e sono di Genova; per cui ho fatto testamento e mi sono lasciato tutto a me."}, {NULL} }; #ifdef SIMD_COEF_32 static unsigned char *crypt_key; static unsigned char *ipad, *prep_ipad; static unsigned char 
*opad, *prep_opad;

/* Pre-laid-out salt buffer for the SIMD path: the salt is replicated into
   every SIMD lane, split across SALT_LIMBS 64-byte limbs. */
typedef struct cur_salt_t {
	unsigned char salt[SALT_LIMBS][PAD_SIZE * MAX_KEYS_PER_CRYPT];
	int salt_len;
} cur_salt_t;
static cur_salt_t *cur_salt;
static int bufsize;
#define SALT_SIZE sizeof(cur_salt_t)
#else
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static unsigned char (*opad)[PAD_SIZE];
static unsigned char (*ipad)[PAD_SIZE];
static unsigned char cur_salt[SALT_LENGTH+1];
static SHA256_CTX *ipad_ctx;
static SHA256_CTX *opad_ctx;
#define SALT_SIZE sizeof(cur_salt)
#endif

static char (*saved_plain)[PLAINTEXT_LENGTH + 1];
/* set when keys changed since last crypt_all(); lets crypt_all reuse the
   precomputed ipad/opad state otherwise */
static int new_keys;

#ifdef SIMD_COEF_32
/* Reset all pads to the HMAC ipad/opad constants (RFC 2104). */
static void clear_keys(void)
{
	memset(ipad, 0x36, bufsize);
	memset(opad, 0x5C, bufsize);
}
#endif

/* Shared initializer for the SHA-256 and SHA-224 formats; B_LEN selects
   the digest size.  Allocates all per-candidate buffers. */
static void init(struct fmt_main *self, const int B_LEN)
{
#ifdef SIMD_COEF_32
	int i;
#endif
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_32
	bufsize = sizeof(*opad) * self->params.max_keys_per_crypt * PAD_SIZE;
	crypt_key = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
	ipad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
	opad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
	prep_ipad = mem_calloc_align(self->params.max_keys_per_crypt,
	                             BINARY_SIZE, MEM_ALIGN_SIMD);
	prep_opad = mem_calloc_align(self->params.max_keys_per_crypt,
	                             BINARY_SIZE, MEM_ALIGN_SIMD);
	for (i = 0; i < self->params.max_keys_per_crypt; ++i) {
		/* pre-set the SHA padding byte and the bit-length word for the
		   fixed-size inner-digest block */
		crypt_key[GETPOS(B_LEN, i)] = 0x80;
		((unsigned int*)crypt_key)[15 * SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + (i/SIMD_COEF_32) * PAD_SIZE_W * SIMD_COEF_32] = (B_LEN + PAD_SIZE) << 3;
	}
	clear_keys();
#else
	crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key));
	ipad = mem_calloc(self->params.max_keys_per_crypt, sizeof(*ipad));
	opad = mem_calloc(self->params.max_keys_per_crypt, sizeof(*opad));
	ipad_ctx = mem_calloc(self->params.max_keys_per_crypt, sizeof(*ipad_ctx));
	opad_ctx = mem_calloc(self->params.max_keys_per_crypt, sizeof(*opad_ctx));
#endif
	saved_plain = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_plain));
}

static void init_256(struct fmt_main *self)
{
	init(self, BINARY_SIZE);
}

static void init_224(struct fmt_main *self)
{
	init(self, BINARY_SIZE_224);
}

/* Release everything allocated by init(). */
static void done(void)
{
	MEM_FREE(saved_plain);
#ifdef SIMD_COEF_32
	MEM_FREE(prep_opad);
	MEM_FREE(prep_ipad);
#else
	MEM_FREE(opad_ctx);
	MEM_FREE(ipad_ctx);
#endif
	MEM_FREE(opad);
	MEM_FREE(ipad);
	MEM_FREE(crypt_key);
}

/* Canonicalize a ciphertext: JWT-style "a.b.sig" inputs are converted to
   the "salt#hexdigest" form; the hex digest is lower-cased. */
static char *split(char *ciphertext, int index, struct fmt_main *self,
                   const int B_LEN, const int CT_LEN)
{
	static char out[CIPHERTEXT_LENGTH + 1];

	if (strstr(ciphertext, "$SOURCE_HASH$"))
		return ciphertext;
	if (!strchr(ciphertext, '#') && strchr(ciphertext, '.') &&
	    strchr(ciphertext, '.') != strrchr(ciphertext, '.')) {
		// Treat this like a JWT hash. Convert into 'normal' hmac-sha256 format.
		char buf[BINARY_SIZE * 2 + 1], tmp[CIPHERTEXT_LENGTH + 1], *cpi;

		strnzcpy(tmp, ciphertext, sizeof(tmp));
		cpi = strchr(tmp, '.');
		cpi = strchr(&cpi[1], '.');
		if (cpi - tmp + B_LEN * 2 + 1 > CT_LEN)
			return ciphertext;
		*cpi++ = 0;
		memset(buf, 0, sizeof(buf));
		base64_convert(cpi, e_b64_mime, strlen(cpi), buf, e_b64_hex,
		               sizeof(buf), flg_Base64_NO_FLAGS, 0);
		if (strlen(buf) != B_LEN * 2)
			return ciphertext;
		sprintf(out, "%s#%s", tmp, buf);
	} else
		strnzcpy(out, ciphertext, sizeof(out));
	strlwr(strrchr(out, '#'));
	return out;
}

static char *split_256(char *ciphertext, int index, struct fmt_main *self)
{
	return split(ciphertext, index, self, BINARY_SIZE, CIPHERTEXT_LENGTH);
}

static char *split_224(char *ciphertext, int index, struct fmt_main *self)
{
	return split(ciphertext, index, self, BINARY_SIZE_224, CIPHERTEXT_LENGTH_224);
}

/* Validate "salt#hexdigest" (or JWT) input: salt length bound, digest
   length and hex-character check. */
static int valid(char *ciphertext, struct fmt_main *self,
                 const int B_LEN, const int CT_LEN)
{
	int pos, i;
	char *p;

	p = strrchr(ciphertext, '#'); // allow # in salt
	if (!p && strchr(ciphertext, '.') &&
	    strchr(ciphertext, '.') != strrchr(ciphertext, '.')) {
		if (strlen(ciphertext) > CT_LEN)
			return 0;
		ciphertext = split(ciphertext, 0, self, B_LEN, CT_LEN);
		p = strrchr(ciphertext, '#');
	}
	if (!p || p > &ciphertext[strlen(ciphertext)-1])
		return 0;
	i = (int)(p - ciphertext);
	if (i > SALT_LENGTH)
		return 0;
	pos = i + 1;
	if (strlen(ciphertext + pos) != B_LEN * 2)
		return 0;
	for (i = pos; i < B_LEN * 2 + pos; i++) {
		if (!( (('0' <= ciphertext[i])&&(ciphertext[i] <= '9')) ||
		       (('a' <= ciphertext[i])&&(ciphertext[i] <= 'f')) ||
		       (('A' <= ciphertext[i])&&(ciphertext[i] <= 'F'))))
			return 0;
	}
	return 1;
}

static int valid_256(char *ciphertext, struct fmt_main *self)
{
	return valid(ciphertext, self, BINARY_SIZE, CIPHERTEXT_LENGTH);
}

static int valid_224(char *ciphertext, struct fmt_main *self)
{
	return valid(ciphertext, self, BINARY_SIZE_224, CIPHERTEXT_LENGTH_224);
}

static void set_salt(void *salt)
{
#ifdef SIMD_COEF_32
	cur_salt = salt;
#else
	strcpy((char*)cur_salt, (char*)salt);
#endif
}

/* XOR the key into the 0x36/0x5C pads; keys longer than one block are
   first reduced with SHA-256/224 per RFC 2104.  The SIMD branch writes
   directly into the interleaved lane layout (big-endian words). */
static MAYBE_INLINE void set_key(char *key, int index, const int B_LEN)
{
	int len;
#ifdef SIMD_COEF_32
	ARCH_WORD_32 *ipadp = (ARCH_WORD_32*)&ipad[GETPOS(3, index)];
	ARCH_WORD_32 *opadp = (ARCH_WORD_32*)&opad[GETPOS(3, index)];
	const ARCH_WORD_32 *keyp = (ARCH_WORD_32*)key;
	unsigned int temp;

	len = strlen(key);
	memcpy(saved_plain[index], key, len);
	saved_plain[index][len] = 0;

	if (len > PAD_SIZE) {
		unsigned char k0[BINARY_SIZE];
		SHA256_CTX ctx;
		int i;

		if (B_LEN == BINARY_SIZE) {
			SHA256_Init(&ctx);
			SHA256_Update(&ctx, key, len);
			SHA256_Final(k0, &ctx);
		} else {
			SHA224_Init(&ctx);
			SHA224_Update(&ctx, key, len);
			SHA224_Final(k0, &ctx);
		}
		keyp = (unsigned int*)k0;
		for(i = 0; i < B_LEN / 4; i++, ipadp += SIMD_COEF_32, opadp += SIMD_COEF_32)
		{
			temp = JOHNSWAP(*keyp++);
			*ipadp ^= temp;
			*opadp ^= temp;
		}
	}
	else
	/* copy word-at-a-time until the NUL terminator is seen in the word */
	while(((temp = JOHNSWAP(*keyp++)) & 0xff000000)) {
		if (!(temp & 0x00ff0000) || !(temp & 0x0000ff00))
		{
			((unsigned short*)ipadp)[1] ^= (unsigned short)(temp >> 16);
			((unsigned short*)opadp)[1] ^= (unsigned short)(temp >> 16);
			break;
		}
		*ipadp ^= temp;
		*opadp ^= temp;
		if (!(temp & 0x000000ff))
			break;
		ipadp += SIMD_COEF_32;
		opadp += SIMD_COEF_32;
	}
#else
	int i;

	len = strlen(key);
	memcpy(saved_plain[index], key, len);
	saved_plain[index][len] = 0;

	memset(ipad[index], 0x36, PAD_SIZE);
	memset(opad[index], 0x5C, PAD_SIZE);

	if (len > PAD_SIZE) {
		SHA256_CTX ctx;
		unsigned char k0[BINARY_SIZE];

		if (B_LEN == BINARY_SIZE) {
			SHA256_Init( &ctx );
			SHA256_Update( &ctx, key, len);
			SHA256_Final( k0, &ctx);
		} else {
			SHA224_Init( &ctx );
			SHA224_Update( &ctx, key, len);
			SHA224_Final( k0, &ctx);
		}
		len = B_LEN;
		for(i=0;i<len;i++)
		{
			ipad[index][i] ^= k0[i];
			opad[index][i] ^= k0[i];
		}
	}
	else
	for(i=0;i<len;i++)
	{
		ipad[index][i] ^= key[i];
		opad[index][i] ^= key[i];
	}
#endif
	new_keys = 1;
}

static void set_key_256(char *key, int index) {
	set_key(key, index, BINARY_SIZE);
}

static void set_key_224(char *key, int index) {
	set_key(key, index, BINARY_SIZE_224);
}

static char *get_key(int index)
{
	return saved_plain[index];
}

/* Quick reject: compare only the first 32 bits of each candidate digest. */
static int cmp_all(void *binary, int count)
{
#ifdef SIMD_COEF_32
	unsigned int index;

	for(index = 0; index < count; index++)
	{
		// NOTE crypt_key is in input format (PAD_SIZE * SIMD_COEF_32)
		if(((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)crypt_key)[(index&(SIMD_COEF_32-1))+index/SIMD_COEF_32*PAD_SIZE_W*SIMD_COEF_32])
			return 1;
	}
	return 0;
#else
	int index = 0;

#if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1)
	for (; index < count; index++)
#endif
		if (((ARCH_WORD_32*)binary)[0] == crypt_key[index][0])
			return 1;
	return 0;
#endif
}

/* Full digest comparison for one candidate. */
static MAYBE_INLINE int cmp_one(void *binary, int index, const int B_LEN)
{
#ifdef SIMD_COEF_32
	int i;
	for(i = 0; i < (B_LEN/4); i++)
		// NOTE crypt_key is in input format (PAD_SIZE * SIMD_COEF_32)
		if (((ARCH_WORD_32*)binary)[i] != ((ARCH_WORD_32*)crypt_key)[i * SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32 * PAD_SIZE_W * SIMD_COEF_32])
			return 0;
	return 1;
#else
	return !memcmp(binary, crypt_key[index], B_LEN);
#endif
}

static int cmp_one_256(void *binary, int index)
{
	return cmp_one(binary, index, BINARY_SIZE);
}

static int cmp_one_224(void *binary, int index)
{
	return cmp_one(binary, index, BINARY_SIZE_224);
}

static int cmp_exact(char *source, int index)
{
	return (1);
}

/* Compute HMAC for all queued keys against the current salt.  The extra
   parameter differs per build: SIMD passes SSE flags (SSEi_CRYPT_SHA224
   selects SHA-224), scalar passes the digest length. */
static int crypt_all(int *pcount, struct db_salt *salt,
#ifdef SIMD_COEF_32
                     const unsigned EX_FLAGS
#else
                     const int B_LEN
#endif
	)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
#ifdef SIMD_COEF_32
		unsigned int i, *pclear;

		if (new_keys) {
			/* cache the one-block digests of ipad/opad; reused until
			   the keys change */
			SIMDSHA256body(&ipad[index * PAD_SIZE],
			            (unsigned int*)&prep_ipad[index * BINARY_SIZE],
			            NULL, SSEi_MIXED_IN|EX_FLAGS);
			SIMDSHA256body(&opad[index * PAD_SIZE],
			            (unsigned int*)&prep_opad[index * BINARY_SIZE],
			            NULL, SSEi_MIXED_IN|EX_FLAGS);
		}
		SIMDSHA256body(cur_salt->salt[0],
		        (unsigned int*)&crypt_key[index * PAD_SIZE],
		        (unsigned int*)&prep_ipad[index * BINARY_SIZE],
		        SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS);
		/* feed the remaining salt limbs (incl. padding/length block) */
		for (i = 1; i <= (cur_salt->salt_len + 8) / PAD_SIZE; i++)
			SIMDSHA256body(cur_salt->salt[i],
			         (unsigned int*)&crypt_key[index * PAD_SIZE],
			         (unsigned int*)&crypt_key[index * PAD_SIZE],
			         SSEi_MIXED_IN|SSEi_RELOAD_INP_FMT|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS);
		if (EX_FLAGS) {
			// NOTE, SSESHA224 will output 32 bytes. We need the first 28 (plus the 0x80 padding).
			// so we are forced to 'clean' this crap up, before using the crypt as the input.
			pclear = (unsigned int*)&crypt_key[(unsigned int)index/SIMD_COEF_32*PAD_SIZE_W*SIMD_COEF_32*4];
			for (i = 0; i < MAX_KEYS_PER_CRYPT; i++)
				pclear[28/4*SIMD_COEF_32+(i&(SIMD_COEF_32-1))+i/SIMD_COEF_32*PAD_SIZE_W*SIMD_COEF_32] = 0x80000000;
		}
		SIMDSHA256body(&crypt_key[index * PAD_SIZE],
		        (unsigned int*)&crypt_key[index * PAD_SIZE],
		        (unsigned int*)&prep_opad[index * BINARY_SIZE],
		        SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS);
#else
		SHA256_CTX ctx;

		// Note, for oSSL, we really only need SHA256_Init and SHA224_Init. From that point
		// on, SHA256_Update/SHA256_Final can be used. Also, jtr internal sha2.c file works
		// like that. BUT I am not sure every hash engine works that way, so we are keeping
		// the 'full' block.
		if (B_LEN == BINARY_SIZE) {
			if (new_keys) {
				SHA256_Init(&ipad_ctx[index]);
				SHA256_Update(&ipad_ctx[index], ipad[index], PAD_SIZE);
				SHA256_Init(&opad_ctx[index]);
				SHA256_Update(&opad_ctx[index], opad[index], PAD_SIZE);
			}
			memcpy(&ctx, &ipad_ctx[index], sizeof(ctx));
			SHA256_Update( &ctx, cur_salt, strlen( (char*) cur_salt) );
			SHA256_Final( (unsigned char*) crypt_key[index], &ctx);
			memcpy(&ctx, &opad_ctx[index], sizeof(ctx));
			SHA256_Update( &ctx, crypt_key[index], B_LEN);
			SHA256_Final( (unsigned char*) crypt_key[index], &ctx);
		} else {
			if (new_keys) {
				SHA224_Init(&ipad_ctx[index]);
				SHA224_Update(&ipad_ctx[index], ipad[index], PAD_SIZE);
				SHA224_Init(&opad_ctx[index]);
				SHA224_Update(&opad_ctx[index], opad[index], PAD_SIZE);
			}
			memcpy(&ctx, &ipad_ctx[index], sizeof(ctx));
			SHA224_Update( &ctx, cur_salt, strlen( (char*) cur_salt) );
			SHA224_Final( (unsigned char*) crypt_key[index], &ctx);
			memcpy(&ctx, &opad_ctx[index], sizeof(ctx));
			SHA224_Update( &ctx, crypt_key[index], B_LEN);
			SHA224_Final( (unsigned char*) crypt_key[index], &ctx);
		}
#endif
	}
	new_keys = 0;
	return count;
}

static int crypt_all_256(int *pcount, struct db_salt *salt)
{
#ifdef SIMD_COEF_32
	return crypt_all(pcount, salt, 0);
#else
	return crypt_all(pcount, salt, BINARY_SIZE);
#endif
}

static int crypt_all_224(int *pcount, struct db_salt *salt)
{
#ifdef SIMD_COEF_32
	return crypt_all(pcount, salt, SSEi_CRYPT_SHA224);
#else
	return crypt_all(pcount, salt, BINARY_SIZE_224);
#endif
}

/* Decode the hex digest after the last '#' into an aligned static buffer;
   byte-swapped on SIMD builds to match the lane layout. */
static void *get_binary(char *ciphertext, const int B_LEN)
{
	static union toalign {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD_32 a[1];
	} a;
	unsigned char *realcipher = a.c;
	int i,pos;

	for(i=strlen(ciphertext);ciphertext[i]!='#';i--); // allow # in salt
	pos=i+1;
	for(i=0;i<B_LEN;i++)
		realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i*2+pos])]*16 +
			atoi16[ARCH_INDEX(ciphertext[i*2+1+pos])];
#ifdef SIMD_COEF_32
	alter_endianity(realcipher, B_LEN);
#endif
	return (void*)realcipher;
}

static void *get_binary_256(char *ciphertext)
{
	return get_binary(ciphertext, BINARY_SIZE);
}

static void *get_binary_224(char *ciphertext)
{
	return get_binary(ciphertext, BINARY_SIZE_224);
}

/* Extract the salt (everything before the last '#').  On SIMD builds the
   salt is replicated into every lane with SHA padding and bit length
   pre-applied, so crypt_all can run the blocks straight through. */
static void *get_salt(char *ciphertext)
{
	static unsigned char salt[SALT_LENGTH+1];
	int len;
#ifdef SIMD_COEF_32
	unsigned int i = 0;
	static JTR_ALIGN(MEM_ALIGN_SIMD) cur_salt_t cur_salt;
	int salt_len = 0;
#endif

	// allow # in salt
	len = strrchr(ciphertext, '#') - ciphertext;
	memset(salt, 0, sizeof(salt));
	memcpy(salt, ciphertext, len);
#ifdef SIMD_COEF_32
	memset(&cur_salt, 0, sizeof(cur_salt));
	while(((unsigned char*)salt)[salt_len]) {
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
			cur_salt.salt[salt_len / PAD_SIZE][GETPOS(salt_len, i)] = ((unsigned char*)salt)[salt_len];
		++salt_len;
	}
	cur_salt.salt_len = salt_len;
	for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
		cur_salt.salt[salt_len / PAD_SIZE][GETPOS(salt_len, i)] = 0x80;
		((unsigned int*)cur_salt.salt[(salt_len + 8) / PAD_SIZE])[15 * SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + i/SIMD_COEF_32 * PAD_SIZE_W * SIMD_COEF_32] = (salt_len + PAD_SIZE) << 3;
	}
	return &cur_salt;
#else
	return salt;
#endif
}

/* Format descriptor: HMAC-SHA256. */
struct fmt_main fmt_hmacSHA256 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
		{ NULL },
		{ NULL },
		tests
	}, {
		init_256,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid_256,
		split_256,
		get_binary_256,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key_256,
		get_key,
#ifdef SIMD_COEF_32
		clear_keys,
#else
		fmt_default_clear_keys,
#endif
		crypt_all_256,
		{
			fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
		},
		cmp_all,
		cmp_one_256,
		cmp_exact
	}
};

/* Format descriptor: HMAC-SHA224 (same engine, different digest size). */
struct fmt_main fmt_hmacSHA224 = {
	{
		FORMAT_LABEL_224,
		FORMAT_NAME,
		ALGORITHM_NAME_224,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE_224,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
		{ NULL },
		{ NULL },
		tests_224
	}, {
		init_224,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid_224,
		split_224,
		get_binary_224,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key_224,
		get_key,
#ifdef SIMD_COEF_32
		clear_keys,
#else
		fmt_default_clear_keys,
#endif
		crypt_all_224,
		{
			fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
		},
		cmp_all,
		cmp_one_224,
		cmp_exact
	}
};

#endif /* plugin stanza */
ctl_fragment.c
/********************************************************************[libaroma]* * Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *______________________________________________________________________________ * * Filename : ctl_fragment.c * Description : Fragment Control Source * * + This is part of libaroma, an embedded ui toolkit. * + 27/06/15 - Author(s): Ahmad Amarullah * */ #ifndef __libaroma_ctl_fragment_c__ #define __libaroma_ctl_fragment_c__ #include <aroma_internal.h> #include "../ui/ui_internal.h" /*************************** CONTROL HANDLERS *********************************/ dword _libaroma_ctl_fragment_msg(LIBAROMA_CONTROLP, LIBAROMA_MSGP); void _libaroma_ctl_fragment_draw(LIBAROMA_CONTROLP, LIBAROMA_CANVASP); void _libaroma_ctl_fragment_destroy(LIBAROMA_CONTROLP); byte _libaroma_ctl_fragment_thread(LIBAROMA_CONTROLP); static LIBAROMA_CONTROL_HANDLER _libaroma_ctl_fragment_handler={ message:_libaroma_ctl_fragment_msg, draw:_libaroma_ctl_fragment_draw, focus:NULL, destroy:_libaroma_ctl_fragment_destroy, thread:_libaroma_ctl_fragment_thread }; /**************************** WINDOW HANDLERS *********************************/ byte _libaroma_ctl_fragment_window_invalidate(LIBAROMA_WINDOWP win, byte sync); byte _libaroma_ctl_fragment_window_sync(LIBAROMA_WINDOWP win, int x,int y,int w,int h); byte _libaroma_ctl_fragment_window_updatebg(LIBAROMA_WINDOWP win); byte _libaroma_ctl_fragment_window_control_isvisible( 
LIBAROMA_WINDOWP win,LIBAROMA_CONTROLP cctl ); LIBAROMA_CANVASP _libaroma_ctl_fragment_window_control_draw_begin( LIBAROMA_WINDOWP win,LIBAROMA_CONTROLP cctl ); void _libaroma_ctl_fragment_window_postfree(LIBAROMA_WINDOWP win); static LIBAROMA_WINDOW_HANDLER _libaroma_ctl_fragment_win_handler={ prefree:NULL, postfree:_libaroma_ctl_fragment_window_postfree, updatebg:_libaroma_ctl_fragment_window_updatebg, invalidate:_libaroma_ctl_fragment_window_invalidate, sync:_libaroma_ctl_fragment_window_sync, message_hooker:NULL, control_draw_flush:NULL, control_erasebg:NULL, control_isvisible:_libaroma_ctl_fragment_window_control_isvisible, control_draw_begin:_libaroma_ctl_fragment_window_control_draw_begin }; /************************** FRAGMENT STRUCTURE ********************************/ /* * Structure : __LIBAROMA_CTL_FRAGMENT * Typedef : _LIBAROMA_CTL_FRAGMENT, * _LIBAROMA_CTL_FRAGMENTP * Descriptions: button control internal structure */ typedef struct __LIBAROMA_CTL_FRAGMENT _LIBAROMA_CTL_FRAGMENT; typedef struct __LIBAROMA_CTL_FRAGMENT * _LIBAROMA_CTL_FRAGMENTP; struct __LIBAROMA_CTL_FRAGMENT{ LIBAROMA_WINDOWP * wins; int win_n; int win_pos; int win_pos_out; byte win_cleanup; long transition_start; long transition_duration; float transition_state; byte transition_type; byte transision_delprev; LIBAROMA_TRANSITION_CB transition_cb; LIBAROMA_RECTP transition_rs; LIBAROMA_RECTP transition_re; byte redraw; byte on_direct_canvas; byte need_direct_canvas; LIBAROMA_MUTEX mutex; LIBAROMA_MUTEX dmutex; int win_next_del_id; }; typedef struct{ int id; byte active_state; LIBAROMA_CONTROLP ctl; } _LIBAROMA_CTL_FRAGMENT_WIN, * _LIBAROMA_CTL_FRAGMENT_WINP; /************************** INTERNAL FUNCTIONS ********************************/ /* * Function : _libaroma_ctl_fragment_get_win_index * Return Value: int * Descriptions: get window index */ int _libaroma_ctl_fragment_get_win_index( _LIBAROMA_CTL_FRAGMENTP me, LIBAROMA_WINDOWP win){ int i; for (i=0;i<me->win_n;i++){ if 
(me->wins[i]==win){ return i; } } return -1; } /* End of _libaroma_ctl_fragment_get_win_index */ /* FRAGMENT VALIDATOR MACRO */ #define _VALIDATE_FRAGMENT(error_ret) \ _LIBAROMA_CTL_FRAGMENT_WINP wind = (_LIBAROMA_CTL_FRAGMENT_WINP) \ win->client_data; \ if (!wind){ return error_ret; } \ LIBAROMA_CONTROLP ctl=wind->ctl; \ _LIBAROMA_CTL_CHECK( \ _libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP, error_ret); \ int win_index = _libaroma_ctl_fragment_get_win_index(me,win); \ if (win_index==-1){ return error_ret; } /* * Function : _libaroma_ctl_fragment_direct_canvas * Return Value: byte * Descriptions: set as direct canvas */ byte _libaroma_ctl_fragment_direct_canvas(LIBAROMA_CONTROLP ctl, byte state){ _LIBAROMA_CTL_CHECK( _libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP, 0 ); libaroma_mutex_lock(me->dmutex); if ((me->win_n<1)||(me->win_pos==-1)) { libaroma_mutex_unlock(me->dmutex); return 0; } LIBAROMA_WINDOWP win = me->wins[me->win_pos]; if (state){ me->on_direct_canvas=1; } else{ if (me->on_direct_canvas){ LIBAROMA_CANVASP ccv = libaroma_control_draw_begin(ctl); if (ccv) { libaroma_draw(win->dc,ccv,0,0,0); libaroma_canvas_free(ccv); } } me->on_direct_canvas=0; } libaroma_mutex_unlock(me->dmutex); return 1; } /* End of _libaroma_ctl_fragment_direct_canvas */ /* * Function : _libaroma_ctl_fragment_window_invalidate * Return Value: byte * Descriptions: window invalidate */ byte _libaroma_ctl_fragment_window_invalidate(LIBAROMA_WINDOWP win, byte sync){ _VALIDATE_FRAGMENT(0); if ((win->dc)&&(win->bg)){ libaroma_draw(win->dc,win->bg,0,0,0); int i; #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<win->childn;i++){ /* draw no sync */ libaroma_control_draw(win->childs[i], 0); } } if (sync){ return _libaroma_ctl_fragment_window_sync(win,0,0,win->w,win->h); } return 1; } /* End of _libaroma_ctl_fragment_window_invalidate */ void _libaroma_ctl_fragment_measure(LIBAROMA_WINDOWP win){ _VALIDATE_FRAGMENT(); libaroma_mutex_lock(me->dmutex); 
win->x = 0; win->y = 0; win->ax=ctl->x; win->ay=ctl->y; win->w = ctl->w; win->h = ctl->h; if (win->dc){ if ((win->dc->w!=win->w)||(win->dc->h!=win->h)){ libaroma_canvas_free(win->dc); win->dc=NULL; } } if (!win->dc){ win->dc = libaroma_canvas( win->w, win->h ); } _libaroma_ctl_fragment_window_updatebg(win); int i; #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<win->childn;i++){ libaroma_window_measure(win,win->childs[i]); } libaroma_mutex_unlock(me->dmutex); } /* send activate event */ void _libaroma_ctl_fragment_activate_win(LIBAROMA_WINDOWP win, byte active){ _VALIDATE_FRAGMENT(); LIBAROMA_MSG msg; if (!active){ if (win->active){ wind->active_state=0; libaroma_wm_compose( &msg, LIBAROMA_MSG_WIN_INACTIVE, NULL, 0, 0 ); win->active=0; int i; #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<win->childn;i++){ if (win->childs[i]->handler->message){ win->childs[i]->handler->message(win->childs[i], &msg); } } } } else{ if (!win->active){ wind->active_state=1; if (!win->dc){ _libaroma_ctl_fragment_measure(win); } libaroma_wm_compose( &msg, LIBAROMA_MSG_WIN_ACTIVE, NULL, 0, 0 ); int i; win->active=1; #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<win->childn;i++){ if (win->childs[i]->handler->message){ win->childs[i]->handler->message(win->childs[i], &msg); } } } } } /* * Function : _libaroma_ctl_fragment_window_postfree * Return Value: void * Descriptions: post free window */ void _libaroma_ctl_fragment_window_postfree(LIBAROMA_WINDOWP win){ _VALIDATE_FRAGMENT(); if (wind){ free(wind); win->client_data=NULL; } } /* End of _libaroma_ctl_fragment_window_postfree */ /* * Function : _libaroma_ctl_fragment_window_sync * Return Value: byte * Descriptions: window sync */ byte _libaroma_ctl_fragment_window_sync(LIBAROMA_WINDOWP win, int x,int y,int w,int h){ _VALIDATE_FRAGMENT(0); if (!wind->active_state){ return 0; } me->redraw=1; return 1; } /* End of _libaroma_ctl_fragment_window_sync */ /* * Function 
: _libaroma_ctl_fragment_window_control_isvisible * Return Value: byte * Descriptions: check if control is visible */ byte _libaroma_ctl_fragment_window_control_isvisible( LIBAROMA_WINDOWP win,LIBAROMA_CONTROLP cctl ){ _VALIDATE_FRAGMENT(0); if (!wind->active_state){ return 0; } return 1; } /* End of _libaroma_ctl_fragment_window_control_isvisible */ /* * Function : _libaroma_ctl_fragment_window_control_draw_begin * Return Value: LIBAROMA_CANVASP * Descriptions: get canvas for child control */ LIBAROMA_CANVASP _libaroma_ctl_fragment_window_control_draw_begin( LIBAROMA_WINDOWP win,LIBAROMA_CONTROLP cctl ){ _VALIDATE_FRAGMENT(NULL); if (!wind->active_state){ return NULL; } LIBAROMA_CANVASP c=NULL; libaroma_mutex_lock(me->dmutex); if (me->on_direct_canvas){ int x = cctl->x; int y = cctl->y; int w = cctl->w; int h = cctl->h; LIBAROMA_CANVASP ccv = libaroma_control_draw_begin(ctl); if (ccv){ if ((ccv->w>x)&&(ccv->h>y)){ c = libaroma_canvas_area(ccv,x,y,w,h); } libaroma_canvas_free(ccv); } } else { if (win->dc!=NULL){ c = libaroma_canvas_area( win->dc, cctl->x, cctl->y, cctl->w, cctl->h ); } } libaroma_mutex_unlock(me->dmutex); return c; } /* End of _libaroma_ctl_fragment_window_control_draw_begin */ /* * Function : _libaroma_ctl_fragment_window_updatebg * Return Value: byte * Descriptions: window update background */ byte _libaroma_ctl_fragment_window_updatebg(LIBAROMA_WINDOWP win){ _VALIDATE_FRAGMENT(0); libaroma_mutex_lock(me->dmutex); int w = win->w; int h = win->h; if (win->bg!=NULL){ if ((win->bg->w==w)&&(win->bg->h==h)){ libaroma_mutex_unlock(me->dmutex); return 1; } libaroma_canvas_free(win->bg); } win->bg = libaroma_canvas(w,h); libaroma_canvas_setcolor( win->bg, libaroma_colorget(ctl,NULL)->window_bg, 0xff ); libaroma_mutex_unlock(me->dmutex); return 1; } /* End of _libaroma_ctl_fragment_window_sync */ /* * Function : _libaroma_ctl_fragment_draw * Return Value: void * Descriptions: draw callback */ void _libaroma_ctl_fragment_draw( LIBAROMA_CONTROLP ctl, 
LIBAROMA_CANVASP c){ _LIBAROMA_CTL_CHECK( _libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP, ); libaroma_mutex_lock(me->mutex); if ((me->win_n<1)||(me->win_pos==-1)) { libaroma_control_erasebg(ctl,c); me->redraw=0; libaroma_mutex_unlock(me->mutex); return; } if (!me->redraw){ int i; #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<me->win_n;i++){ _LIBAROMA_CTL_FRAGMENT_WINP wind = (_LIBAROMA_CTL_FRAGMENT_WINP) me->wins[i]->client_data; if (wind->active_state){ if (!me->wins[i]->active){ _libaroma_ctl_fragment_window_invalidate(me->wins[i],0); } } } } /* draw window canvas */ libaroma_mutex_lock(me->dmutex); if (!me->on_direct_canvas){ if (me->win_pos_out==-1){ LIBAROMA_WINDOWP awin = me->wins[me->win_pos]; if (awin->dc){ libaroma_draw(c,awin->dc,0,0,0); } else{ libaroma_control_erasebg(ctl,c); } } else{ LIBAROMA_WINDOWP awin = me->wins[me->win_pos]; LIBAROMA_WINDOWP owin = me->wins[me->win_pos_out]; if (me->transition_state==1){ if (awin->dc){ libaroma_draw(c,awin->dc,0,0,0); } else{ libaroma_control_erasebg(ctl,c); } me->transition_state=0; } else if ((me->transition_cb)&&(owin->dc)&&(awin->dc)){ me->transition_cb( c, owin->dc, awin->dc, me->transition_state, me->transition_rs, me->transition_re ); } else{ /* simple alpha transition */ if (owin->dc){ libaroma_draw(c,owin->dc,0,0,0); } else{ libaroma_control_erasebg(ctl,c); } if (awin->dc){ libaroma_draw_opacity(c,awin->dc,0,0,0,0xff*me->transition_state); } } } } libaroma_mutex_unlock(me->dmutex); /* need revert to direct canvas */ if (me->need_direct_canvas){ me->need_direct_canvas=0; _libaroma_ctl_fragment_direct_canvas(ctl, 1); } me->redraw=0; libaroma_mutex_unlock(me->mutex); } /* End of _libaroma_ctl_fragment_draw */ byte libaroma_ctl_fragment_del_window_nomutex( LIBAROMA_CONTROLP ctl, int id); /* * Function : _libaroma_ctl_fragment_thread * Return Value: byte * Descriptions: control thread callback */ byte _libaroma_ctl_fragment_thread(LIBAROMA_CONTROLP ctl) { /* internal check 
*/ _LIBAROMA_CTL_CHECK( _libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP, 0 ); if ((me->win_n<1)||(me->win_pos==-1)) { return 0; } libaroma_mutex_lock(me->mutex); if (me->win_next_del_id!=-1){ libaroma_ctl_fragment_del_window_nomutex(ctl,me->win_next_del_id); me->win_next_del_id=-1; } byte is_draw = me->redraw; { int j; #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (j=0;j<me->win_n;j++){ LIBAROMA_WINDOWP win = me->wins[j]; _LIBAROMA_CTL_FRAGMENT_WINP wind = (_LIBAROMA_CTL_FRAGMENT_WINP) win->client_data; if (wind->active_state){ if (win->active){ int i; #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<win->childn;i++){ LIBAROMA_CONTROLP c=win->childs[i]; if (c->handler->thread!=NULL){ if (c->handler->thread(c)){ if (libaroma_control_draw(c,0)){ is_draw=1; } } } } } } } } { if ((me->transition_start!=0)&&(me->win_pos_out!=-1)){ float nowstate=libaroma_duration_state( me->transition_start, me->transition_duration ); if (nowstate!=me->transition_state){ if (nowstate>=1){ me->transition_start=0; me->transition_state=1; me->need_direct_canvas=1; if (me->transision_delprev){ _LIBAROMA_CTL_FRAGMENT_WINP windd= (_LIBAROMA_CTL_FRAGMENT_WINP) me->wins[me->win_pos_out]->client_data; me->win_next_del_id=windd->id; } _libaroma_ctl_fragment_activate_win( me->wins[me->win_pos_out], 0 ); me->win_pos_out=-1; me->transision_delprev=0; } else{ me->transition_state=nowstate; } is_draw=1; } } } libaroma_mutex_unlock(me->mutex); return is_draw; } /* End of _libaroma_ctl_fragment_thread */ /* * Function : _libaroma_ctl_fragment_destroy * Return Value: void * Descriptions: destroy callback */ void _libaroma_ctl_fragment_destroy( LIBAROMA_CONTROLP ctl){ /* internal check */ _LIBAROMA_CTL_CHECK( _libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP, ); libaroma_mutex_lock(me->mutex); if (me->win_n>0){ int i; #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<me->win_n;i++){ libaroma_window_free(me->wins[i]); } 
free(me->wins); me->wins=NULL; me->win_n=0; } libaroma_mutex_unlock(me->mutex); libaroma_mutex_free(me->mutex); libaroma_mutex_free(me->dmutex); free(me); } /* End of _libaroma_ctl_fragment_destroy */ /* * Function : _libaroma_ctl_fragment_msg * Return Value: byte * Descriptions: message callback */ dword _libaroma_ctl_fragment_msg( LIBAROMA_CONTROLP ctl, LIBAROMA_MSGP msg){ /* internal check */ _LIBAROMA_CTL_CHECK( _libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP, 0 ); dword ret = 0; switch(msg->msg){ case LIBAROMA_MSG_WIN_ACTIVE: case LIBAROMA_MSG_WIN_INACTIVE: case LIBAROMA_MSG_WIN_RESIZE: { libaroma_mutex_lock(me->mutex); int z; for (z=0;z<me->win_n;z++){ LIBAROMA_WINDOWP win = me->wins[z]; _LIBAROMA_CTL_FRAGMENT_WINP windn = (_LIBAROMA_CTL_FRAGMENT_WINP) win->client_data; if (!windn->active_state){ continue; } int i; #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<win->childn;i++){ if (win->childs[i]->handler->message){ win->childs[i]->handler->message(win->childs[i], msg); } } } libaroma_mutex_unlock(me->mutex); } break; case LIBAROMA_MSG_WIN_MEASURED: { int z; libaroma_mutex_lock(me->mutex); for (z=0;z<me->win_n;z++){ LIBAROMA_WINDOWP win = me->wins[z]; _LIBAROMA_CTL_FRAGMENT_WINP windn = (_LIBAROMA_CTL_FRAGMENT_WINP) win->client_data; if (windn->active_state){ _libaroma_ctl_fragment_measure(win); } } libaroma_mutex_unlock(me->mutex); } break; case LIBAROMA_MSG_TOUCH: { libaroma_mutex_lock(me->mutex); if ((me->win_n<1)||(me->win_pos==-1)) { libaroma_mutex_unlock(me->mutex); return 0; } LIBAROMA_WINDOWP win = me->wins[me->win_pos]; if (me->win_pos_out!=-1){ me->win_cleanup=1; libaroma_mutex_unlock(me->mutex); return 0; } if ((msg->state!=LIBAROMA_HID_EV_STATE_DOWN)&&(me->win_cleanup)){ libaroma_mutex_unlock(me->mutex); return 0; } me->win_cleanup=0; int x = msg->x; int y = msg->y; libaroma_window_calculate_pos(NULL,ctl,&x,&y); msg->x = x; msg->y = y; /* touch handler */ if (msg->state==LIBAROMA_HID_EV_STATE_DOWN){ win->touched 
= NULL; int i; for (i=0;i<win->childn;i++){ if (_libaroma_window_is_inside(win->childs[i],x,y)){ win->touched = win->childs[i]; break; } } if (win->touched!=NULL){ if (win->touched->handler->message){ ret=win->touched->handler->message(win->touched, msg); } } } else if (win->touched!=NULL){ if (win->touched->handler->message){ ret=win->touched->handler->message(win->touched, msg); } if (msg->state==LIBAROMA_HID_EV_STATE_UP){ win->touched=NULL; } } libaroma_mutex_unlock(me->mutex); } break; } return ret; } /* End of _libaroma_ctl_fragment_msg */ /* * Function : libaroma_ctl_fragment * Return Value: LIBAROMA_CONTROLP * Descriptions: create button control */ LIBAROMA_CONTROLP libaroma_ctl_fragment( LIBAROMA_WINDOWP win, word id, int x, int y, int w, int h ){ if (!win){ ALOGW("pager need direct window attach"); return NULL; } /* init internal data */ _LIBAROMA_CTL_FRAGMENTP me = (_LIBAROMA_CTL_FRAGMENTP) calloc(sizeof(_LIBAROMA_CTL_FRAGMENT),1); if (!me){ ALOGW("libaroma_ctl_fragment alloc pager memory failed"); return NULL; } me->win_pos_out=-1; me->win_pos=-1; me->wins = NULL; me->on_direct_canvas = 1; me->win_next_del_id=-1; /* init control */ LIBAROMA_CONTROLP ctl = libaroma_control_new( id, x, y, w, h, libaroma_dp(48),libaroma_dp(48), /* min size */ (voidp) me, &_libaroma_ctl_fragment_handler, NULL ); if (!ctl){ free(me); return NULL; } libaroma_mutex_init(me->mutex); libaroma_mutex_init(me->dmutex); return libaroma_window_attach(win,ctl); } /* End of libaroma_ctl_fragment */ /* * Function : libaroma_ctl_fragment_new_window * Return Value: LIBAROMA_WINDOWP * Descriptions: new window */ LIBAROMA_WINDOWP libaroma_ctl_fragment_new_window( LIBAROMA_CONTROLP ctl, int id){ _LIBAROMA_CTL_CHECK( _libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP, NULL ); if (!ctl->window){ ALOGW("libaroma_ctl_fragment_new_window fragment should append to " "window first"); return NULL; } libaroma_mutex_lock(me->mutex); int new_pos = me->win_n; if (me->win_n==0){ 
me->wins=(LIBAROMA_WINDOWP *) calloc(sizeof(LIBAROMA_WINDOWP),1); if (!me->wins){ libaroma_mutex_unlock(me->mutex); ALOGW("libaroma_ctl_fragment_new_window calloc window holder failed"); return NULL; } me->win_n=1; } else{ int i; for (i=0;i<me->win_n;i++){ _LIBAROMA_CTL_FRAGMENT_WINP windn = (_LIBAROMA_CTL_FRAGMENT_WINP) me->wins[i]->client_data; if (id==windn->id){ ALOGW("libaroma_ctl_fragment_new_window id already exist"); return NULL; } } LIBAROMA_WINDOWP * newins =(LIBAROMA_WINDOWP *) realloc( me->wins, sizeof(LIBAROMA_WINDOWP)*(me->win_n+1)); if (newins){ me->wins=newins; me->win_n++; } else{ libaroma_mutex_unlock(me->mutex); ALOGW("libaroma_ctl_fragment_new_window realloc window holder failed"); return NULL; } } me->wins[new_pos] = (LIBAROMA_WINDOWP) calloc(sizeof(LIBAROMA_WINDOW),1); if (!me->wins[new_pos]){ ALOGW("libaroma_ctl_fragment_new_window alloc window data failed"); if (me->win_n==1){ free(me->wins); me->win_n=0; me->wins=NULL; } else{ me->wins =(LIBAROMA_WINDOWP *) realloc(me->wins, sizeof(LIBAROMA_WINDOWP)*(me->win_n-1)); me->win_n--; } libaroma_mutex_unlock(me->mutex); return NULL; } LIBAROMA_WINDOWP nwin = me->wins[new_pos]; nwin->handler=&_libaroma_ctl_fragment_win_handler; nwin->parent=ctl->window; _LIBAROMA_CTL_FRAGMENT_WINP wind = (_LIBAROMA_CTL_FRAGMENT_WINP) calloc( sizeof(_LIBAROMA_CTL_FRAGMENT_WIN), 1); wind->id = id; wind->active_state = 0; wind->ctl = ctl; nwin->client_data = (voidp) wind; libaroma_mutex_unlock(me->mutex); return me->wins[new_pos]; } /* End of libaroma_ctl_fragment_new_window */ /* * Function : libaroma_ctl_fragment_get_window * Return Value: LIBAROMA_WINDOWP * Descriptions: get window */ LIBAROMA_WINDOWP libaroma_ctl_fragment_get_window( LIBAROMA_CONTROLP ctl, int id){ _LIBAROMA_CTL_CHECK( _libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP, NULL ); int i; libaroma_mutex_lock(me->mutex); for (i=0;i<me->win_n;i++){ _LIBAROMA_CTL_FRAGMENT_WINP windn = (_LIBAROMA_CTL_FRAGMENT_WINP) me->wins[i]->client_data; if 
(id==windn->id){ libaroma_mutex_unlock(me->mutex); return me->wins[i]; } } libaroma_mutex_unlock(me->mutex); return NULL; } /* * Function : libaroma_ctl_fragment_del_window * Return Value: byte * Descriptions: delete window */ byte libaroma_ctl_fragment_del_window_nomutex( LIBAROMA_CONTROLP ctl, int id){ _LIBAROMA_CTL_CHECK( _libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP, 0 ); /* wait for transition */ while(me->win_pos_out!=-1){ libaroma_sleep(16); } int i; int did = -1; LIBAROMA_WINDOWP win=NULL; for (i=0;i<me->win_n;i++){ _LIBAROMA_CTL_FRAGMENT_WINP windn = (_LIBAROMA_CTL_FRAGMENT_WINP) me->wins[i]->client_data; if (id==windn->id){ win=me->wins[i]; did=i; break; } } byte ret=0; if (me->win_pos==did){ ALOGW("libaroma_ctl_fragment_del_window cannot delete active window"); } else if (win){ int newn = me->win_n-1; if (newn<1){ if (me->wins){ free(me->wins); me->wins=NULL; } me->win_n=0; } else{ LIBAROMA_WINDOWP * newins = calloc(sizeof(LIBAROMA_WINDOWP),newn); int n=0; for (i=0;i<me->win_n;i++){ if (i!=did){ newins[n++]=me->wins[i]; } } free(me->wins); me->wins=newins; me->win_n=newn; } libaroma_window_free(win); } else{ ALOGW("libaroma_ctl_fragment_del_window window id not found"); } return ret; } byte libaroma_ctl_fragment_del_window( LIBAROMA_CONTROLP ctl, int id){ _LIBAROMA_CTL_CHECK( _libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP, 0 ); libaroma_mutex_lock(me->mutex); byte ret=libaroma_ctl_fragment_del_window_nomutex(ctl,id); libaroma_mutex_unlock(me->mutex); return ret; } /* * Function : libaroma_ctl_fragment_set_active_window * Return Value: byte * Descriptions: set active page */ byte libaroma_ctl_fragment_set_active_window( LIBAROMA_CONTROLP ctl, int id, byte anitype, long duration, byte remove_prev, LIBAROMA_TRANSITION_CB transcb, LIBAROMA_RECTP rect_start, LIBAROMA_RECTP rect_end ){ _LIBAROMA_CTL_CHECK( _libaroma_ctl_fragment_handler, _LIBAROMA_CTL_FRAGMENTP, 0 ); /* wait for transition */ while(me->win_pos_out!=-1){ libaroma_sleep(16); } 
byte ret=0; int i; int did = -1; libaroma_mutex_lock(me->mutex); LIBAROMA_WINDOWP win=NULL; for (i=0;i<me->win_n;i++){ _LIBAROMA_CTL_FRAGMENT_WINP windn = (_LIBAROMA_CTL_FRAGMENT_WINP) me->wins[i]->client_data; if (id==windn->id){ win=me->wins[i]; did=i; break; } } if (did!=-1){ if (me->win_pos!=did){ _libaroma_ctl_fragment_activate_win(win,1); libaroma_sleep(120); if (me->win_pos!=-1){ me->transition_start=libaroma_tick(); me->transition_duration=duration; me->transition_type=anitype; me->transition_state=0; me->transision_delprev=remove_prev; me->transition_cb=transcb; me->transition_rs=rect_start; me->transition_re=rect_end; _LIBAROMA_CTL_FRAGMENT_WINP windid = (_LIBAROMA_CTL_FRAGMENT_WINP) me->wins[did]->client_data; windid->active_state=2; me->win_pos_out=me->win_pos; me->win_pos=did; _libaroma_ctl_fragment_direct_canvas(ctl,0); } else{ me->win_pos_out=me->win_pos; me->win_pos=did; } ret=1; me->redraw=1; } else{ ALOGW("libaroma_ctl_fragment_set_active_window " "cannot reactivate active window"); } } else{ ALOGW("libaroma_ctl_fragment_set_active_window window id not found"); } libaroma_mutex_unlock(me->mutex); return ret; } #endif /* __libaroma_ctl_fragment_c__ */
wf3cte.c
/* WFC3 -- CTE loss correction for UVIS M. sosey Aug-2014 Adapted for the pipeline from Jay Andersons CTE correction code for wfc3 UVIS raw2raz_wfc3uv.F , an edited file was delivered december 2014, and both are different from the fortran code currently served on the wfc3 website. M. Sosey Aug-2016 Adapted to be used with Subarrays as well as full frame arrays, as long as the subarray contains physical overscan pixels, which don't include the science team subarrays which can span quads. */ # include <time.h> # include <string.h> # include <math.h> # include <stdlib.h> # include <stdio.h> # include <float.h> # ifdef _OPENMP # include <omp.h> # endif #include "hstcal.h" # include "hstio.h" # include "wf3.h" # include "wf3info.h" # include "hstcalerr.h" # include "wf3corr.h" # include "cte.h" # include "trlbuf.h" int WF3cte (char *input, char *output, CCD_Switch *cte_sw, RefFileInfo *refnames, int printtime, int verbose, int onecpu) { /* input: filename output: filename cte_sw: the calibration flags refnames: the names of the calibration reference files onecpu: use parallel processing? The following are new primary header keywords which will be added to the data so that they can be updated by the code. They are also specified in the PCTETAB reference file. 
These are taken from the PCTETAB CTE_NAME - name of cte algorithm CTE_VER - version number of cte algorithm CTEDATE0 - date of wfc3/uvis installation in HST, in MJD CTEDATE1 - reference date of CTE model pinning, in MJD PCTETLEN - max length of CTE trail PCTERNOI - readnoise amplitude for clipping PCTESMIT - number of iterations used in CTE forward modeling PCTESHFT - number of iterations used in the parallel transfer PCTENSMD - readnoise mitigation algorithm PCTETRSH - over-subtraction threshold PCTEFRAC - cte scaling frac calculated from expstart PCTERNOI - the readnoise clipping level to use #These are taken from getreffiles.c DRKCFILE is a new dark reference file used only in the CTE branch *_DRC.fits BIACFILE is a new super-bias reference file used only in the CTE branch *_BIC.fits PCTETAB is a new reference file FITS table which will contain the software parameter switches for the CTE correction *_CTE.fit This is the main workhorse function for removing the CTE from WFC3 UVIS images Unfortunately this happens before anything else in wfc3, so there's a lot of reading files at the beginning in order to populate needed information. The rest of the pipeline works on one chip at a time and the structures are all defined to support that. None of these structures are defined until the code enters the single chip loops. This differs from the CTE correction in ACS which occurs later in the process after basic structures are defined. 
*/ extern int status; WF3Info wf3; /*structure with calibration switches and reference files for passing*/ Hdr phdr; /*primary header for input image, all output information saved here*/ Hdr scihdr; /*science header in case of subarray image to detect chip*/ IODescPtr ip = NULL; CTEParams cte_pars; /*STRUCTURE HOLDING THE MODEL PARAMETERS*/ SingleGroup cd; /*SCI 1, chip 2*/ SingleGroup ab; /*SCI 2, chip 1*/ SingleGroup subcd; /*subarray chip*/ SingleGroup subab; /*subarray chip*/ SingleGroup raz; /* THE LARGE FORMAT COMBINATION OF CDAB*/ SingleGroup rsz; /* LARGE FORMAT READNOISE CORRECTED IMAGE */ SingleGroup rsc; /* CTE CORRECTED*/ SingleGroup rzc; /* FINAL CTE CORRECTED IMAGE */ SingleGroup chg; /* THE CHANGE DUE TO CTE */ SingleGroup raw; /* THE RAW IMAGE IN RAZ FORMAT */ int i,j; /*loop vars*/ int max_threads=1; clock_t begin; double time_spent; float hardset=0.0; /* These are used to find subarrays with physical overscan */ int sci_bin[2]; /* bin size of science image */ int sci_corner[2]; /* science image corner location */ int ref_bin[2]; int ref_corner[2]; int rsize = 1; /* reference pixel size */ int start=0; /*where the subarray starts*/ int finish=0; /*where the subarray ends*/ /* init header vars */ initHdr(&phdr); initHdr(&scihdr); /*check if this is a subarray image. This is necessary because the CTE routine will start with the raw images from scratch and read them in so that both chips can be used. CTE is outside of the normal processing where one chip goes through the pipeline at a time, both chips are used at the same time for the correction. For the case of subarrays, a fake second chip needs to be created. The subarray is also placed inside the confines of a full size image and a mask is created to ignore pixels not associated with the original data during the cte correction. This is necessary because the pixel location itself is used as part of the correction. 
A secondary option would be to set the looping arrays to variable sizes and make sure all array references were consistent with the current data being processed. I decided on masking which might allow for other considerations in future updates. Only subarrays which were taken with physical overscan pixels are currently valid This distinction can be made with the CRDS ruleset for PCTECORR but it should also be checked here incase users update the header themselves for local runs. In order to check for overscan pixels I'm using the array start location instead of the APERTURE keyword information (there are known user apertures which do not have overscan pixels, but this gets around string comparisons and any future name changes or aperture additions in the future) */ begin = (double)clock(); /*CONTAIN PARALLEL PROCESSING TO A SINGLE THREAD AS USER OPTION*/ # ifdef _OPENMP trlmessage("Using parallel processing provided by OpenMP inside CTE routine"); if (onecpu){ omp_set_dynamic(0); max_threads=1; sprintf(MsgText,"onecpu == TRUE, Using only %i threads/cpu", max_threads); } else { omp_set_dynamic(0); max_threads = omp_get_num_procs(); /*be nice, use 1 less than avail?*/ sprintf(MsgText,"Setting max threads to %i of %i cpus",max_threads, omp_get_num_procs()); } omp_set_num_threads(max_threads); trlmessage(MsgText); # endif /* COPY COMMAND-LINE ARGUMENTS INTO WF3. */ WF3Init (&wf3); /*sets default information*/ strcpy (wf3.input, input); strcpy (wf3.output, output); PrBegin ("WFC3CTE"); if (wf3.printtime) TimeStamp("WFC3CTE Started: ",wf3.rootname); /* CHECK WHETHER THE OUTPUT FILE ALREADY EXISTS. 
*/ if (FileExists (wf3.output)){ WhichError(status); return (ERROR_RETURN); } wf3.pctecorr = cte_sw->pctecorr; wf3.darkcorr = cte_sw->darkcorr; wf3.biascorr = cte_sw->biascorr; wf3.blevcorr = cte_sw->blevcorr; wf3.printtime = printtime; wf3.verbose = verbose; wf3.refnames = refnames; PrFileName ("input", wf3.input); PrFileName ("output", wf3.output); if (wf3.biascorr == COMPLETE){ trlmessage("BIASCORR complete for input image, CTE can't be performed"); return(ERROR_RETURN); } if (wf3.darkcorr == COMPLETE){ trlmessage("DARKCORR complete for input image, CTE can't be performed"); return(ERROR_RETURN); } if (wf3.blevcorr == COMPLETE){ trlmessage("BLEVCORR complete for input image, CTE can't be performed"); return(ERROR_RETURN); } /* DETERMINE THE NAMES OF THE TRAILER FILES BASED ON THE INPUT AND OUTPUT FILE NAMES, THEN INITIALIZE THE TRAILER FILE BUFFER WITH THOSE NAMES. */ if (initCTETrl (input, output)) return (status); /* OPEN INPUT IMAGE IN ORDER TO READ ITS PRIMARY HEADER. */ if (LoadHdr (wf3.input, &phdr) ){ WhichError(status); return (ERROR_RETURN); } /* GET KEYWORD VALUES FROM PRIMARY HEADER. 
*/ if (GetKeys (&wf3, &phdr)) { freeHdr (&phdr); return (status); } if (GetCTEFlags (&wf3, &phdr)) { freeHdr(&phdr); return (status); } /*SET UP THE ARRAYS WHICH WILL BE PASSED AROUND*/ initSingleGroup(&raz); allocSingleGroup(&raz, RAZ_COLS, RAZ_ROWS, True); initSingleGroup(&rsz); allocSingleGroup(&rsz, RAZ_COLS, RAZ_ROWS, True); initSingleGroup(&rsc); allocSingleGroup(&rsc, RAZ_COLS, RAZ_ROWS, True); initSingleGroup(&rzc); allocSingleGroup(&rzc, RAZ_COLS, RAZ_ROWS, True); initSingleGroup(&raw); allocSingleGroup(&raw, RAZ_COLS, RAZ_ROWS, True); initSingleGroup(&chg); allocSingleGroup(&chg, RAZ_COLS, RAZ_ROWS, True); /*hardset the science arrays*/ for (i=0;i<RAZ_COLS;i++){ for(j=0;j<RAZ_ROWS;j++){ Pix(raw.sci.data,i,j)=hardset; Pix(raz.sci.data,i,j)=hardset; Pix(rsz.sci.data,i,j)=hardset; Pix(rsc.sci.data,i,j)=hardset; Pix(rzc.sci.data,i,j)=hardset; Pix(chg.sci.data,i,j)=hardset; } } /*READ IN THE CTE PARAMETER TABLE*/ initCTEParams(&cte_pars); if (GetCTEPars (wf3.pctetab.name, &cte_pars)) return (status); if (verbose){ PrRefInfo ("pctetab", wf3.pctetab.name, wf3.pctetab.pedigree, wf3.pctetab.descrip, wf3.pctetab.descrip2); } /* Full frame and subarrays always have group 1 If it's a subarray, the group can be from either chip and will still be labled group 1 because it's the FIRST and only group, so look at the ccdchip instead. amps ab are in chip1, sci,2 amps cd are in chip2, sci,1 */ if (wf3.subarray) { /* OPEN INPUT IMAGE IN ORDER TO READ ITS SCIENCE HEADER. */ ip = openInputImage (wf3.input, "SCI", 1); if (hstio_err()) { sprintf (MsgText, "Image: \"%s\" is not present", wf3.input); trlerror (MsgText); return (status = OPEN_FAILED); } getHeader (ip, &scihdr); if (ip != NULL) closeImage (ip); /* Get CCD-specific parameters. 
*/ if (GetKeyInt (&scihdr, "CCDCHIP", USE_DEFAULT, 1, &wf3.chip)){ freeHdr(&scihdr); return (status); } freeHdr(&scihdr); if (wf3.chip == 2){ /*sci1,cd*/ start=0; finish=0; /*get CD subarray from first extension*/ initSingleGroup (&subcd); getSingleGroup (wf3.input, 1, &subcd); if (hstio_err()){ freeSingleGroup(&subcd); return (status = OPEN_FAILED); } /*create an empty full size chip for pasting*/ initSingleGroup(&cd); allocSingleGroup(&cd,RAZ_COLS/2,RAZ_ROWS, True); cd.group_num=1; CreateEmptyChip(&wf3, &cd); if (GetCorner(&subcd.sci.hdr, rsize, sci_bin, sci_corner)) return (status); if (GetCorner(&cd.sci.hdr, rsize, ref_bin, ref_corner)) return (status); start = sci_corner[0] - ref_corner[0]; finish = start + subcd.sci.data.nx; if ( start >= 25 && finish + 60 <= (RAZ_COLS/2) - 25){ sprintf(MsgText,"Subarray not taken with physical overscan (%i %i)\nCan't perform CTE correction\n",start,finish); trlmessage(MsgText); return(ERROR_RETURN); } /*SAVE THE PCTETABLE INFORMATION TO THE HEADER OF THE SCIENCE IMAGE AFTER CHECKING TO SEE IF THE USER HAS SPECIFIED ANY CHANGES TO THE CTE CODE VARIABLES. 
*/ if (CompareCTEParams(&subcd, &cte_pars)) return (status); /*Put the subarray data into full frame*/ Sub2Full(&wf3, &subcd, &cd, 0, 1, 1); /* now create an empty chip 1*/ initSingleGroup(&ab); allocSingleGroup(&ab,RAZ_COLS/2,RAZ_ROWS, True); ab.group_num=2; CreateEmptyChip(&wf3, &ab); /* SAVE A COPY OF THE RAW IMAGE BEFORE BIAS FOR LATER */ makeRAZ(&cd,&ab,&raw); /* Subtract the BIAC file from the subarray before continuing The bias routine will take care of cutting out the correct image location for the subarray.*/ if (doCteBias(&wf3,&subcd)){ freeSingleGroup(&subcd); return(status); } /*reset the array after bias subtraction*/ Sub2Full(&wf3, &subcd, &cd, 0, 1, 1); } else { /*chip is 1, ab, sci2*/ start=0; finish=0; initSingleGroup(&subab); getSingleGroup(wf3.input, 1, &subab); if (hstio_err()){ freeSingleGroup(&subab); return (status = OPEN_FAILED); } /*make an empty fullsize chip for pasting*/ initSingleGroup(&ab); allocSingleGroup(&ab,RAZ_COLS/2,RAZ_ROWS, True); ab.group_num=2; CreateEmptyChip(&wf3, &ab); if ( GetCorner(&subab.sci.hdr, rsize, sci_bin, sci_corner)) return (status); if ( GetCorner(&ab.sci.hdr, rsize, ref_bin, ref_corner)) return (status); start = sci_corner[0] - ref_corner[0]; finish = start + subab.sci.data.nx; if ( start >= 25 && finish + 60 <= (RAZ_COLS/2) - 25){ sprintf(MsgText,"Subarray not taken with physical overscan (%i %i)\nCan't perform CTE correction\n",start,finish); trlmessage(MsgText); return(ERROR_RETURN); } /*add subarray to full frame image*/ Sub2Full(&wf3, &subab, &ab, 0, 1, 1); /*SAVE THE PCTETABLE INFORMATION TO THE HEADER OF THE SCIENCE IMAGE AFTER CHECKING TO SEE IF THE USER HAS SPECIFIED ANY CHANGES TO THE CTE CODE VARIABLES. 
*/ if (CompareCTEParams(&subab, &cte_pars)) return (status); /* now create an empty chip 2*/ initSingleGroup(&cd); allocSingleGroup(&cd,RAZ_COLS/2,RAZ_ROWS, True); cd.group_num=1; CreateEmptyChip(&wf3, &cd); /* SAVE A COPY OF THE RAW IMAGE FOR LATER */ makeRAZ(&cd,&ab,&raw); /* Subtract the BIAC file from the subarray before continuing*/ subab.group_num=2; if (doCteBias(&wf3,&subab)){ freeSingleGroup(&subab); return(status); } /*reset the array after bias subtraction*/ Sub2Full(&wf3, &subab, &ab, 0, 1, 1); } } else { /* Full frame image, just read in the groups and init the mask to use all pixels */ initSingleGroup (&cd); getSingleGroup (wf3.input, 1, &cd); if (hstio_err()){ return (status = OPEN_FAILED); } initSingleGroup (&ab); getSingleGroup (wf3.input, 2, &ab); if (hstio_err()){ return (status = OPEN_FAILED); } /*setup the mask*/ for(i=0; i< ab.dq.data.nx; i++){ for(j=0; j< ab.dq.data.ny; j++){ PPix(&ab.dq.data, i, j) = 1; PPix(&cd.dq.data, i, j) = 1; } } /* SAVE A COPY OF THE RAW IMAGE FOR LATER */ makeRAZ(&cd,&ab,&raw); /***SUBTRACT THE CTE BIAS FROM BOTH CHIPS IN PLACE***/ if (doCteBias(&wf3,&cd)){ freeSingleGroup(&cd); return(status); } if (doCteBias(&wf3,&ab)){ freeSingleGroup(&ab); return(status); } /*SAVE THE PCTETABLE INFORMATION TO THE HEADER OF THE SCIENCE IMAGE AFTER CHECKING TO SEE IF THE USER HAS SPECIFIED ANY CHANGES TO THE CTE CODE VARIABLES. 
*/ if (CompareCTEParams(&cd, &cte_pars)) return (status); } /*CONVERT TO RAZ, SUBTRACT BIAS AND CORRECT FOR GAIN*/ if (raw2raz(&wf3, &cd, &ab, &raz)) return (status); /***CALCULATE THE SMOOTH READNOISE IMAGE***/ trlmessage("CTE: Calculating smooth readnoise image"); /***CREATE THE NOISE MITIGATION MODEL ***/ if (cte_pars.noise_mit == 0) { if (raz2rsz(&wf3, &raz, &rsz, cte_pars.rn_amp, max_threads)) return (status); } else { trlmessage("Only noise model 0 implemented!"); return (status=ERROR_RETURN); } /***CONVERT THE READNOISE SMOOTHED IMAGE TO RSC IMAGE THIS IS WHERE THE CTE GETS CALCULATED ***/ if (rsz2rsc(&wf3, &rsz, &rsc, &cte_pars)) return (status); /*** CREATE THE FINAL CTE CORRECTED IMAGE, PUT IT BACK INTO ORIGNAL RAW FORMAT***/ for (i=0;i<RAZ_COLS;i++){ for(j=0; j<RAZ_ROWS; j++){ Pix(chg.sci.data,i,j) = (Pix(rsc.sci.data,i,j) - Pix(rsz.sci.data,i,j))/wf3.ccdgain; Pix(rzc.sci.data,i,j) = Pix(raw.sci.data,i,j) + Pix(chg.sci.data,i,j); } } /*BACK TO NORMAL FORMATTING*/ /*Copies rzc data to cd->sci.data and ab->sci.data */ undoRAZ(&cd,&ab,&rzc); /* COPY BACK THE SCIENCE SUBARRAYS AND SAVE THE NEW RAW FILE WITH UPDATED SCIENCE ARRAYS AND PRIMARY HEADER TO RAC */ if (wf3.subarray) { if (wf3.chip == 2) { /*** SAVE USEFUL HEADER INFORMATION ***/ if (cteHistory (&wf3, subcd.globalhdr)) return (status); /*UPDATE THE OUTPUT HEADER ONE FINAL TIME*/ PutKeyDbl(subcd.globalhdr, "PCTEFRAC", cte_pars.scale_frac,"CTE scaling fraction based on expstart"); trlmessage("PCTEFRAC saved to header"); Full2Sub(&wf3, &subcd, &cd, 0, 1, 1); putSingleGroup(output, 1, &subcd,0); freeSingleGroup(&subcd); } else { /*** SAVE USEFUL HEADER INFORMATION ***/ if (cteHistory (&wf3, subab.globalhdr)) return (status); /*UPDATE THE OUTPUT HEADER ONE FINAL TIME*/ PutKeyDbl(subab.globalhdr, "PCTEFRAC", cte_pars.scale_frac,"CTE scaling fraction based on expstart"); trlmessage("PCTEFRAC saved to header"); Full2Sub(&wf3, &subab, &ab, 0, 1, 1); putSingleGroup(output, 1, &subab,0); 
            freeSingleGroup(&subab);
        }
    } else { /*FUll FRAME*/
        /*** SAVE USEFUL HEADER INFORMATION ***/
        if (cteHistory (&wf3, cd.globalhdr))
            return (status);
        /*UPDATE THE OUTPUT HEADER ONE FINAL TIME*/
        PutKeyDbl(cd.globalhdr, "PCTEFRAC", cte_pars.scale_frac,"CTE scaling fraction based on expstart");
        trlmessage("PCTEFRAC saved to header");
        /* Full-frame output: write both chips back out with their original group numbers */
        putSingleGroup(output,cd.group_num, &cd,0);
        putSingleGroup(output,ab.group_num, &ab,0);
    }

    /** CLEAN UP ON AISLE 3 **/
    freeSingleGroup(&rzc);
    freeSingleGroup(&rsc);
    freeSingleGroup(&chg);
    freeSingleGroup(&raz);
    freeSingleGroup(&rsz);
    freeSingleGroup(&raw);
    freeSingleGroup(&cd);
    freeSingleGroup(&ab);

    /* Wall-clock style accounting based on CPU clock; divided by thread count below */
    time_spent = ((double) clock()- begin +0.0) / CLOCKS_PER_SEC;

    if (verbose){
        sprintf(MsgText,"CTE run time: %.2f(s) with %i procs/threads\n",time_spent/max_threads,max_threads);
        trlmessage(MsgText);
    }

    PrSwitch("pctecorr", COMPLETE);
    if(wf3.printtime)
        TimeStamp("PCTECORR Finished",wf3.rootname);

    return (status);
}


/********************* SUPPORTING SUBROUTINES *****************************/

/* raw2raz: reformat the two chips (cd, ab) into the single amp-rotated RAZ
   image, subtract the residual per-amplifier bias measured from overscan
   pixels, and convert the science array to electrons by multiplying by the
   CCD gain.  Returns the global status code. */
int raw2raz(WF3Info *wf3, SingleGroup *cd, SingleGroup *ab, SingleGroup *raz){
    /*
       convert a raw file to raz file: CDAB longwise amps, save data array
       for comparison with what jay has during testing

       -->do an additional bias correction using the  residual bias level measured for each amplifier from the
       steadiest pixels in the horizontal overscan and subtracted fom the pixels for that amplifier.

       ---> convert into electrons at the end
       ---> add supplemental bias info to the header

       allocate contiguous 2d array on the heap
       with pointers and return the pointer to the head of the array

       The Following macros are used to represent 2-d indexing.
       Two dimensional arrays are stored in FITS order.

       ny ^
       N | a05   a15   a25   a35
       A | a04   a14   a24   a34
       X | a03   a13   a23   a33
       I | a02   a12   a22   a32
       S | a01   a11   a21   a31
       2 | a00   a10   a20   a30
       ---------------------------> nx
       NAXIS1

       NAXIS1 is 4 and NAXIS2 is 6
       PIX(a,1,4) accesses a14

       In the raz image, each quadrant has been rotated such that the readout amp is located at the lower left.
       The reoriented four quadrants are then arranged into a single 8412x2070 image (science pixels plus overscan),
       with amps C, D, A, and B, in that order. In the raz image, pixels are all parallel-shifted down,
       then serial-shifted to the left.
    */
    extern int status;

    int i,j,k;                 /*loop counters*/
    int subcol = (RAZ_COLS/4); /* for looping over quads */
    extern int status;         /* variable for return status */
                               /* NOTE(review): duplicate extern declaration of
                                  status above; harmless in C but redundant */
    float bias_post[4];        /* per-amp post-scan bias level (full frame) */
    float bsig_post[4];        /* per-amp post-scan bias sigma */
    float bias_pre[4];         /* per-amp pre-scan bias level (subarrays) */
    float bsig_pre[4];         /* per-amp pre-scan bias sigma */
    float gain;

    /*INIT THE ARRAYS*/
    for(i=0;i<4;i++){
        bias_post[i]=0.;
        bsig_post[i]=0.;
        bias_pre[i]=0.;
        bsig_pre[i]=0.;
    }

    gain=wf3->ccdgain;

    /*REFORMAT TO RAZ*/
    makeRAZ(cd,ab,raz);

    /*SUBTRACT THE EXTRA BIAS CALCULATED, AND MULTIPLY BY THE GAIN
      Note that for user subarray the image is in only 1 quad, and only
      has prescan bias pixels so the regions are different for full and subarrays
    */
    if (wf3->subarray){
        /* Subarrays: only serial physical prescan pixels are available.
           Bias/gain applied only where the DQ mask is non-zero (valid data). */
        findPreScanBias(raz, bias_pre, bsig_pre);
        for (k=0;k<4;k++){
            for (i=0; i<subcol;i++){
                for (j=0;j<RAZ_ROWS; j++){
                    if(Pix(raz->dq.data,i+k*subcol,j)){
                        Pix(raz->sci.data,i+k*subcol,j) -= bias_pre[k];
                        Pix(raz->sci.data,i+k*subcol,j) *= gain;
                    }
                }
            }
        }
    } else {
        /* Full frame: use the serial virtual (post-scan) overscan; all pixels
           in each quad are corrected (no DQ gating here, unlike the subarray
           branch — intentional per the full-frame mask set up by the caller). */
        findPostScanBias(raz, bias_post, bsig_post);
        for (k=0;k<4;k++){
            for (i=0; i<subcol;i++){
                for (j=0;j<RAZ_ROWS; j++){
                    Pix(raz->sci.data,i+k*subcol,j) -= bias_post[k];
                    Pix(raz->sci.data,i+k*subcol,j) *= gain;
                }
            }
        }
    }

    return(status);
}

/*calculate the post scan and bias after the biac file has been subtracted
  add some history information to the header

  Jay gave no explanation why plist is limited to 55377 for full arrays, his
  subarray limitation was just 1/4 of this value

  the serial virtual overscan pixels are also called the trailing-edge
  pixels these only exist in full frame images */
/* findPostScanBias: measure the residual bias level (and sigma) of each of the
   four amplifier quadrants from the serial virtual overscan (post-scan)
   region of the RAZ image, using a sigma-clipped resistant mean.
   Outputs: mean[4], sigma[4] indexed in CDAB quadrant order.
   Returns the global status, or ERROR_RETURN on allocation failure. */
int findPostScanBias(SingleGroup *raz, float *mean, float *sigma){

    extern int status;
    int arrsize = 55377;     /* hard cap on sampled overscan pixels (see note above) */
    int i,j,k;               /*Looping variables */
    float plist[arrsize];    /*bias bpixels to measure*/
                             /* NOTE(review): ~216KB variable-length array on the
                                stack — large but matches original design */
    float *plistSub;
    float min=0.0;
    float max=0.0;
    float rmean=0.0;
    float rsigma=0.0;
    float sigreg =7.5;       /*sigma clip*/
    int subcol = RAZ_COLS/4;
    int npix=0;              /*track array size for resistant mean*/

    /*init plist for full size
      We'll allocate heap memory for smaller arrays
    */
    for (i=0;i<arrsize;i++){
        plist[i]=0.;
    }

    for (k=0;k<4;k++){  /*for each quadrant cdab = 0123*/
        npix=0; /*reset for each quad*/
        rmean=0.;
        rsigma=0.;
        /* Post-scan columns start RAZ_ROWS+5 into the quad; rows 0..2050.
           Extra pixels beyond arrsize are silently ignored. */
        for (i=RAZ_ROWS+5;i<= subcol-1; i++){ /*quad area for post scan bias pixels*/
            for (j=0; j<2051; j++){
                if (npix < arrsize){
                    if ( Pix(raz->dq.data,i+k*subcol,j)) {
                        plist[npix] = Pix(raz->sci.data,i+k*subcol,j);
                        npix+=1;
                    }
                }
            }
        }

        if (npix > 0 ){
            /* Copy the collected pixels into an exactly-sized heap buffer for
               resistmean (which expects a tight array). */
            plistSub = (float *) calloc(npix, sizeof(float));
            if (plistSub == NULL){
                trlerror("out of memory for resistmean entrance in findPostScanBias.");
                free(plistSub);
                return (ERROR_RETURN);
            }
            for(i=0; i<npix; i++){
                plistSub[i]=plist[i];
            }
            resistmean(plistSub, npix, sigreg, &rmean, &rsigma, &min, &max);
            free(plistSub);
        }

        /* If a quad had no valid pixels, mean/sigma stay at the reset 0. */
        mean[k]= rmean;
        sigma[k] = rsigma;
    }
    return status;
}

/*CALCULATE THE PRE SCAN AND BIAS AFTER THE BIAC FILE HAS BEEN SUBTRACTED

  The serial physical overscan pixels are also known as the serial prescan,
  they are the only pixels available for subarrays. For full frame arrays
  the prescan is not used as part of the correction, instead the virtual
  overscan pixels are used and modeled in findPostScanBias.
*/
/* findPreScanBias: same as findPostScanBias, but samples the serial physical
   prescan (columns 5..24 of each quad) — the only overscan available for
   subarray data.  Outputs mean[4]/sigma[4] in CDAB order. */
int findPreScanBias(SingleGroup *raz, float *mean, float *sigma){
    /** this calls resistmean, which does a better job clipping outlying pixels
      that just a standard stddev clip single pass*/

    extern int status;
    int arrsize = 55377;
    int i,j,k;              /*Looping variables */
    float plist[arrsize];   /*bias pixels to measure*/
    float *plistSub;        /*heap allocation for variable size plist array*/
    float min=0.0;
    float max=0.0;
    float rmean;
    float rsigma;
    float sigreg =7.5;      /*sigma clip*/
    int subcol = RAZ_COLS/4;
    int npix=0;             /*track array size for resistant mean*/

    /*init plist*/
    for (i=0;i<arrsize;i++){
        plist[i]=0.;
    }

    for (k=0;k<4;k++){  /*for each quadrant, CDAB ordered*/
        npix=0;
        rmean=0.;
        rsigma=0.;
        /* Physical prescan columns 5..24 of each quad, rows 0..2050 */
        for (i=5;i<25; i++){
            for (j=0; j<2051; j++){ /*all rows*/
                if (npix < arrsize ){
                    if (Pix(raz->dq.data,i+(k*subcol),j)){
                        plist[npix] = Pix(raz->sci.data,i+k*subcol,j);
                        npix+=1;
                    }
                }
            }
        }

        if (0 < npix ){
            plistSub = (float *) calloc(npix, sizeof(float));
            if (plistSub == NULL){
                trlerror("out of memory for resistmean entrance in findPreScanBias.");
                free(plistSub);
                return (ERROR_RETURN);
            }
            for(i=0; i<npix; i++){
                plistSub[i]=plist[i];
            }
            resistmean(plistSub, npix, sigreg, &rmean, &rsigma, &min, &max);
            free(plistSub);
        }

        mean[k]= rmean;
        sigma[k] = rsigma;
        /* NOTE(review): diagnostic printf left in production path; consider
           routing through trlmessage or removing */
        if(npix>0)
            printf("npix=%i\nmean[%i]=%f\nsigma[%i] = %f\n",npix,k+1,rmean,k+1,rsigma);
    }
    return status;
}


/* raz2rsz: produce the read-noise-smoothed RSZ image from the RAZ image by
   iteratively nudging each pixel toward its neighbors (75% of the suggested
   adjustment per iteration, up to 100 iterations) until the RMS of the
   removed "noise" reaches the amplifier read noise rnsig.  Returns status. */
int raz2rsz(WF3Info *wf3, SingleGroup *raz, SingleGroup *rsz, double rnsig, int max_threads){
    /*
       This routine will read in a RAZ image and will output the smoothest
       image that is consistent with being the observed image plus readnoise. (RSZ image)
       This is necessary because we want the CTE-correction algorithm to produce the smoothest
       possible reconstruction, consistent with the original image and the known readnoise.

       This algorithm constructs a model that is smooth where the pixel-to-pixel variations
       can be thought of as being related to readnoise, but if the variations are too large,
       then it respects the pixel values. Basically... it uses a 2-sigma threshold.

       This is strategy #1 in a two-pronged strategy to mitigate the readnoise
       amplification.  Strategy #2 will be to not iterate when the deblurring
       is less than the readnoise.
    */

    extern int status;

    int i, j, NIT;      /*loop variables*/
    int imid;
    double dptr=0.0;    /* adjustment suggested by find_dadj for one pixel */
    double rms=0.0;
    double rmsu=0.0;
    double nrms=0.0;
    double nrmsu=0.0;
    float hardset=0.0f;
    double setdbl=0.0;

    /*1D ARRAYS FOR CENTRAL AND NEIGHBORING RAZ_COLS*/
    double obs_loc[3][RAZ_ROWS] ;
    double rsz_loc[3][RAZ_ROWS] ;

    NIT=1;

    /*ALL ELEMENTS TO FLAG*/
    for(i=0;i<3;i++){
        for (j=0; j<RAZ_ROWS; j++){
            obs_loc[i][j]=setdbl;
            rsz_loc[i][j]=setdbl;
        }
    }

    /***INITIALIZE THE LOCAL IMAGE GROUPS***/
    SingleGroup rnz;   /* running (raz - rsz) residual, i.e. removed noise */
    initSingleGroup(&rnz);
    allocSingleGroup(&rnz, RAZ_COLS, RAZ_ROWS, True);

    SingleGroup zadj;  /* per-pixel adjustment image for the current iteration */
    initSingleGroup(&zadj);
    allocSingleGroup(&zadj, RAZ_COLS, RAZ_ROWS, True);

    /*COPY THE RAZ IMAGE INTO THE RSZ OUTPUT IMAGE
      AND INITIALIZE THE OTHER IMAGES*/
    for(i=0;i<RAZ_COLS;i++){
        for (j=0;j<RAZ_ROWS;j++){
            Pix(rsz->sci.data,i,j) = Pix(raz->sci.data,i,j);
            Pix(rsz->dq.data,i,j) = Pix(raz->dq.data,i,j);
            Pix(rnz.sci.data,i,j) = hardset;
            Pix(zadj.sci.data,i,j) = hardset;
        }
    }

    /*THE RSZ IMAGE JUST GETS UPDATED AS THE RAZ IMAGE IN THIS CASE*/
    if (rnsig < 0.1){
        trlmessage("rnsig < 0.1, No read-noise mitigation needed");
        return(status);
    }

    /*GO THROUGH THE ENTIRE IMAGE AND ADJUST PIXELS TO MAKE THEM
      SMOOTHER, BUT NOT SO MUCH THAT IT IS NOT CONSISTENT WITH
      READNOISE.  DO THIS IN BABY STEPS SO THAT EACH ITERATION
      DOES VERY LITTLE ADJUSTMENT AND INFORMATION CAN GET PROPAGATED
      DOWN THE LINE.
    */
    rms=setdbl;

    for(NIT=1; NIT<=100; NIT++){
        /* Phase 1: compute adjustment for every pixel, columns in parallel */
        #pragma omp parallel for schedule(dynamic) \
            private(i,j,imid,obs_loc,rsz_loc,dptr)\
            shared(raz, rsz, rnsig,rms,nrms, zadj)
        for(i=0; i<RAZ_COLS; i++){
            imid=i;

            /*RESET TO MIDDLE RAZ_COLS AT ENDPOINTS*/
            if (imid < 1)
                imid=1;
            if (imid == RAZ_COLS-1)
                imid = RAZ_COLS-2;

            /*COPY THE MIDDLE AND NEIGHBORING PIXELS FOR ANALYSIS*/
            for(j=0; j<RAZ_ROWS; j++){
                obs_loc[0][j] = Pix(raz->sci.data,imid-1,j);
                obs_loc[1][j] = Pix(raz->sci.data,imid,j);
                obs_loc[2][j] = Pix(raz->sci.data,imid+1,j);

                rsz_loc[0][j] = Pix(rsz->sci.data,imid-1,j);
                rsz_loc[1][j] = Pix(rsz->sci.data,imid,j);
                rsz_loc[2][j] = Pix(rsz->sci.data,imid+1,j);
            }
            for (j=0; j<RAZ_ROWS; j++){
                if(Pix(raz->dq.data,imid,j)) {
                    /* 1+i-imid maps the column of interest into the 3-wide
                       local window (0,1,2) even at the image edges */
                    find_dadj(1+i-imid,j, obs_loc, rsz_loc, rnsig, &dptr);
                    Pix(zadj.sci.data,i,j) = dptr;
                }
            }
        } /*end the parallel for*/

        /*NOW GO OVER ALL THE RAZ_COLS AND RAZ_ROWS AGAIN TO SCALE THE PIXELS
        */
        for(i=0; i<RAZ_COLS;i++){
            for(j=0; j<RAZ_ROWS; j++){
                if (Pix(raz->dq.data,i,j)){
                    /* Apply 75% of the suggested adjustment (baby steps) */
                    Pix(rsz->sci.data,i,j) += (Pix(zadj.sci.data,i,j)*0.75);
                    Pix(rnz.sci.data,i,j) = (Pix(raz->sci.data,i,j) - Pix(rsz->sci.data,i,j));
                }
            }
        }

        rms=setdbl;
        nrms=setdbl;

        /*This is probably a time sink because the arrays are being
          accessed out of storage order, careful of page faults */
        #pragma omp parallel for schedule(dynamic,1)\
            private(i,j,rmsu,nrmsu) \
            shared(raz,rsz,rms,rnsig,nrms)
        for(j=0; j<RAZ_ROWS; j++){
            nrmsu=setdbl;
            rmsu=setdbl;
            for(i = 0;i<RAZ_COLS; i++){
                /* Only count pixels with appreciable signal in either image */
                if ( (fabs(Pix(raz->sci.data,i,j)) > 0.1) ||
                        (fabs(Pix(rsz->sci.data,i,j)) > 0.1) ){
                    rmsu += ( Pix(rnz.sci.data,i,j) * Pix(rnz.sci.data,i,j) );
                    nrmsu += 1.0;
                }
            }
            #pragma omp critical (rms)
            {
                rms += rmsu;
                nrms += nrmsu;
            }
        }
        rms = sqrt(rms/nrms);

        /*epsilon type comparison*/
        if ( (rnsig-rms) < 0.00001) break; /*this exits the NIT for loop*/
    } /*end NIT*/

    freeSingleGroup(&zadj);
    freeSingleGroup(&rnz);

    return (status);
}

/* find_dadj: compute the read-noise-consistent adjustment for one pixel;
   i is the column index inside the 3-wide local window, j is the row. */
int find_dadj(int i ,int j, double obsloc[][RAZ_ROWS], double rszloc[][RAZ_ROWS], double rnsig, double *d){
    /* This
       function determines for a given pixel how it can
       adjust in a way that is not inconsistent with its being
       readnoise.  To do this, it looks at its upper and lower
       neighbors and sees whether it is consistent with either
       (modulo readnoise).  To the extent that it is consistent
       then move it towards them.  But also bear in mind that
       that we don't want it to be more than 2 RN sigmas away
       from its original value.  This is pretty much a tug of
       war... with readnoise considerations pushing pixels to be
       closer to their neighbors, but the original pixel values
       also pull to keep the pixel where it was.  Some
       accommodation is made for both considerations.
    */

    extern int status;

    double mval=0.0;            /* current model (rsz) value of this pixel */
    double dval0, dval0u, w0;   /* pull toward the original pixel value */
    double dval9, dval9u, w9;   /* pull toward conserving the local 3x3 sum */
    double dmod1, dmod1u, w1;   /* pull toward the pixel below (j-1) */
    double dmod2, dmod2u, w2;   /* pull toward the pixel above (j+1) */

    dval0=0.;
    dval0u=0.;
    w0=0.;
    dval9=0.;
    dval9u=0.;
    w9=0.;
    dmod1=0.;
    dmod1u=0.;
    w1=0.;
    dmod2=0.;
    dmod2u=0.;
    w2=0.;

    mval = rszloc[i][j];
    dval0 = obsloc[i][j] - mval;
    dval0u = dval0;

    /* Clamp the "restore original value" term to +/-1 electron per call */
    if (dval0u >1.0)
        dval0u = 1.0;
    if (dval0u <-1.0)
        dval0u = -1.0;

    dval9 = 0.;

    /*COMPARE THE SURROUNDING PIXELS*/
    /* Only computed away from window/row edges (i==1 is the center column) */
    if (i==1 &&  RAZ_ROWS-1>j && j>0 ) {
        dval9 = obsloc[i][j-1]  - rszloc[i][j-1] +
            obsloc[i][j]    - rszloc[i][j]  +
            obsloc[i][j+1]  - rszloc[i][j+1] +
            obsloc[i-1][j-1]- rszloc[i-1][j-1] +
            obsloc[i-1][j]  - rszloc[i-1][j] +
            obsloc[i-1][j+1]- rszloc[i-1][j+1] +
            obsloc[i+1][j-1]- rszloc[i+1][j-1] +
            obsloc[i+1][j]  - rszloc[i+1][j] +
            obsloc[i+1][j+1]- rszloc[i+1][j+1];
    }

    dval9 =dval9 / 9.;
    dval9u = dval9;

    /* Clamp neighbor-driven terms to about a third of the read noise */
    if (dval9u > (rnsig*0.33))
        dval9u =  rnsig*0.33;
    if (dval9u < rnsig*-0.33)
        dval9u = rnsig*-0.33;

    dmod1 = 0.;
    if (j>0)
        dmod1 = rszloc[i][j-1] - mval;

    dmod1u = dmod1;
    if (dmod1u > rnsig*0.33)
        dmod1u =  rnsig*0.33;
    if (dmod1u < rnsig*-0.33)
        dmod1u = rnsig*-0.33;

    dmod2 = 0.;
    if (j < RAZ_ROWS-1)
        dmod2 =  rszloc[i][j+1] - mval;

    dmod2u = dmod2;
    if (dmod2u > rnsig*0.33)
        dmod2u =  rnsig*0.33;
    if (dmod2u < rnsig*-0.33)
        dmod2u = rnsig*-0.33;

    /*
       IF IT'S WITHIN 2 SIGMA OF THE READNOISE, THEN
       TEND TO TREAT AS READNOISE; IF IT'S FARTHER OFF
       THAN THAT, THEN DOWNWEIGHT THE INFLUENCE
    */
    w0 =   (dval0*dval0) / ((dval0*dval0)+ 4.0*(rnsig*rnsig));
    w9 =   (dval9*dval9) / ((dval9*dval9)+ 18.0*(rnsig*rnsig));
    w1 = (4*rnsig*rnsig) / ((dmod1*dmod1)+4.0*(rnsig*rnsig));
    w2 = (4*rnsig*rnsig) / ((dmod2*dmod2)+4.0*(rnsig*rnsig));

    /*(note that with the last two, if a pixel
      is too discordant with its upper or lower
      that neighbor has less of an ability to pull it)*/

    *d = ((dval0u * w0 * 0.25f) + /* desire to keep the original pixel value */
            (dval9u*w9*0.25f) +   /* desire to keep the original sum over 3x3*/
            (dmod1u*w1*0.25f) +   /*desire to get closer to the pixel below*/
            (dmod2u*w2*0.25f)) ;  /*desire to get closer to the pixel above*/

    return(status);
}


/*** THIS ROUTINE PERFORMS THE CTE CORRECTIONS
  rsz is the readnoise smoothed image
  rsc is the correction output image
  rac = raw + ((rsc-rsz) / gain )
 ***/
/* rsz2rsc: build the per-pixel CTE scale image (pixz_fff) from the PCTETAB
   column scalings, then run the inverse CTE blur to produce the corrected
   RSC image.  Returns status. */
int rsz2rsc(WF3Info *wf3, SingleGroup *rsz, SingleGroup *rsc, CTEParams *cte) {

    extern int status;

    int i,j;
    double cte_i=0.0;   /* interpolated column scaling for this row */
    double cte_j=0.0;   /* fractional row position (j+1)/2048 */
    double ro=0;
    int io=0;
    double ff_by_col[RAZ_COLS][4];  /* column scalings at rows 512/1024/1536/2048 */
    float hardset=0.0;

    /*These are already in the parameter structure
      int     Ws              the number of traps < 999999, taken from pctetab read
      int     q_w[TRAPS];     the run of charge with level  cte->qlevq_data[]
      float   dpde_w[TRAPS];  the run of charge loss with level cte->dpdew_data[]

      float   rprof_wt[TRAPS][100]; the emission probability as fn of downhill pixel, TRAPS=999
      float   cprof_wt[TRAPS][100]; the cumulative probability cprof_t( 1)  = 1.
      - rprof_t(1)

      The rprof array gives the fraction of charge that comes out of every parallel serial-shift
      the cummulative distribution in cprof then tells you what's left
    */

    /* Scratch image holding the per-pixel CTE scale factor */
    SingleGroup pixz_fff;
    initSingleGroup(&pixz_fff);
    allocSingleGroup(&pixz_fff, RAZ_COLS, RAZ_ROWS, True);

    /*SCALE BY 1 UNLESS THE PCTETAB SAYS OTHERWISE, I IS THE PACKET NUM
      THIS IS A SAFETY LOOP INCASE NOT ALL THE COLUMNS ARE POPULATED
      IN THE REFERENCE FILE*/
    for(i=0; i<RAZ_COLS;i++){
        ff_by_col[i][0]=1.;
        ff_by_col[i][1]=1.;
        ff_by_col[i][2]=1.;
        ff_by_col[i][3]=1.;
        j= cte->iz_data[i]; /*which column to scale*/
        ff_by_col[j][0]=cte->scale512[i];
        ff_by_col[j][1]=cte->scale1024[i];
        ff_by_col[j][2]=cte->scale1536[i];
        ff_by_col[j][3]=cte->scale2048[i];

        /*CALCULATE THE CTE CORRECTION FOR EVERY PIXEL
          Index is figured on the final size of the image
          not the current size. Moved above
        */
        for(j=0; j<RAZ_ROWS; j++){
            Pix(pixz_fff.sci.data,i,j)=hardset;
            ro = j/512.0; /*ro can be zero, it's an index*/
            if (ro <0 ) ro=0.;
            if (ro > 2.999) ro=2.999; /*only 4 quads, 0 to 3*/
            io = (int) floor(ro); /*force truncation towards 0 for pos numbers*/
            cte_j= (j+1) / 2048.0;
            /* Linear interpolation in row between the tabulated scalings */
            cte_i= ff_by_col[i][io] + (ff_by_col[i][io+1] -ff_by_col[i][io]) * (ro-io);
            Pix(pixz_fff.sci.data,i,j) =  (cte_i*cte_j);
        }
    }

    /*FOR REFERENCE TO JAYS CODE, FF_BY_COL IS WHAT'S IN THE SCALE BY COLUMN

      int   iz_data[RAZ_ROWS];  column number in raz format
      double scale512[RAZ_ROWS];      scaling appropriate at row 512
      double scale1024[RAZ_ROWS];     scaling appropriate at row 1024
      double scale1536[RAZ_ROWS];     scaling appropriate at row 1536
      double scale2048[RAZ_ROWS];     scaling appropriate at row 2048
    */

    /*THIS IS RAZ2RAC_PAR IN JAYS CODE - MAIN CORRECTION LOOP IN HERE*/
    inverse_cte_blur(rsz, rsc, &pixz_fff, cte, wf3->verbose,wf3->expstart);

    freeSingleGroup(&pixz_fff);
    return(status);
}


/*** this routine does the inverse CTE blurring...
  it takes an observed image and generates the image that would
  pushed through the readout algorithm to generate the observation

  CTE_FF is found using the observation date of the data
  FIX_ROCRs is cte->fix_rocr
  Ws is the number of TRAPS that are < 999999

  this is sub_wfc3uv_raz2rac_par in jays code

  floor rounds to negative infinity
  ceiling rounds to positive infinity
  truncate rounds up or down to zero
  round goes to the nearest integer

  fff is the input cte scaling array calculated over all pixels
  This is a big old time sink function
 ***/
int inverse_cte_blur(SingleGroup *rsz, SingleGroup *rsc, SingleGroup *fff, CTEParams *cte, int verbose, double expstart){

    extern int status;

    /*looping vars*/
    int NREDO, REDO;
    int NITINV, NITCTE;
    int i;
    int j,jj;
    double dmod;
    int jmax;
    float hardset=0.0f;
    int totflux=0;

    double cte_ff; /*cte scaling based on observation date*/
    double setdbl=0.0;

    /*DEFINE TO MAKE PRIVATE IN PARALLEL RUN*/
    /* Initialized to a dummy address; each thread replaces these with its own
       calloc'd per-column work buffers inside the parallel loop below. */
    double *pix_obsd=&setdbl;
    double *pix_modl=&setdbl;
    double *pix_curr=&setdbl;
    double *pix_init=&setdbl;
    double *pix_read=&setdbl;
    double *pix_ctef=&setdbl;

    /*STARTING DEFAULTS*/
    NITINV=1;
    NITCTE=1;
    cte_ff=0.0;
    jmax=0;
    dmod=0.0;

    /*LOCAL IMAGES TO PLAY WITH, THEY WILL REPLACE THE INPUTS*/
    SingleGroup rz; /*pixz_raz*/
    initSingleGroup(&rz);
    allocSingleGroup(&rz, RAZ_COLS, RAZ_ROWS, True);

    SingleGroup rc; /*pixz_rac*/
    initSingleGroup(&rc);
    allocSingleGroup(&rc, RAZ_COLS, RAZ_ROWS, True);

    SingleGroup pixz_fff; /*pixz_fff*/
    initSingleGroup(&pixz_fff);
    allocSingleGroup(&pixz_fff, RAZ_COLS, RAZ_ROWS, True);

    /*USE EXPSTART YYYY-MM-DD TO DETERMINE THE CTE SCALING
      APPROPRIATE FOR THE GIVEN DATE. WFC3/UVIS WAS
      INSTALLED AROUND MAY 11,2009 AND THE MODEL WAS
      CONSTRUCTED TO BE VALID AROUND SEP 3, 2012, A LITTLE
      OVER 3 YEARS AFTER INSTALLATION*/

    cte_ff= (expstart - cte->cte_date0)/ (cte->cte_date1 - cte->cte_date0);
    cte->scale_frac=cte_ff;   /*save to param structure for header update*/

    if(verbose){
        sprintf(MsgText,"CTE_FF (scaling fraction by date) = %g",cte_ff);
        trlmessage(MsgText);
    }

    /*SET UP THE SCALING ARRAY WITH INPUT DATA, hardset arrays for safety*/
    for (i=0;i<RAZ_COLS;i++){
        for(j=0;j<RAZ_ROWS;j++){
            Pix(rc.sci.data,i,j)=hardset;
            Pix(rz.sci.data,i,j)=hardset;
            Pix(pixz_fff.sci.data,i,j)=hardset;
            Pix(rz.sci.data,i,j) = Pix(rsz->sci.data,i,j);
            Pix(rz.dq.data,i,j) = Pix(rsz->dq.data,i,j);
            Pix(pixz_fff.sci.data,i,j) = cte_ff * Pix(fff->sci.data,i,j);
        }
    }

    /* Columns are independent, so parallelize over i; every per-column
       buffer and loop variable is private to its thread. */
    #pragma omp parallel for schedule (dynamic,1) \
        private(dmod,i,j,jj,jmax,REDO,NREDO,totflux, \
                pix_obsd,pix_modl,pix_curr,pix_init,\
                pix_read,pix_ctef,NITINV,NITCTE)\
        shared(rc,rz,cte,pixz_fff)
    for (i=0; i< RAZ_COLS; i++){
        /* NOTE(review): calloc results are not checked for NULL here */
        pix_obsd = (double *) calloc(RAZ_ROWS, sizeof(double));
        pix_modl = (double *) calloc(RAZ_ROWS, sizeof(double));
        pix_curr = (double *) calloc(RAZ_ROWS, sizeof(double));
        pix_init = (double *) calloc(RAZ_ROWS, sizeof(double));
        pix_read = (double *) calloc(RAZ_ROWS, sizeof(double));
        pix_ctef = (double *) calloc(RAZ_ROWS, sizeof(double));

        totflux=0;
        /*HORIZONTAL PRE/POST SCAN POPULATION */
        for (j=0; j< RAZ_ROWS; j++){
            if(Pix(rz.dq.data,i,j)){
                pix_obsd[j] = Pix(rz.sci.data,i,j); /*starts as input RAZ*/
                totflux += 1;
            }
        }

        if (totflux >= 1) {/*make sure the column has flux in it*/
            NREDO=0; /*START OUT NOT NEEDING TO MITIGATE CRS*/
            do { /*replacing goto 9999*/
                REDO=0; /*FALSE*/

                /*STARTING WITH THE OBSERVED IMAGE AS MODEL, ADOPT THE SCALING FOR THIS COLUMN*/
                for (j=0; j<RAZ_ROWS; j++){
                    pix_modl[j] =  Pix(rz.sci.data,i,j);
                    pix_ctef[j] =  Pix(pixz_fff.sci.data,i,j);
                }

                /*START WITH THE INPUT ARRAY BEING THE LAST OUTPUT
                  IF WE'VE CR-RESCALED, THEN IMPLEMENT CTEF*/
                for (NITINV=1; NITINV<=cte->n_forward; NITINV++){
                    for (j=0; j<RAZ_ROWS; j++){
                        pix_curr[j]=pix_modl[j];
                        pix_read[j]=pix_modl[j];
                        pix_ctef[j]=Pix(pixz_fff.sci.data,i,j);
                    }

                    /*TAKE EACH PIXEL DOWN THE DETECTOR IN NCTENPAR=7*/
                    for (NITCTE=1; NITCTE<=cte->n_par; NITCTE++){
                        sim_colreadout_l(pix_curr, pix_read, pix_ctef, cte);

                        /*COPY THE JUST UPDATED READ OUT IMAGE INTO THE INPUT IMAGE*/
                        for (j=0; j< RAZ_ROWS; j++){
                            pix_curr[j]=pix_read[j];
                        }
                    } /* end NITCTE */

                    /*DAMPEN THE ADJUSTMENT IF IT IS CLOSE TO THE READNOISE, THIS IS
                      AN ADDITIONAL AID IN MITIGATING THE IMPACT OF READNOISE*/
                    for (j=0; j< RAZ_ROWS; j++){
                        dmod =  (pix_obsd[j] - pix_read[j]);
                        if (NITINV < cte->n_forward){
                            dmod *= (dmod*dmod) /((dmod*dmod) + (cte->rn_amp * cte->rn_amp));
                        }
                        pix_modl[j] += dmod; /*dampen each pixel as the best is determined*/
                    }
                } /*NITINV end*/

                /*LOOK FOR AND DOWNSCALE THE CTE MODEL IF WE FIND
                  THE TELL-TALE SIGN OF READOUT CRS BEING OVERSUBTRACTED;
                  IF WE FIND ANY THEN GO BACK UP AND RERUN THIS COLUMN

                  THE WFC3 UVIS MODEL SEARCHES FOR OVERSUBTRACTED TRAILS.
                  WHICH ARE  DEFINED AS EITHER:

                  - A SINGLE PIXEL VALUE BELOW -10E-
                  - TWO CONSECUTIVE PIXELS TOTALING -12 E-
                  - THREE TOTALLING -15 E-

                  WHEN WE DETECT SUCH AN OVER-SUBTRACTED TAIL, WE ITERATIVELY REDUCE
                  THE LOCAL CTE SCALING BY 25% UNTIL THE TRAIL IS
                  NO LONGER NEGATIVE  THIS DOES NOT IDENTIFY ALL READOUT-CRS, BUT IT DOES
                  DEAL WITH MANY OF THEM. FOR IMAGES THAT HAVE BACKGROUND GREATER THAN 10
                  OR SO, THIS WILL STILL END UP OVERSUBTRACTING CRS A BIT, SINCE WE
                  ALLOW THEIR TRAILS TO BE SUBTRACTED DOWN TO -10 RATHER THAN 0.
                */
                if (cte->fix_rocr) {
                    for (j=10; j< RAZ_ROWS-2; j++){
                        if (  (( cte->thresh > pix_modl[j] ) &&
                                    ( cte->thresh > (pix_modl[j] - pix_obsd[j]))) ||

                                (((pix_modl[j] + pix_modl[j+1]) < -12.) &&
                                 (pix_modl[j] + pix_modl[j+1] - pix_obsd[j] - pix_obsd[j+1] < -12.)) ||

                                (((pix_modl[j] + pix_modl[j+1] + pix_modl[j+2]) < -15.) &&
                                 ((pix_modl[j] + pix_modl[j+1] + pix_modl[j+2] -pix_obsd[j] -
                                   pix_obsd[j+1] - pix_obsd[j+2]) <-15.))  ){

                            jmax=j;

                            /*GO DOWNSTREAM AND LOOK FOR THE OFFENDING CR*/
                            for (jj=j-10; jj<=j;jj++){
                                if ( (pix_modl[jj] - pix_obsd[jj]) > (pix_modl[jmax] - pix_obsd[jmax]) ) {
                                    jmax=jj;
                                }
                            }
                            /* DOWNGRADE THE CR'S SCALING AND ALSO FOR THOSE
                               BETWEEN THE OVERSUBTRACTED PIXEL AND IT*/
                            for (jj=jmax; jj<=j;jj++){
                                Pix(pixz_fff.sci.data,i,jj) *= 0.75;
                            }
                            REDO=1; /*TRUE*/
                        } /*end if*/
                    } /*end for  j*/
                }/*end fix cr*/

                if (REDO) NREDO +=1;
                if (NREDO == 5)  REDO=0; /*stop*/
            } while (REDO); /*replacing goto 9999*/
        } /*totflux > 1, catch for subarrays*/

        /* Copy the best model for this column into the corrected image */
        for (j=0; j< RAZ_ROWS; j++){
            if (Pix(rz.dq.data,i,j)){
                Pix(rc.sci.data,i,j)= pix_modl[j];
            }
        }

        free(pix_obsd);
        free(pix_modl);
        free(pix_curr);
        free(pix_init);
        free(pix_read);
        free(pix_ctef);
    } /*end i*/

    /* Write the locals back through the callers' pointers (DQ-gated) */
    for (i=0; i< RAZ_COLS; i++){
        for (j=0; j< RAZ_ROWS; j++){
            if(Pix(rsz->dq.data,i,j)){
                Pix(rsz->sci.data,i,j) = Pix(rz.sci.data,i,j);
                Pix(rsc->sci.data,i,j) = Pix(rc.sci.data,i,j);
                Pix(fff->sci.data,i,j) = Pix(pixz_fff.sci.data,i,j);
            }
        }
    }

    freeSingleGroup(&rz);
    freeSingleGroup(&rc);
    freeSingleGroup(&pixz_fff);

    return(status);
}


/*This is the workhorse subroutine; it simulates the readout
  of one column pixi() and outputs this to pixo() using a single
  iteration.  It can be called successively to do the transfer
  in steps.


  JDIM == RAZ_ROWS
  WDIM == TRAPS  Ws is the input traps number < 999999
  NITs == cte_pars->n_par

  These are already in the parameter structure CTEParams
  int     Ws              the number of traps < 999999
  float     q_w[TRAPS];     the run of charge with level  == qlevq_data
  float   dpde_w[TRAPS];  the run of charge loss with level == dpdew_data
  float   rprof_wt[TRAPS][100]; the emission probability as fn of downhill pixel ==rprof fits image
  float   cprof_wt[TRAPS][100]; the cumulative probability cprof_t( 1)  = 1.
  - rprof_t(1) == cprof fits image

  W = wcol_data = trap id

  q_w[TRAP] = qlev_q from QPROF  traps as function of packet size = cte->qlevq_data[TRAP]

  pixi (curr), pixo (read) , pixf(cteff) are passed and are 1d arrays which have values for a particular column

  the ttrap reference to the image array has to be -1 for C
 */
int sim_colreadout_l(double *pixi, double *pixo, double *pixf, CTEParams *cte){

    extern int status;
    int j;
    int ttrap;       /* shifts since this trap last emptied */

    int w;           /* trap index, walked from highest charge level down */
    double ftrap;    /* charge currently held by the trap */
    double pix_1;    /* working copy of the current pixel's charge */
    double padd_2;   /* charge released back this shift (emission) */
    double padd_3;   /* residual cumulative release when the trap refills */
    double prem_3;   /* charge newly captured by the trap */
    double pmax;     /* max charge in the column; traps above it are skipped */
    double fcarry;   /* sub-electron remainder carried between pixels */

    padd_3=0.0;
    prem_3=0.0;
    padd_2=0.0;
    fcarry=0.0;
    pix_1=0.0;
    w=0;
    j=0;
    ftrap=0.0;
    ttrap=0;

    FloatHdrData *rprof;
    FloatHdrData *cprof;

    /*from the reference table*/
    rprof = cte->rprof;
    cprof = cte->cprof;

    /*FIGURE OUT WHICH TRAPS WE DON'T NEED TO WORRY ABOUT IN THIS COLUMN
      PMAX SHOULD ALWAYS BE POSITIVE HERE  */
    pmax=10.;
    for(j=0; j<RAZ_ROWS; j++){
        pixo[j] = pixi[j];
        if (pixo[j] > pmax)
            pmax=pixo[j];
    }

    /*GO THROUGH THE TRAPS ONE AT A TIME, FROM HIGHEST TO LOWEST Q,
      AND SEE WHEN THEY GET FILLED AND EMPTIED, ADJUST THE PIXELS ACCORDINGLY*/
    for (w = cte->cte_traps-1; w>=0; w--){
        if ( cte->qlevq_data[w] <= pmax ) {

            ftrap = 0.0e0;
            ttrap = cte->cte_len; /*for referencing the image at 0*/
            fcarry = 0.0e0;

            /*GO UP THE COLUMN PIXEL BY PIXEL*/
            for(j=0; j<RAZ_ROWS;j++){
                pix_1 = pixo[j];

                if ( (ttrap < cte->cte_len) || ( pix_1 >= cte->qlevq_data[w] - 1. ) ){
                    if (pixo[j] >= 0 ){
                        pix_1 = pixo[j] + fcarry; /*shuffle charge in*/
                        fcarry = pix_1 - floor(pix_1); /*carry the charge remainder*/
                        pix_1 = floor(pix_1); /*reset pixel*/
                    }

                    /*HAPPENS AFTER FIRST PASS*/
                    /*SHUFFLE CHARGE IN*/
                    if ( j> 0  ) {
                        if (pixf[j] < pixf[j-1])
                            ftrap *= (pixf[j] /  pixf[j-1]);
                    }

                    /*RELEASE THE CHARGE*/
                    padd_2=0.0;
                    if (ttrap <cte->cte_len){
                        ttrap += 1;
                        padd_2 = Pix(rprof->data,w,ttrap-1) *ftrap;
                    }

                    padd_3 = 0.0;
                    prem_3 = 0.0;
                    if ( pix_1 >= cte->qlevq_data[w]){
                        prem_3 =  cte->dpdew_data[w] / cte->n_par * pixf[j];  /*dpdew is 1 in file */
                        if (ttrap < cte->cte_len)
                            padd_3 = Pix(cprof->data,w,ttrap-1)*ftrap;
                        ttrap=0;
                        ftrap=prem_3;
                    }

                    pixo[j] += padd_2 + padd_3 - prem_3;
                } /*replaces trap continue*/
            }/*end if j>0*/
        }/* end if qlevq > pmax, replaces continue*/

    }/*end for w*/

    return(status);
}


/* initCTETrl: set up the trailer (.trl) file names for the CTE step from the
   input/output image names and initialize trailer output.  Returns status. */
int initCTETrl (char *input, char *output) {

    extern int status;

    char trl_in[CHAR_LINE_LENGTH+1];     /* trailer filename for input */
    char trl_out[CHAR_LINE_LENGTH+1];    /* output trailer filename */
    int exist;

    int MkName (char *, char *, char *, char *, char *, int);
    int TrlExists (char *);

    /* Initialize internal variables */
    trl_in[0] = '\0';
    trl_out[0] = '\0';
    exist = EXISTS_UNKNOWN;

    /* Input and output suffixes. */
    char *isuffix[] = {"_raw"};
    char *osuffix[] = {"_rac_tmp"};
    char *trlsuffix[] = {""};

    int nsuffix = 1;

    /* Start by stripping off suffix from input/output filenames */
    if (MkOutName (input, isuffix, trlsuffix, nsuffix, trl_in, CHAR_LINE_LENGTH)) {
        WhichError (status);
        sprintf (MsgText, "Couldn't determine trailer filename for %s", input);
        trlmessage (MsgText);
    }
    if (MkOutName (output, osuffix, trlsuffix, nsuffix, trl_out, CHAR_LINE_LENGTH)) {
        WhichError (status);
        sprintf (MsgText, "Couldn't create trailer filename for %s", output);
        trlmessage (MsgText);
    }

    /* NOW, CONVERT TRAILER FILENAME EXTENSIONS FROM '.FITS' TO '.TRL' */
    if (MkNewExtn (trl_in, TRL_EXTN) ) {
        sprintf (MsgText, "Error with input trailer filename %s", trl_in);
        trlerror (MsgText);
        WhichError (status);
    }
    if (MkNewExtn (trl_out, TRL_EXTN) ) {
        sprintf (MsgText, "Error with output trailer filename %s", trl_out);
        trlerror (MsgText);
        WhichError (status);
    }

    /* If we are working with a RAW file, then see if a TRL file
       needs to be overwritten after the generic conversion comments. */
    if (strstr(input, isuffix[0]) != NULL) {
        /* Test whether the output file already exists */
        exist = TrlExists(trl_out);
        if (exist == EXISTS_YES) {
            /* The output file exists, so we want to add to them
             ** the new trailer comments. */
            SetTrlOverwriteMode (NO);
        }
    }

    /* Sets up temp trailer file for output and copies input
     ** trailer file into it. */
    InitTrlFile (trl_in, trl_out);

    return(status);
}
libSHCITools.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.

 *
 * Author: James E. T. Smith james.e.smith@colorado.edu (2/7/17)
 *
 * This is a shared library for use interfacing the pyscf package with the Dice
 * package.
 */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <omp.h>
//#include "config.h"
#include <complex.h>

/* transformRDMDinfh: rotate a 2-RDM (norbs^4, row-major) from the complex
   Dinfh symmetry-adapted basis into the real basis using the (up to 2-term)
   complex row expansions in rowInds/rowCoeffs.  Aborts the process if any
   transformed element has a significant imaginary part, since the result
   must be purely real.  Work is split over OpenMP threads by first index. */
void transformRDMDinfh(int norbs, int* nRows, int* rowInds, double* rowCoeffs, double* int2, double* newint2)
{
  size_t n3 = norbs*norbs*norbs, n2=norbs*norbs;
#pragma omp parallel
  {
    int i,j,k,l,ia,ja,ka,la;
    for (i=0; i<norbs; i++) {
      /* Manual round-robin distribution of the outermost index over threads */
      if (i%omp_get_num_threads() != omp_get_thread_num()) continue;

      for (j=0; j<norbs; j++)
      for (k=0; k<norbs; k++)
      for (l=0; l<norbs; l++) {
        double _Complex comp = 0.0;

        /* Each real orbital is a combination of at most nRows[] complex ones;
           rowCoeffs stores (re,im) pairs, two per orbital => stride 4 */
        for (ia=0; ia<nRows[i]; ia++)
        for (ja=0; ja<nRows[j]; ja++)
        for (ka=0; ka<nRows[k]; ka++)
        for (la=0; la<nRows[l]; la++) {
          int ii,jj,kk,ll;
          ii = rowInds[2*i+ia], jj = rowInds[2*j+ja], kk = rowInds[2*k+ka], ll = rowInds[2*l+la];

          double _Complex ci = rowCoeffs[4*i+2*ia] + rowCoeffs[4*i+2*ia+1]*I;
          double _Complex cj = rowCoeffs[4*j+2*ja] + rowCoeffs[4*j+2*ja+1]*I;
          double _Complex ck = rowCoeffs[4*k+2*ka] + rowCoeffs[4*k+2*ka+1]*I;
          double _Complex cl = rowCoeffs[4*l+2*la] + rowCoeffs[4*l+2*la+1]*I;

          comp = comp + conj(ci)*cj*conj(ck)*cl*int2[ii*n3+jj*n2+kk*norbs+ll];
        }

        /* NOTE(review): only a positive imaginary part triggers the check;
           a large *negative* imaginary part would pass — fabs(cimag(comp))
           looks intended; confirm before changing */
        if (cimag(comp) > 1.e-4) {
          printf("Error in %d %d %d %d element of rdm (%g,%g)\n", i, j, k, l, creal(comp), cimag(comp));
          exit(0);
        }
        newint2[i*n3+j*n2+k*norbs+l] = creal(comp);
      }
    }
  }
  printf("Done rdm \n");
}

/* transformDinfh: same basis rotation for a real two-electron integral array
   using real coefficients (stride 2 in rowCoeffs).  Only even-parity
   coefficient combinations contribute; the combination with exactly two
   "second-row" factors picks up a sign flip. */
void transformDinfh(int norbs, int* nRows, int* rowInds, double* rowCoeffs, double* int2, double* newint2)
{
  size_t n3 = norbs*norbs*norbs, n2=norbs*norbs;
#pragma omp parallel
  {
    int i,j,k,l,ia,ja,ka,la;
    for (i=0; i<norbs; i++) {
      if (i%omp_get_num_threads() != omp_get_thread_num()) continue;

      for (j=0; j<norbs; j++)
      for (k=0; k<norbs; k++)
      for (l=0; l<norbs; l++) {
        double _Complex comp = 0.0;  /* NOTE(review): declared but unused here */

        for (ia=0; ia<nRows[i]; ia++)
        for (ja=0; ja<nRows[j]; ja++)
        for (ka=0; ka<nRows[k]; ka++)
        for (la=0; la<nRows[l]; la++) {
          int ii, jj, kk, ll;
          ii = rowInds[2*i+ia], jj = rowInds[2*j+ja], kk = rowInds[2*k+ka], ll = rowInds[2*l+la];

          int sgnf = ia+ja+ka+la;
          double sign = sgnf==2 ? -1. : 1.;

          if (sgnf%2 == 0)
            newint2[i*n3+j*n2+k*norbs+l] += sign*pow(-1., ia)*pow(-1., ka)*int2[ii*n3+jj*n2+kk*norbs+ll]*rowCoeffs[2*i+ia]*rowCoeffs[2*j+ja]*rowCoeffs[2*k+ka]*rowCoeffs[2*l+la];
        }
      }
    }
  }
}

/* writeIntNoSymm: dump one- and two-electron integrals plus the core energy
   to an FCIDUMP file ("FCIDUMP" in the working directory) with no point-group
   symmetry compression beyond the i*norbs+j >= k*norbs+l index ordering. */
void writeIntNoSymm(int norbs, double* int1, double* int2, double coreE, int nelec, int* irrep)
{
  size_t n3 = norbs*norbs*norbs, n2=norbs*norbs;

  FILE *fp;
  /* NOTE(review): fopen result is not checked; a write failure would crash */
  fp=fopen("FCIDUMP", "w");

  fprintf(fp, "&FCI NORBS=%d, NELEC=%d, MS2=0\n", norbs, nelec);
  fprintf(fp, "ORBSYM=");
  int i,j,k,l,ia,ja,ka,la;  /* ia..la are unused in this function */
  for (i=0; i<norbs; i++) {
    fprintf(fp, "%d,", irrep[i]);
  }
  fprintf(fp,"\nISYM=1\nKSYM\n&END\n");

  /* Two-electron integrals above threshold, upper "triangle" of pair index */
  for (i=0; i<norbs; i++)
  for (j=0; j<norbs; j++)
  for (k=0; k<norbs; k++)
  for (l=0; l<norbs; l++) {
    if (fabs(int2[i*n3+j*n2+k*norbs+l]) >= 1.e-9 && i*norbs+j >= k*norbs+l) {
      fprintf(fp, "%20.12f  %d  %d  %d  %d\n", int2[i*n3+j*n2+k*norbs+l], i+1, j+1, k+1, l+1);
    }
  }

  /* One-electron integrals (upper triangle), then the core energy */
  for (i=0; i<norbs; i++)
  for (j=i; j<norbs; j++)
    if (fabs(int1[i*norbs+j]) > 1.e-9)
      fprintf(fp, "%20.12f  %d  %d  %d  %d\n", int1[i*norbs+j], i+1, j+1, 0, 0);

  fprintf(fp, "%20.12f  %d  %d  %d  %d\n", coreE, 0,0,0,0);
  fclose(fp);
}

/* This function is the basic reader for second order spatialRDM files and
   should return arrays that can be iterated over.
/* (continuation of the header comment above)
   The switching of the indices is intentional when saving the elements of the
   2RDM and arises from a difference in notation between pyscf and SHCI. */
void r2RDM( double * twoRDM, size_t norb, char * fIn )
{
  char line[255];
  FILE *fp = fopen( fIn, "r" );
  if ( fp == NULL ) {
    fprintf( stderr, "r2RDM: cannot open %s\n", fIn );
    exit(1);
  }
  /* First line of a spatialRDM file is the number of orbitals. */
  if ( fgets(line, sizeof(line), fp) == NULL ) {
    fprintf( stderr, "r2RDM: %s is empty\n", fIn );
    fclose(fp);
    exit(1);
  }
  int norbs = atoi( strtok(line, " ,\t\n") );
  assert( norbs == (int) norb );
  int norbs2 = norbs*norbs;
  int norbs3 = norbs2*norbs;

  /* Each remaining line is "i k j l value"; note the deliberate swap of the
   * middle two indices (pyscf vs. SHCI notation, see comment above). */
  while ( fgets(line, sizeof(line), fp) != NULL ) {
    int i = atoi( strtok(line, " ,\t\n") );
    int k = atoi( strtok(NULL, " ,\t\n") );
    int j = atoi( strtok(NULL, " ,\t\n") );
    int l = atoi( strtok(NULL, " ,\t\n") );
    /* BUG FIX: the value was stored through a `float`, silently truncating
     * atof()'s double result to single precision. */
    double val = atof( strtok(NULL, " ,\t\n") );
    int indx = i*norbs3 + j*norbs2 + k*norbs + l;
    twoRDM[indx] = val;
  }
  fclose(fp);
}

/* Write the FCIDUMP namelist header (&FCI ... &END) with NORB/NELEC/MS2 and
 * the ORBSYM labels taken from `orbsym`. */
void writeFDHead ( FILE* fOut, size_t norb, size_t nelec, size_t ms, int* orbsym )
{
  fprintf(fOut, " &FCI NORB=%zu ,NELEC=%zu ,MS2=%zu,\n", norb, nelec, ms);
  fprintf(fOut, " ORBSYM=");
  size_t i;
  for ( i = 0; i < norb; ++i ) {
    fprintf(fOut,"%d,", orbsym[i]);
  }
  fprintf(fOut, "\n ISYM=1,\n &END\n");
}

/* Write two-electron integrals assuming 8-fold permutational symmetry:
 * `eri` is packed so that consecutive elements correspond to i>=j, k<=i,
 * l<=k with compound index (ij) >= (kl); `ijkl` walks that packed storage
 * while `ij`/`kl` track the compound pair indices.  Elements with magnitude
 * <= tol are skipped (but their slot in the packed array is still consumed). */
void writeERI ( FILE* fOut, double * eri, size_t norb, double tol)
{
  size_t ij = 0;
  size_t kl = 0;
  size_t ijkl = 0;
  size_t i,j,k,l;
  for ( i=0; i<norb; ++i ) {
    for( j=0; j<i+1; ++j ) {
      kl = 0;
      for ( k=0; k<i+1; ++k ) {
        for (l=0; l<k+1; ++l ) {
          if ( ij >= kl ) {
            if ( fabs(eri[ijkl]) > tol ) {
              fprintf(fOut, "%20.12e %zu %zu %zu %zu\n",
                      eri[ ijkl ], i+1, j+1, k+1, l+1);
            }
            ++ijkl;
          }
          ++kl;
        }
      }
      ++ij;
    }
  }
}

/* Write the lower triangle of the one-body Hamiltonian h1e (indices reported
 * 1-based, with k = l = 0 marking one-electron entries). */
void writeHCore ( FILE * fOut, double * h1e, size_t norb, double tol )
{
  size_t i, j;
  for ( i = 0; i < norb; ++i ) {
    for ( j = 0; j < i+1; ++j ) {
      if ( fabs( h1e[ i*norb + j ] ) > tol ) {
        fprintf( fOut, "%20.12e %zu %zu %d %d\n",
                 h1e[ i*norb + j ], i+1, j+1, 0, 0 );
      }
    }
  }
}
// This function is an alternative to pyscf.tools.fcidump.from_integral and
// should be used when working with large active spaces.
void fcidumpFromIntegral ( char * fileName, double * h1eff, double * eri, size_t norb, size_t nelec, double ecore, int* orbsym, size_t ms ) { FILE * fOut = fopen( fileName, "w"); writeFDHead( fOut, norb, nelec, ms, orbsym ); writeERI( fOut, eri, norb, 1e-12 ); writeHCore( fOut, h1eff, norb, 1e-12 ); fprintf( fOut, "%20.12f %d %d %d %d\n", ecore, 0,0,0,0); fclose(fOut); } /* Sum over m1s rows and m2s columns. The indx variable determines which index of m1 is summed over, e.g. 0 is the first index and so on. m1 should be the one-body matrix, m2 should be the unitary matrix. This function has a temporal cost of O(n^3). */ void multMat2D ( size_t n, double * m1, double * m2, double * mout, size_t indx ) { size_t i, j, k, ik, kj, ij, ki; if ( indx == 1 ) { for ( i = 0; i < n; ++i ) for ( j = 0; j < n; ++j ) { ij = j + i*n; mout[ij] = 0; for ( k = 0; k < n; ++k ) { ik = k + i*n; kj = j + k*n; mout[ij] += m1[ik] * m2[kj]; } } } else if ( indx == 0 ) { for ( i = 0; i < n; ++i ) for ( j = 0; j < n; ++j ) { ij = j + i*n; mout[ij] = 0; for ( k = 0; k < n; ++k ) { ki = i + k * n; kj = j + k * n; mout[ij] += m1[ki] * m2[kj]; } } } } /* Multiplies the matrices m1[n^4] by m2[n^2] and allows the user to choose the index of m1 to sum over using the indx parameter, e.g. 0 sums over the last index of m1. n is the common dimension of m1 and m2. mout is the output array for the multiplication. When used with two-body integrals, m1 shold be the eri (or intermediate eri) array and m2 should be the unitary matrix. This function has a temporal cost of O(n^5). NOTE: Chemistry notation is used for the two-body matrix indices, e.g. (ab|cd) = \int \phi_{a^+} \phi_b O_2 \phi_{c^+} \phi_d dx */ void multMat4D ( size_t n, double * m1, double * m2, double * mout, size_t indx ) { size_t n2 = n*n, n3 = n2*n; // Switch statement deals with the different indices to sum over. 
switch ( indx ) { case 0: #pragma omp parallel { size_t a,b,c,d,i; for ( a = 0; a < n; ++a ) for ( b = 0; b < n; ++b ) for ( c = 0; c < n; ++c ) for ( d = 0; d < n; ++d ) { size_t abcd = d + c*n + b*n2 + a*n3; mout[abcd] = 0; for ( i = 0; i < n; ++i ) { size_t ia = i*n + a; size_t ibcd = d + c*n + b*n2 + i*n3; mout[abcd] += m1[ibcd] * m2[ia]; } } } break; case 1: #pragma omp parallel { size_t i,b,c,d,j; for ( i = 0; i < n; ++i ) for ( b = 0; b < n; ++b ) for ( c = 0; c < n; ++c ) for ( d = 0; d < n; ++d ) { size_t ibcd = d + c*n + b*n2 + i*n3; mout[ibcd] = 0; for ( j = 0; j < n; ++j ) { size_t jb = j*n + b; size_t ijcd = d + c*n + j*n2 + i*n3; mout[ibcd] += m1[ijcd] * m2[jb]; } } } break; case 2: #pragma omp parallel { size_t i,j,c,d,k; for ( i = 0; i < n; ++i ) for ( j = 0; j < n; ++j ) for ( c = 0; c < n; ++c ) for ( d = 0; d < n; ++d ) { size_t ijcd = d + c*n + j*n2 + i*n3; mout[ijcd] = 0; for ( k = 0; k < n; ++k ) { size_t kc = k*n + c; size_t ijkd = d + k*n + j*n2 + i*n3; mout[ijcd] += m1[ijkd] * m2[kc]; } } } break; default: #pragma omp parallel { size_t i,j,k,l,d; for ( i = 0; i < n; ++i ) for ( j = 0; j < n; ++j ) for ( k = 0; k < n; ++k ) for ( d = 0; d < n; ++d ) { size_t ijkd = d + k*n + j*n2 + i*n3; mout[ijkd] = 0; for ( l = 0; l < n; ++l ) { size_t ld = l*n + d; size_t ijkl = l + k*n + j*n2 + i*n3; mout[ijkd] += m1[ijkl] * m2[ld]; } } } break; } } /* This transformation of h1 from MO to NO basis is performed with a temporal cost of O(n^3). */ void oneBodyTrans ( size_t norb, double * un, double * h1 ) { double mp[ norb * norb ]; // Initialize intermediate matrix multMat2D ( norb, h1, un, mp, 1 ); multMat2D ( norb, mp, un, h1, 0 ); } /* This function transforms the eri array sequentially from the MO to NO basis. Like oneBodyTrans() this function uses no special algorithms to optimize performance and has a time cost of O(n^5). 
*/
void twoBodyTrans ( size_t norb, double * un, double * eri )
{
  size_t norb4 = norb * norb * norb * norb;
  /* BUG FIX: `double interMat1[norb4]` was a stack VLA of norb^4 doubles —
   * roughly 3 MB already at norb = 25 — which overflows typical thread
   * stacks for the very "large active spaces" this library targets.
   * Allocate the scratch array on the heap instead. */
  double *interMat1 = (double *) malloc( norb4 * sizeof(double) );
  if ( interMat1 == NULL ) {
    fprintf( stderr, "twoBodyTrans: allocation of %zu doubles failed\n", norb4 );
    exit(1);
  }
  /* Contract each of the four eri indices with the unitary in turn. */
  multMat4D( norb, eri, un, interMat1, 3 );
  multMat4D( norb, interMat1, un, eri, 2 );
  multMat4D( norb, eri, un, interMat1, 1 );
  multMat4D( norb, interMat1, un, eri, 0 );
  free(interMat1);
}
//}
PlasticStrainMapping.h
/******************************************************************************
*       SOFA, Simulation Open-Framework Architecture, development version     *
*                (c) 2006-2017 INRIA, USTL, UJF, CNRS, MGH                    *
*                                                                             *
* This program is free software; you can redistribute it and/or modify it     *
* under the terms of the GNU Lesser General Public License as published by    *
* the Free Software Foundation; either version 2.1 of the License, or (at     *
* your option) any later version.                                             *
*                                                                             *
* This program is distributed in the hope that it will be useful, but WITHOUT *
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or       *
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License *
* for more details.                                                           *
*                                                                             *
* You should have received a copy of the GNU Lesser General Public License    *
* along with this program. If not, see <http://www.gnu.org/licenses/>.        *
*******************************************************************************
* Authors: The SOFA Team and external contributors (see Authors.txt)          *
*                                                                             *
* Contact information: contact@sofa-framework.org                             *
******************************************************************************/
#ifndef SOFA_COMPONENT_MAPPING_PlasticStrainMAPPING_H
#define SOFA_COMPONENT_MAPPING_PlasticStrainMAPPING_H

#include <Flexible/config.h>

#include "BaseStrainMapping.h"
#include "PlasticStrainJacobianBlock.h"
#include "../types/StrainTypes.h"

#include <sofa/helper/OptionsGroup.h>
#include <sofa/helper/IndexOpenMP.h>

namespace sofa
{
namespace component
{
namespace mapping
{

/// Decompose the total strain to an elastic strain + a plastic strain
///
/// @author Matthieu Nesme
///
template <class TStrain>
class PlasticStrainMapping : public BaseStrainMappingT<defaulttype::PlasticStrainJacobianBlock<TStrain> >
{
public:
    typedef defaulttype::PlasticStrainJacobianBlock<TStrain> BlockType;
    typedef BaseStrainMappingT<BlockType> Inherit;
    typedef typename Inherit::Real Real;

    SOFA_CLASS(SOFA_TEMPLATE(PlasticStrainMapping,TStrain), SOFA_TEMPLATE(BaseStrainMappingT,BlockType));

    /// @name Different ways to decompose the strain
    //@{
    enum PlasticMethod { ADDITION=0, MULTIPLICATION, NB_PlasticMethod }; ///< ADDITION -> Müller method (faster), MULTIPLICATION -> Fedkiw method [Irving04]
    // Selected decomposition method; exposed as an option group in the GUI.
    Data<helper::OptionsGroup> f_method;
    //@}

    /// @name Plasticity parameters such as "Interactive Virtual Materials", Muller & Gross, GI 2004
    //@{
    // Per-element parameters; if a vector is shorter than the element index,
    // entry 0 is used as a uniform fallback (see apply()).
    Data<helper::vector<Real> > _max;
    Data<helper::vector<Real> > _yield;
    // Cache of _yield[i]^2, rebuilt in reinit(); avoids squaring per element
    // in the hot apply() loop.
    helper::vector<Real> _squaredYield;
    Data<helper::vector<Real> > _creep; ///< this parameter is different from the article, here it includes the multiplication by dt
    //@}

    // Rebuild the squared-yield cache from _yield, then defer to the base.
    virtual void reinit()
    {
        _squaredYield.resize(_yield.getValue().size());
        for(size_t i=0;i<_yield.getValue().size();i++) _squaredYield[i] = _yield.getValue()[i] * _yield.getValue()[i];

        Inherit::reinit();
    }

    // Reset accumulated plastic state in every Jacobian block.
    virtual void reset()
    {
        //serr<<"PlasticStrainMapping::reset"<<sendl;
        Inherit::reset();

        for( size_t i=0 ; i<this->jacobian.size() ; i++ ) this->jacobian[i].reset();
    }

protected:
    PlasticStrainMapping( core::State<TStrain>* from = NULL, core::State<TStrain>* to = NULL )
        : Inherit ( from, to )
        , f_method ( initData ( &f_method,"method","" ) )
        , _max(initData(&_max,helper::vector<Real>((int)1,(Real)0.1f),"max","Plastic Max Threshold (2-norm of the strain)"))
        , _yield(initData(&_yield,helper::vector<Real>((int)1,(Real)0.0001f),"yield","Plastic Yield Threshold (2-norm of the strain)"))
        , _creep(initData(&_creep,helper::vector<Real>((int)1,(Real)1.f),"creep","Plastic Creep Factor * dt [0,1]. 1 <-> pure plastic ; <1 <-> visco-plastic (warning depending on dt)"))
    {
        // Populate the method option group; ADDITION is the default.
        helper::OptionsGroup Options;
        Options.setNbItems( NB_PlasticMethod );
        Options.setItemName( ADDITION, "addition" );
        Options.setItemName( MULTIPLICATION, "multiplication" );
        Options.setSelectedItem( ADDITION );
        f_method.setValue( Options );
    }

    virtual ~PlasticStrainMapping() { }

    // Apply the selected plastic decomposition to every mapped element.
    // Per element i the parameter vectors fall back to entry 0 when they are
    // shorter than i (uniform parameter case).
    virtual void apply( const core::MechanicalParams * /*mparams*/ , Data<typename Inherit::OutVecCoord>& dOut, const Data<typename Inherit::InVecCoord>& dIn )
    {
        helper::ReadAccessor<Data<typename Inherit::InVecCoord> > inpos (*this->fromModel->read(core::ConstVecCoordId::position()));
        helper::ReadAccessor<Data<typename Inherit::OutVecCoord> > outpos (*this->toModel->read(core::ConstVecCoordId::position()));
        if(inpos.size()!=outpos.size()) this->resizeOut();

        typename Inherit::OutVecCoord& out = *dOut.beginWriteOnly();
        const typename Inherit::InVecCoord& in = dIn.getValue();

        switch( f_method.getValue().getSelectedId() )
        {
        case MULTIPLICATION:
        {
#ifdef _OPENMP
#pragma omp parallel for if (this->d_parallel.getValue())
#endif
            for(sofa::helper::IndexOpenMP<unsigned int>::type i=0 ; i<this->jacobian.size() ; i++ )
            {
                out[i] = typename Inherit::OutCoord();
                Real Max=(_max.getValue().size()<=i)?_max.getValue()[0]:_max.getValue()[i],SquaredYield=(_squaredYield.size()<=i)?_squaredYield[0]:_squaredYield[i] ,Creep=(_creep.getValue().size()<=i)?_creep.getValue()[0]:_creep.getValue()[i];
                this->jacobian[i].addapply_multiplication( out[i], in[i], Max, SquaredYield, Creep );
            }
            break;
        }
        case ADDITION:
        {
#ifdef _OPENMP
#pragma omp parallel for if (this->d_parallel.getValue())
#endif
            for(sofa::helper::IndexOpenMP<unsigned int>::type i=0 ; i<this->jacobian.size() ; i++ )
            {
                out[i] = typename Inherit::OutCoord();
                Real Max=(_max.getValue().size()<=i)?_max.getValue()[0]:_max.getValue()[i],SquaredYield=(_squaredYield.size()<=i)?_squaredYield[0]:_squaredYield[i] ,Creep=(_creep.getValue().size()<=i)?_creep.getValue()[0]:_creep.getValue()[i];
                this->jacobian[i].addapply_addition( out[i], in[i], Max, SquaredYield, Creep );
            }
            break;
        }
        }

        dOut.endEdit();
    }

}; // class PlasticStrainMapping

} // namespace mapping
} // namespace component
} // namespace sofa

#endif // SOFA_COMPONENT_MAPPING_PlasticStrainMAPPING_H
private-clause.c
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif

/* Demonstration of the OpenMP `private` clause: each thread receives its own
 * copy of `suma`, accumulates a partial sum of a[], and prints it.  Private
 * copies are discarded when the parallel region ends — they are NOT merged
 * back into the shared `suma` (a `reduction(+:suma)` would do that). */
int main(void)
{
    int i, n = 7;
    int a[n];
    int suma = 0;  /* BUG FIX: was uninitialized; the final printf read an
                    * indeterminate value, since the parallel region only
                    * ever writes the threads' private copies. */

    for (i = 0; i < n; i++)
        a[i] = i;

#pragma omp parallel private(suma)
    {
        suma = 0;  /* each thread must initialize its own private copy */

#pragma omp for
        for (i = 0; i < n; i++) {
            suma = suma + a[i];
            printf("thread %d suma a[%d]/ ", omp_get_thread_num(), i);
        }

        printf("\nthread %d suma= %d", omp_get_thread_num(), suma);
    }

    /* BUG FIX: was printf("suma:\n", suma) — the argument had no matching
     * conversion specifier, so the value was never printed.  The shared
     * suma is still 0 here, which is exactly the pitfall this example
     * demonstrates: private copies do not propagate out. */
    printf("suma: %d\n", suma);

    return 0;
}
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 32; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4)); ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-19,32));t3<=min(min(min(floord(4*Nt+Ny-9,32),floord(8*t1+Ny+7,32)),floord(16*t2+Ny+3,32)),floord(16*t1-16*t2+Nz+Ny+5,32));t3++) { for (t4=max(max(max(0,ceild(t1-127,128)),ceild(16*t2-Nz-1011,1024)),ceild(32*t3-Ny-1011,1024));t4<=min(min(min(min(floord(4*Nt+Nx-9,1024),floord(8*t1+Nx+7,1024)),floord(16*t2+Nx+3,1024)),floord(32*t3+Nx+19,1024)),floord(16*t1-16*t2+Nz+Nx+5,1024));t4++) { for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),8*t3+6),256*t4+254);t5++) { for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) { lbv=max(1024*t4,4*t5+4); ubv=min(1024*t4+1023,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * 
(((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
wshfl.c
/* Copyright 2018-2019. Massachusetts Institute of Technology.
 * All rights reserved. Use of this source code is governed by
 * a BSD-style license which can be found in the LICENSE file.
 *
 * Authors:
 * 2018-2019 Siddharth Iyer <ssi@mit.edu>
 *
 * Tamir J, Uecker M, Chen W, Lai P, Alley MT, Vasanawala SS, Lustig M.
 * T2 shuffling: Sharp, multicontrast, volumetric fast spin‐echo imaging.
 * Magnetic resonance in medicine. 2017 Jan 1;77(1):180-95.
 *
 * B Bilgic, BA Gagoski, SF Cauley, AP Fan, JR Polimeni, PE Grant,
 * LL Wald, and K Setsompop, Wave-CAIPI for highly accelerated 3D
 * imaging. Magn Reson Med (2014) doi: 10.1002/mrm.25347
 *
 * Iyer S, Bilgic B, Setsompop K.
 * Faster T2 shuffling with Wave.
 * Presented in the session: "Signal Encoding and Decoding" at ISMRM 2018.
 * https://www.ismrm.org/18/program_files/O67.htm
 */

#include <stdbool.h>
#include <complex.h>
#include <math.h>

#ifdef _OPENMP
#include <omp.h>
#endif

#include "num/multind.h"
#include "num/flpmath.h"
#include "num/fft.h"
#include "num/init.h"
#include "num/iovec.h"
#include "num/ops.h"
#include "num/ops_p.h"

#ifdef USE_CUDA
#include "num/gpuops.h"
#endif

#include "iter/iter.h"
#include "iter/lsqr.h"
#include "iter/misc.h"

#include "linops/linop.h"
#include "linops/fmac.h"
#include "linops/someops.h"
#include "linops/decompose_complex.h"

#include "misc/debug.h"
#include "misc/mri.h"
#include "misc/utils.h"
#include "misc/mmio.h"
#include "misc/misc.h"
#include "misc/opts.h"

#include "wavelet/wavthresh.h"
#include "lowrank/lrthresh.h"

#include "grecon/optreg.h"
#include "grecon/italgo.h"

// Command-line usage and help text for the wave-shuffling reconstruction tool.
static const char usage_str[] = "<maps> <wave> <phi> <reorder> <table> <output>";
static const char help_str[] =
	"Perform a wave-shuffling reconstruction.\n\n"
	"Conventions:\n"
	" * (sx, sy, sz) - Spatial dimensions.\n"
	" * wx - Extended FOV in READ_DIM due to\n"
	" wave's voxel spreading.\n"
	" * (nc, md) - Number of channels and ESPIRiT's \n"
	" extended-SENSE model operator\n"
	" dimensions (or # of maps).\n"
	" * (tf, tk) - Turbo-factor and the rank\n"
	" of the temporal basis used in\n"
	" shuffling.\n"
	" * ntr - Number of TRs, or the number of\n"
	" (ky, kz) points acquired of one\n"
	" echo image.\n"
	" * n - Total number of (ky, kz) points\n"
	" acquired. This is equal to the\n"
	" product of ntr and tf.\n\n"
	"Descriptions:\n"
	" * reorder is an (n by 3) index matrix such that\n"
	" [ky, kz, t] = reorder(i, :) represents the\n"
	" (ky, kz) kspace position of the readout line\n"
	" acquired at echo number (t), and 0 <= ky < sy,\n"
	" 0 <= kz < sz, 0 <= t < tf).\n"
	" * table is a (wx by nc by n) matrix such that\n"
	" table(:, :, k) represents the kth multichannel\n"
	" kspace line.\n\n"
	"Expected dimensions:\n"
	" * maps - ( sx, sy, sz, nc, md, 1, 1)\n"
	" * wave - ( wx, sy, sz, 1, 1, 1, 1)\n"
	" * phi - ( 1, 1, 1, 1, 1, tf, tk)\n"
	" * output - ( sx, sy, sz, 1, md, 1, tk)\n"
	" * reorder - ( n, 3, 1, 1, 1, 1, 1)\n"
	" * table - ( wx, nc, n, 1, 1, 1, 1)";

/* Helper function to print out operator dimensions.
 * Logs the domain and codomain dimension vectors of `op` at DP_INFO level. */
static void print_opdims(const struct linop_s* op)
{
	const struct iovec_s* domain   = linop_domain(op);
	const struct iovec_s* codomain = linop_codomain(op);

	debug_printf(DP_INFO, "\tDomain: [");
	for (long k = 0; k < domain->N; k ++)
		debug_printf(DP_INFO, "%6ld", domain->dims[k]);
	debug_printf(DP_INFO, "]\n");

	debug_printf(DP_INFO, "\tCodomain: [");
	for (long k = 0; k < codomain->N; k ++)
		debug_printf(DP_INFO, "%6ld", codomain->dims[k]);
	debug_printf(DP_INFO, "]\n");
}

/* Construct sampling mask array from reorder tables.
*/ static void construct_mask( long reorder_dims[DIMS], complex float* reorder, long mask_dims[DIMS], complex float* mask) { long n = reorder_dims[0]; long sy = mask_dims[1]; long sz = mask_dims[2]; long y = 0; long z = 0; long t = 0; for (int i = 0; i < n; i++) { y = lround(creal(reorder[i])); z = lround(creal(reorder[i + n])); t = lround(creal(reorder[i + 2 * n])); mask[(y + z * sy) + t * sy * sz] = 1; } } struct kern_s { INTERFACE(linop_data_t); unsigned int N; long* reorder_dims; // Dimension of the index table: ( n, 3, 1, 1, 1, 1, 1, 1) long* phi_dims; // Dimension of the temporal basis: ( 1, 1, 1, 1, 1, tf, tk, 1) long* table_dims; // Dimension of the data table: (wx, nc, n, 1, 1, 1, 1, 1) long* kernel_dims; // Dimension of the kernel: ( 1, sy, sz, 1, 1, 1, tk, tk) complex float* reorder; complex float* phi; complex float* kernel; complex float* gpu_kernel; }; static DEF_TYPEID(kern_s); /* Go to table from coefficient-kspace with memory efficiency. */ static void kern_apply(const linop_data_t* _data, complex float* dst, const complex float* src) { const struct kern_s* data = CAST_DOWN(kern_s, _data); long wx = data->table_dims[0]; long sy = data->kernel_dims[1]; long sz = data->kernel_dims[2]; long nc = data->table_dims[1]; long n = data->reorder_dims[0]; long tf = data->phi_dims[5]; long tk = data->phi_dims[6]; long input_dims[] = { [0 ... DIMS - 1] = 1 }; input_dims[0] = wx; input_dims[1] = sy; input_dims[2] = sz; input_dims[3] = nc; input_dims[6] = tk; long perm_dims[] = { [0 ... 
DIMS - 1] = 1 }; perm_dims[0] = wx; perm_dims[1] = nc; perm_dims[3] = tk; perm_dims[4] = sy; perm_dims[5] = sz; complex float* perm = md_alloc_sameplace(DIMS, perm_dims, CFL_SIZE, src); unsigned int permute_order[DIMS] = {0, 3, 5, 6, 1, 2, 4, 7}; for (unsigned int i = 8; i < DIMS; i++) permute_order[i] = i; md_permute(DIMS, permute_order, perm_dims, perm, input_dims, src, CFL_SIZE); long vec_dims[] = {wx, nc, tf, 1}; long phi_mat_dims[] = { 1, 1, tf, tk}; long phi_in_dims[] = {wx, nc, 1, tk}; long fmac_dims[] = {wx, nc, tf, tk}; long line_dims[] = {wx, nc, 1, 1}; complex float* vec = md_alloc_sameplace(4, vec_dims, CFL_SIZE, src); long vec_str[4]; md_calc_strides(4, vec_str, vec_dims, CFL_SIZE); long phi_mat_str[4]; md_calc_strides(4, phi_mat_str, phi_mat_dims, CFL_SIZE); long phi_in_str[4]; md_calc_strides(4, phi_in_str, phi_in_dims, CFL_SIZE); long fmac_str[4]; md_calc_strides(4, fmac_str, fmac_dims, CFL_SIZE); int y = -1; int z = -1; int t = -1; for (int i = 0; i < n; i ++) { y = lround(creal(data->reorder[i])); z = lround(creal(data->reorder[i + n])); t = lround(creal(data->reorder[i + 2 * n])); md_clear(4, vec_dims, vec, CFL_SIZE); md_zfmac2(4, fmac_dims, vec_str, vec, phi_in_str, (perm + ((wx * nc * tk) * (y + z * sy))), phi_mat_str, data->phi); md_copy(4, line_dims, dst + (i * wx * nc), vec + (t * wx * nc), CFL_SIZE); } md_free(perm); md_free(vec); } /* Collapse data table into the temporal basis for memory efficiency. */ static void kern_adjoint(const linop_data_t* _data, complex float* dst, const complex float* src) { struct kern_s* data = CAST_DOWN(kern_s, _data); long wx = data->table_dims[0]; long sy = data->kernel_dims[1]; long sz = data->kernel_dims[2]; long nc = data->table_dims[1]; long n = data->reorder_dims[0]; long tf = data->phi_dims[5]; long tk = data->phi_dims[6]; long perm_dims[] = { [0 ... 
DIMS - 1] = 1 }; perm_dims[0] = wx; perm_dims[1] = nc; perm_dims[3] = tk; perm_dims[4] = sy; perm_dims[5] = sz; complex float* perm = md_alloc_sameplace(DIMS, perm_dims, CFL_SIZE, dst); md_clear(DIMS, perm_dims, perm, CFL_SIZE); #ifdef _OPENMP long num_threads = omp_get_max_threads(); #else long num_threads = 1; #endif long vec_dims[] = {wx, nc, tf, 1}; long phi_mat_dims[] = { 1, 1, tf, tk}; long phi_out_dims[] = {wx, nc, 1, tk}; long fmac_dims[] = {wx, nc, tf, tk}; long line_dims[] = {wx, nc, 1, 1}; long vthrd_dims[] = {wx, nc, tf, 1, num_threads}; complex float* vec = md_alloc_sameplace(5, vthrd_dims, CFL_SIZE, dst); md_clear(5, vthrd_dims, vec, CFL_SIZE); long vec_str[4]; md_calc_strides(4, vec_str, vec_dims, CFL_SIZE); long phi_mat_str[4]; md_calc_strides(4, phi_mat_str, phi_mat_dims, CFL_SIZE); long phi_out_str[4]; md_calc_strides(4, phi_out_str, phi_out_dims, CFL_SIZE); long fmac_str[4]; md_calc_strides(4, fmac_str, fmac_dims, CFL_SIZE); long flag_dims[1] = { n }; complex float* flags = md_calloc(1, flag_dims, CFL_SIZE); #pragma omp parallel for for (int k = 0; k < n; k ++) { #ifdef _OPENMP int tid = omp_get_thread_num(); #else int tid = 0; #endif int y = lround(creal(data->reorder[k])); int z = lround(creal(data->reorder[k + n])); int t = -1; if (0 == flags[k]) { md_clear(4, vec_dims, vec + (wx * nc * tf * tid), CFL_SIZE); for (int i = k; i < n; i ++) { if ((y == lround(creal(data->reorder[i]))) && (z == lround(creal(data->reorder[i + n])))) { flags[i] = 1; t = lround(creal(data->reorder[i + 2 * n])); md_copy(4, line_dims, (vec + (wx * nc * tf * tid) + t * wx * nc), (src + i * wx * nc), CFL_SIZE); } } md_zfmacc2(4, fmac_dims, phi_out_str, perm + (y + z * sy) * (wx * nc * tk), vec_str, vec + (wx * nc * tf * tid), phi_mat_str, data->phi); } } long out_dims[] = { [0 ... 
DIMS - 1] = 1 };
	out_dims[0] = wx;
	out_dims[1] = sy;
	out_dims[2] = sz;
	out_dims[3] = nc;
	out_dims[6] = tk;

	// Reorder the per-(y,z) accumulation scratch into the output layout.
	unsigned int permute_order[DIMS] = {0, 4, 5, 1, 6, 2, 3, 7};
	for (unsigned int i = 8; i < DIMS; i++)
		permute_order[i] = i;

	md_permute(DIMS, permute_order, out_dims, dst, perm_dims, perm, CFL_SIZE);

	md_free(vec);
	md_free(perm);
	md_free(flags);
}

/*
 * Normal operator (A^H A) of the sampling-temporal kernel: one big
 * fused multiply-accumulate of the input coefficients against the
 * precomputed kernel (dim 6 contracted into dim 7).
 */
static void kern_normal(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	const struct kern_s* data = CAST_DOWN(kern_s, _data);

	long wx = data->table_dims[0];
	long sy = data->kernel_dims[1];
	long sz = data->kernel_dims[2];
	long nc = data->table_dims[1];
	long tk = data->phi_dims[6];

	long input_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
	input_dims[0] = wx;
	input_dims[1] = sy;
	input_dims[2] = sz;
	input_dims[3] = nc;
	input_dims[6] = tk;

	long input_str[DIMS];
	md_calc_strides(DIMS, input_str, input_dims, CFL_SIZE);

	// Output coefficient index lives in dim 7 so that dim 6 can be contracted.
	long output_dims[DIMS];
	md_copy_dims(DIMS, output_dims, input_dims);
	output_dims[6] = 1;
	output_dims[7] = tk;

	long output_str[DIMS];
	md_calc_strides(DIMS, output_str, output_dims, CFL_SIZE);

	// GPU kernel was pre-broadcast over read-out (dim 0) and coils (dim 3).
	long gpu_kernel_dims[DIMS] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, gpu_kernel_dims, data->kernel_dims);
	gpu_kernel_dims[0] = wx;
	gpu_kernel_dims[3] = nc;

	long kernel_str[DIMS];
	md_calc_strides(DIMS, kernel_str, data->kernel_dims, CFL_SIZE);

	long gpu_kernel_str[DIMS];
	md_calc_strides(DIMS, gpu_kernel_str, gpu_kernel_dims, CFL_SIZE);

	long fmac_dims[DIMS];
	md_merge_dims(DIMS, fmac_dims, input_dims, data->kernel_dims);

	md_clear(DIMS, output_dims, dst, CFL_SIZE);

#ifdef USE_CUDA
	if (cuda_ondevice(src))
		md_zfmac2(DIMS, fmac_dims, output_str, dst, input_str, src, gpu_kernel_str, data->gpu_kernel);
	else
#endif
	md_zfmac2(DIMS, fmac_dims, output_str, dst, input_str, src, kernel_str, data->kernel);
}

// Release all memory owned by the kernel-operator data.
static void kern_free(const linop_data_t* _data)
{
	const struct kern_s* data = CAST_DOWN(kern_s, _data);

	xfree(data->reorder_dims);
	xfree(data->phi_dims);
	xfree(data->table_dims);
	xfree(data->kernel_dims);

#ifdef USE_CUDA
	if (data->gpu_kernel != NULL)
		md_free(data->gpu_kernel);
#endif

	xfree(data);
}

/*
 * Create the sampling-temporal kernel linear operator.
 * Copies all dimension arrays; borrows reorder/phi/kernel buffers
 * (caller keeps ownership of those).
 */
static const struct linop_s* linop_kern_create(bool gpu_flag,
	const long _reorder_dims[DIMS], complex float* reorder,
	const long _phi_dims[DIMS], complex float* phi,
	const long _kernel_dims[DIMS], complex float* kernel,
	const long _table_dims[DIMS])
{
	PTR_ALLOC(struct kern_s, data);
	SET_TYPEID(kern_s, data);

	PTR_ALLOC(long[DIMS], reorder_dims);
	PTR_ALLOC(long[DIMS], phi_dims);
	PTR_ALLOC(long[DIMS], table_dims);
	PTR_ALLOC(long[DIMS], kernel_dims);

	md_copy_dims(DIMS, *reorder_dims, _reorder_dims);
	md_copy_dims(DIMS, *phi_dims, _phi_dims);
	md_copy_dims(DIMS, *table_dims, _table_dims);
	md_copy_dims(DIMS, *kernel_dims, _kernel_dims);

	data->reorder_dims = *PTR_PASS(reorder_dims);
	data->phi_dims = *PTR_PASS(phi_dims);
	data->table_dims = *PTR_PASS(table_dims);
	data->kernel_dims = *PTR_PASS(kernel_dims);

	data->reorder = reorder;
	data->phi = phi;
	data->kernel = kernel;
	data->gpu_kernel = NULL;

#ifdef USE_CUDA
	if (gpu_flag) {

		// Materialize the kernel broadcast over read-out and coils and move it to GPU.
		long repmat_kernel_dims[DIMS] = { [0 ... DIMS - 1] = 1};
		md_copy_dims(DIMS, repmat_kernel_dims, _kernel_dims);
		repmat_kernel_dims[0] = _table_dims[0];
		repmat_kernel_dims[3] = _table_dims[1];

		long kernel_strs[DIMS];
		long repmat_kernel_strs[DIMS];
		md_calc_strides(DIMS, kernel_strs, _kernel_dims, CFL_SIZE);
		md_calc_strides(DIMS, repmat_kernel_strs, repmat_kernel_dims, CFL_SIZE);

		complex float* repmat_kernel = md_calloc(DIMS, repmat_kernel_dims, CFL_SIZE);
		md_copy2(DIMS, repmat_kernel_dims, repmat_kernel_strs, repmat_kernel, kernel_strs, kernel, CFL_SIZE);

		data->gpu_kernel = md_gpu_move(DIMS, repmat_kernel_dims, repmat_kernel, CFL_SIZE);
		md_free(repmat_kernel);
	}
#else
	UNUSED(gpu_flag);
#endif

	long input_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
	input_dims[0] = _table_dims[0];
	input_dims[1] = _kernel_dims[1];
	input_dims[2] = _kernel_dims[2];
	input_dims[3] = _table_dims[1];
	input_dims[6] = _phi_dims[6];

	long output_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
	output_dims[0] = _table_dims[0];
	output_dims[1] = _table_dims[1];
	output_dims[2] = _reorder_dims[0];

	const struct linop_s* K = linop_create(DIMS, output_dims, DIMS, input_dims,
		CAST_UP(PTR_PASS(data)), kern_apply, kern_adjoint, kern_normal, NULL, kern_free);

	return K;
}

// Data for the multi-coil wrapper around a single-channel operator.
struct multc_s {

	INTERFACE(linop_data_t);

	unsigned int nc;                // number of coils
	unsigned int md;                // number of ESPIRiT maps
	const complex float* maps;      // coil sensitivity maps (borrowed)
	const struct linop_s* sc_op;    // Single channel operator.
};

static DEF_TYPEID(multc_s);

/*
 * Forward: for each coil, weight the coefficients by that coil's
 * sensitivity map, apply the single-channel forward operator, and
 * collect the per-coil data tables; finally permute (wx, n, nc)
 * into the (wx, nc, n) output layout.
 */
static void multc_apply(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	const struct multc_s* data = CAST_DOWN(multc_s, _data);

	// Loading single channel operator.
	const struct operator_s* fwd = data->sc_op->forward;
	const long* sc_inp_dims = linop_domain(data->sc_op)->dims;
	const long* sc_out_dims = linop_codomain(data->sc_op)->dims;

	long sx = sc_inp_dims[0];
	long sy = sc_inp_dims[1];
	long sz = sc_inp_dims[2];
	long wx = sc_out_dims[0];
	long n = sc_out_dims[2];
	long nc = data->nc;
	long md = data->md;

	long src_dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, src_dims, sc_inp_dims);
	src_dims[MAPS_DIM] = md;

	long dst_dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, dst_dims, sc_out_dims);
	dst_dims[1] = nc;

	long map_dims[] = { [0 ... DIMS - 1] = 1};
	map_dims[0] = sx;
	map_dims[1] = sy;
	map_dims[2] = sz;
	map_dims[3] = nc;
	map_dims[4] = md;

	long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, single_map_dims, map_dims);
	single_map_dims[COIL_DIM] = 1;

	complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
	complex float* buffer = md_alloc_sameplace(DIMS, sc_inp_dims, CFL_SIZE, src);

	long tbl_dims[] = { [0 ... DIMS - 1] = 1};
	tbl_dims[0] = wx;
	tbl_dims[1] = n;
	tbl_dims[2] = nc;

	complex float* tbl = md_alloc_sameplace(DIMS, tbl_dims, CFL_SIZE, src);
	md_clear(DIMS, tbl_dims, tbl, CFL_SIZE);

	long pos[] = { [0 ... DIMS - 1] = 0 };

	long zfmac_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, zfmac_dims, src_dims);

	long strides_single_map[DIMS];
	md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);

	long strides_src[DIMS];
	md_calc_strides(DIMS, strides_src, src_dims, CFL_SIZE);

	long strides_sc_inp[DIMS];
	md_calc_strides(DIMS, strides_sc_inp, sc_inp_dims, CFL_SIZE);

	for (long k = 0; k < data->nc; k++) {

		md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
		md_clear(DIMS, sc_inp_dims, buffer, CFL_SIZE);

		// Extract coil k's map and weight the input with it (sums over maps).
		pos[COIL_DIM] = k;
		md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
		pos[COIL_DIM] = 0;

		md_zfmac2(DIMS, zfmac_dims, strides_sc_inp, buffer, strides_src, src, strides_single_map, single_map);
		operator_apply(fwd, DIMS, sc_out_dims, tbl + (wx * n * k), DIMS, sc_inp_dims, buffer);
	}

	md_clear(DIMS, dst_dims, dst, CFL_SIZE);

	// (wx, n, nc) -> (wx, nc, n).
	unsigned int permute_order[DIMS] = {0, 2, 1};
	for (unsigned int i = 3; i < DIMS; i++)
		permute_order[i] = i;

	md_permute(DIMS, permute_order, dst_dims, dst, tbl_dims, tbl, CFL_SIZE);

	md_free(single_map);
	md_free(buffer);
	md_free(tbl);
}

static void multc_adjoint(const linop_data_t* _data,
complex float* dst, const complex float* src)
{
	const struct multc_s* data = CAST_DOWN(multc_s, _data);

	// Loading single channel operator.
	const struct operator_s* adj = data->sc_op->adjoint;
	const long* sc_inp_dims = linop_codomain(data->sc_op)->dims;
	const long* sc_out_dims = linop_domain(data->sc_op)->dims;

	long sx = sc_out_dims[0];
	long sy = sc_out_dims[1];
	long sz = sc_out_dims[2];
	long wx = sc_inp_dims[0];
	long n = sc_inp_dims[2];
	long nc = data->nc;
	long md = data->md;

	long src_dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, src_dims, sc_inp_dims);
	src_dims[1] = nc;

	long dst_dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, dst_dims, sc_out_dims);
	dst_dims[MAPS_DIM] = md;

	long map_dims[] = { [0 ... DIMS - 1] = 1};
	map_dims[0] = sx;
	map_dims[1] = sy;
	map_dims[2] = sz;
	map_dims[3] = nc;
	map_dims[4] = md;

	long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, single_map_dims, map_dims);
	single_map_dims[COIL_DIM] = 1;

	complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
	complex float* buffer1 = md_alloc_sameplace(DIMS, sc_out_dims, CFL_SIZE, src);
	complex float* buffer2 = md_alloc_sameplace(DIMS, dst_dims, CFL_SIZE, src);

	long tbl_dims[] = { [0 ... DIMS - 1] = 1};
	tbl_dims[0] = wx;
	tbl_dims[2] = n;

	complex float* tbl = md_alloc_sameplace(DIMS, tbl_dims, CFL_SIZE, src);

	long pos[] = { [0 ... DIMS - 1] = 0 };

	long strides_single_map[DIMS];
	md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);

	long strides_sc_out[DIMS];
	md_calc_strides(DIMS, strides_sc_out, sc_out_dims, CFL_SIZE);

	long strides_dst[DIMS];
	md_calc_strides(DIMS, strides_dst, dst_dims, CFL_SIZE);

	md_clear(DIMS, dst_dims, dst, CFL_SIZE);

	// Adjoint: slice out coil k's table, apply the single-channel adjoint,
	// then multiply by the conjugate map and accumulate into dst.
	for (long k = 0; k < data->nc; k++) {

		md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
		md_clear(DIMS, sc_out_dims, buffer1, CFL_SIZE);
		md_clear(DIMS, dst_dims, buffer2, CFL_SIZE);
		md_clear(DIMS, tbl_dims, tbl, CFL_SIZE);

		pos[1] = k;
		md_slice(DIMS, 2, pos, src_dims, tbl, src, CFL_SIZE); // flag 2 = bit for dim 1 (coils)
		pos[1] = 0;

		operator_apply(adj, DIMS, sc_out_dims, buffer1, DIMS, tbl_dims, tbl);

		pos[COIL_DIM] = k;
		md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
		pos[COIL_DIM] = 0;

		md_zfmacc2(DIMS, dst_dims, strides_dst, buffer2, strides_sc_out, buffer1, strides_single_map, single_map);
		md_zadd(DIMS, dst_dims, dst, dst, buffer2);
	}

	md_free(single_map);
	md_free(buffer1);
	md_free(buffer2);
	md_free(tbl);
}

/*
 * Normal (A^H A): per coil, map-weight the coefficients, apply the
 * single-channel normal operator, multiply by the conjugate map and
 * accumulate over coils.
 */
static void multc_normal(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	const struct multc_s* data = CAST_DOWN(multc_s, _data);

	// Loading single channel operator.
	const struct operator_s* nrm = data->sc_op->normal;
	const long* sc_dims = linop_domain(data->sc_op)->dims;

	long sx = sc_dims[0];
	long sy = sc_dims[1];
	long sz = sc_dims[2];
	long nc = data->nc;
	long md = data->md;

	long dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, dims, sc_dims);
	dims[MAPS_DIM] = md;

	long map_dims[] = { [0 ... DIMS - 1] = 1};
	map_dims[0] = sx;
	map_dims[1] = sy;
	map_dims[2] = sz;
	map_dims[3] = nc;
	map_dims[4] = md;

	long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, single_map_dims, map_dims);
	single_map_dims[COIL_DIM] = 1;

	complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
	complex float* buffer1 = md_alloc_sameplace(DIMS, sc_dims, CFL_SIZE, src);
	complex float* buffer2 = md_alloc_sameplace(DIMS, sc_dims, CFL_SIZE, src);
	complex float* buffer3 = md_alloc_sameplace(DIMS, dims, CFL_SIZE, src);

	long pos[] = { [0 ... DIMS - 1] = 0 };

	long strides_single_map[DIMS];
	md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);

	long strides_sc[DIMS];
	md_calc_strides(DIMS, strides_sc, sc_dims, CFL_SIZE);

	long strides[DIMS];
	md_calc_strides(DIMS, strides, dims, CFL_SIZE);

	md_clear(DIMS, dims, dst, CFL_SIZE);

	for (long k = 0; k < data->nc; k++) {

		md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
		md_clear(DIMS, sc_dims, buffer1, CFL_SIZE);
		md_clear(DIMS, sc_dims, buffer2, CFL_SIZE);
		md_clear(DIMS, dims, buffer3, CFL_SIZE);

		pos[COIL_DIM] = k;
		md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
		pos[COIL_DIM] = 0;

		md_zfmac2(DIMS, dims, strides_sc, buffer1, strides, src, strides_single_map, single_map);
		operator_apply(nrm, DIMS, sc_dims, buffer2, DIMS, sc_dims, buffer1);
		md_zfmacc2(DIMS, dims, strides, buffer3, strides_sc, buffer2, strides_single_map, single_map);
		md_zadd(DIMS, dims, dst, dst, buffer3);
	}

	md_free(single_map);
	md_free(buffer1);
	md_free(buffer2);
	md_free(buffer3);
}

// Free the wrapper data only; maps and sc_op are borrowed (owned by the caller).
static void multc_free(const linop_data_t* _data)
{
	const struct multc_s* data = CAST_DOWN(multc_s, _data);
	xfree(data);
}

/*
 * Wrap a single-channel operator into a multi-coil, multi-map operator
 * using the given sensitivity maps.
 */
static struct linop_s* linop_multc_create(long nc, long md, const complex float* maps, const struct linop_s* sc_op)
{
	PTR_ALLOC(struct multc_s, data);
	SET_TYPEID(multc_s, data);

	data->nc = nc;
	data->md = md;
	data->maps = maps;
	data->sc_op = sc_op;

	long* op_inp_dims = (long*) linop_domain(sc_op)->dims;
	long* op_out_dims = (long*) linop_codomain(sc_op)->dims;

	long input_dims[] = { [0 ...
DIMS - 1] = 1 };
	md_copy_dims(DIMS, input_dims, op_inp_dims);
	input_dims[MAPS_DIM] = md;

	long output_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, output_dims, op_out_dims);
	output_dims[1] = nc;

	struct linop_s* E = linop_create(DIMS, output_dims, DIMS, input_dims,
		CAST_UP(PTR_PASS(data)), multc_apply, multc_adjoint, multc_normal, NULL, multc_free);

	return E;
}

/* Resize operator. */
// Zero-pad the read-out dimension from sx to the wave read-out size wx.
static const struct linop_s* linop_wavereshape_create(long wx, long sx, long sy, long sz, long nc, long tk)
{
	long input_dims[] = { [0 ... DIMS - 1] = 1};
	input_dims[0] = sx;
	input_dims[1] = sy;
	input_dims[2] = sz;
	input_dims[3] = nc;
	input_dims[6] = tk;

	long output_dims[DIMS];
	md_copy_dims(DIMS, output_dims, input_dims);
	output_dims[0] = wx;

	struct linop_s* R = linop_resize_create(DIMS, output_dims, input_dims);

	return R;
}

/* Fx operator. */
// FFT along the read-out dimension; centered variant selectable.
static const struct linop_s* linop_fx_create(long wx, long sy, long sz, long nc, long tk, bool centered)
{
	long dims[] = { [0 ... DIMS - 1] = 1};
	dims[0] = wx;
	dims[1] = sy;
	dims[2] = sz;
	dims[3] = nc;
	dims[6] = tk;

	struct linop_s* Fx = NULL;

	if (centered)
		Fx = linop_fftc_create(DIMS, dims, READ_FLAG);
	else
		Fx = linop_fft_create(DIMS, dims, READ_FLAG);

	return Fx;
}

/* Wave operator. */
// Point-wise multiplication with the wave PSF; the PSF may carry its
// own coefficient dimension (psf_tk > 1).
static const struct linop_s* linop_wave_create(long wx, long sy, long sz, long nc, long tk, long psf_tk, complex float* psf)
{
	long dims[] = { [0 ... DIMS - 1] = 1};
	dims[0] = wx;
	dims[1] = sy;
	dims[2] = sz;
	dims[3] = nc;
	dims[6] = tk;

	return (psf_tk > 1) ?
		linop_cdiag_create(DIMS, dims, FFT_FLAGS | COEFF_FLAG, psf) :
		linop_cdiag_create(DIMS, dims, FFT_FLAGS, psf);
}

/* Fyz operator. */
// FFT along both phase-encode dimensions; centered variant selectable.
static const struct linop_s* linop_fyz_create(long wx, long sy, long sz, long nc, long tk, bool centered)
{
	long dims[] = { [0 ... DIMS - 1] = 1};
	dims[0] = wx;
	dims[1] = sy;
	dims[2] = sz;
	dims[3] = nc;
	dims[6] = tk;

	struct linop_s* Fyz = NULL;

	if (centered)
		Fyz = linop_fftc_create(DIMS, dims, PHS1_FLAG|PHS2_FLAG);
	else
		Fyz = linop_fft_create(DIMS, dims, PHS1_FLAG|PHS2_FLAG);

	return Fyz;
}

/* Construction sampling temporal kernel.*/
// For each (y, z) location, projects each coefficient basis vector through
// the temporal basis phi, masks it with the sampling pattern, and projects
// back — yielding a tk x tk kernel per location.
static void construct_kernel(
	long mask_dims[DIMS], complex float* mask,
	long phi_dims[DIMS], complex float* phi,
	long kern_dims[DIMS], complex float* kern)
{
	long sy = mask_dims[1];
	long sz = mask_dims[2];
	long tf = phi_dims[5];
	long tk = phi_dims[6];

	long cvec_dims[] = { [0 ... DIMS - 1] = 1 };
	cvec_dims[6] = tk;
	long cvec_str[DIMS];
	md_calc_strides(DIMS, cvec_str, cvec_dims, CFL_SIZE);
	complex float cvec[tk];

	long tvec_dims[] = { [0 ... DIMS - 1] = 1 };
	tvec_dims[5] = tf;
	long tvec_str[DIMS];
	md_calc_strides(DIMS, tvec_str, tvec_dims, CFL_SIZE);
	complex float mvec[tf];
	complex float tvec1[tf];
	complex float tvec2[tf];

	long phi_str[DIMS];
	md_calc_strides(DIMS, phi_str, phi_dims, CFL_SIZE);

	long out_dims[] = { [0 ...
DIMS - 1] = 1 }; out_dims[0] = tk; out_dims[1] = sy; out_dims[2] = sz; out_dims[3] = tk; complex float* out = md_calloc(DIMS, out_dims, CFL_SIZE); for (int y = 0; y < sy; y ++) { for (int z = 0; z < sz; z ++) { for (int t = 0; t < tf; t ++) mvec[t] = mask[(y + sy * z) + (sy * sz) * t]; for (int t = 0; t < tk; t ++) { cvec[t] = 1; md_clear(DIMS, tvec_dims, tvec1, CFL_SIZE); md_zfmac2(DIMS, phi_dims, tvec_str, tvec1, cvec_str, cvec, phi_str, phi); md_clear(DIMS, tvec_dims, tvec2, CFL_SIZE); md_zfmac2(DIMS, tvec_dims, tvec_str, tvec2, tvec_str, tvec1, tvec_str, mvec); md_clear(DIMS, cvec_dims, out + y * tk + z * sy * tk + t * sy * sz * tk, CFL_SIZE); md_zfmacc2(DIMS, phi_dims, cvec_str, out + y * tk + z * sy * tk + t * sy * sz * tk, tvec_str, tvec2, phi_str, phi); cvec[t] = 0; } } } unsigned int permute_order[DIMS] = {4, 1, 2, 5, 6, 7, 3, 0}; for (unsigned int i = 8; i < DIMS; i++) permute_order[i] = i; md_permute(DIMS, permute_order, kern_dims, kern, out_dims, out, CFL_SIZE); md_free(out); } static void fftmod_apply(long sy, long sz, long reorder_dims[DIMS], complex float* reorder, long table_dims[DIMS], complex float* table, long maps_dims[DIMS], complex float* maps) { long wx = table_dims[0]; long nc = table_dims[1]; fftmod(DIMS, table_dims, READ_FLAG, table, table); fftmod(DIMS, maps_dims, FFT_FLAGS, maps, maps); long y = -1; long z = -1; double dy = ((double) sy/2)/((double) sy); double dz = ((double) sz/2)/((double) sz); complex float py = 1; complex float pz = 1; long dims[] = { [0 ... 
DIMS] = 1}; dims[0] = wx; dims[1] = nc; long n = reorder_dims[0]; for (long k = 0; k < n; k++) { y = lround(creal(reorder[k])); z = lround(creal(reorder[k + n])); py = cexp(2.i * M_PI * dy * y); pz = cexp(2.i * M_PI * dz * z); md_zsmul(DIMS, dims, table + k * wx * nc, table + k * wx * nc, py * pz); } } int main_wshfl(int argc, char* argv[]) { double start_time = timestamp(); struct opt_reg_s ropts; opt_reg_init(&ropts); int maxiter = 30; int cgiter = 10; int blksize = 8; float rho = 1; bool hgwld = false; bool ksp = false; const char* fwd = NULL; const char* x0 = NULL; int gpun = -1; bool dcx = false; const struct opt_s opts[] = { { 'R', true, opt_reg, &ropts, "<T>:A:B:C\tGeneralized regularization options. (-Rh for help)" }, OPT_INT( 'b', &blksize, "blkdim", "Block size for locally low rank."), OPT_INT( 'i', &maxiter, "mxiter", "Maximum number of iterations."), OPT_INT( 'j', &cgiter, "cgiter", "Maximum number of CG iterations in ADMM."), OPT_FLOAT( 's', &rho, "admrho", "ADMM Rho value."), OPT_STRING( 'F', &fwd, "frwrd", "Go from shfl-coeffs to data-table. Pass in coeffs path."), OPT_STRING( 'O', &x0, "initl", "Initialize reconstruction with guess."), OPT_INT( 'g', &gpun, "gpunm", "GPU device number."), OPT_SET( 'K', &ksp, "Go from data-table to shuffling basis k-space."), OPT_SET( 'H', &hgwld, "Use hogwild."), OPT_SET( 'v', &dcx, "Split coefficients to real and imaginary components."), }; cmdline(&argc, argv, 6, 6, usage_str, help_str, ARRAY_SIZE(opts), opts); struct admm_conf admm = { false, false, false, rho, cgiter }; debug_printf(DP_INFO, "Loading data... 
");

	// Inputs: argv[1..5] = maps, wave PSF, temporal basis, reorder table, data table.
	long maps_dims[DIMS];
	complex float* maps = load_cfl(argv[1], DIMS, maps_dims);

	long wave_dims[DIMS];
	complex float* wave = load_cfl(argv[2], DIMS, wave_dims);

	long phi_dims[DIMS];
	complex float* phi = load_cfl(argv[3], DIMS, phi_dims);

	long reorder_dims[DIMS];
	complex float* reorder = load_cfl(argv[4], DIMS, reorder_dims);

	long table_dims[DIMS];
	complex float* table = load_cfl(argv[5], DIMS, table_dims);

	debug_printf(DP_INFO, "Done.\n");

	if (gpun >= 0)
		num_init_gpu_device(gpun);
	else
		num_init();

	int wx = wave_dims[0];
	int sx = maps_dims[0];
	int sy = maps_dims[1];
	int sz = maps_dims[2];
	int nc = maps_dims[3];
	int md = maps_dims[4];
	int tf = phi_dims[5];
	int tk = phi_dims[6];

	debug_printf(DP_INFO, "Constructing sampling mask from reorder table... ");
	long mask_dims[] = { [0 ... DIMS - 1] = 1 };
	mask_dims[1] = sy;
	mask_dims[2] = sz;
	mask_dims[5] = tf;
	complex float* mask = md_calloc(DIMS, mask_dims, CFL_SIZE);
	construct_mask(reorder_dims, reorder, mask_dims, mask);
	debug_printf(DP_INFO, "Done.\n");

	debug_printf(DP_INFO, "Constructing sampling-temporal kernel... ");
	long kernel_dims[] = { [0 ... DIMS - 1] = 1 };
	kernel_dims[1] = sy;
	kernel_dims[2] = sz;
	kernel_dims[6] = tk;
	kernel_dims[7] = tk;
	complex float* kernel = md_calloc(DIMS, kernel_dims, CFL_SIZE);
	construct_kernel(mask_dims, mask, phi_dims, phi, kernel_dims, kernel);
	md_free(mask);
	debug_printf(DP_INFO, "Done.\n");

	long coeff_dims[] = { [0 ... DIMS - 1] = 1 };
	coeff_dims[0] = sx;
	coeff_dims[1] = sy;
	coeff_dims[2] = sz;
	coeff_dims[4] = md;
	coeff_dims[6] = tk;
	coeff_dims[8] = dcx ? 2 : 1; // extra dim when splitting real/imaginary

	// -K: only transform the data table into shuffling-basis k-space and exit.
	if (ksp == true) {

		const struct linop_s* Knc = linop_kern_create(gpun >= 0,
			reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, table_dims);

		long ksp_dims[] = { [0 ... DIMS - 1] = 1 };
		ksp_dims[0] = wx;
		ksp_dims[1] = sy;
		ksp_dims[2] = sz;
		ksp_dims[3] = nc;
		ksp_dims[6] = tk;

		complex float* res = create_cfl(argv[6], DIMS, ksp_dims);
		operator_apply(Knc->adjoint, DIMS, ksp_dims, res, DIMS, table_dims, table);

		linop_free(Knc);
		md_free(kernel);
		unmap_cfl(DIMS, maps_dims, maps);
		unmap_cfl(DIMS, wave_dims, wave);
		unmap_cfl(DIMS, phi_dims, phi);
		unmap_cfl(DIMS, reorder_dims, reorder);
		unmap_cfl(DIMS, table_dims, table);
		unmap_cfl(DIMS, ksp_dims, res);

		return 0;
	}

	debug_printf(DP_INFO, "Creating single channel linear operators:\n");

	double t1;
	double t2;

	t1 = timestamp();
	const struct linop_s* R = linop_wavereshape_create(wx, sx, sy, sz, 1, tk);
	t2 = timestamp();
	debug_printf(DP_INFO, "\tR: %f seconds.\n", t2 - t1);

	t1 = timestamp();
	const struct linop_s* Fx = linop_fx_create(wx, sy, sz, 1, tk, false);
	t2 = timestamp();
	debug_printf(DP_INFO, "\tFx: %f seconds.\n", t2 - t1);

	t1 = timestamp();
	const struct linop_s* W = linop_wave_create(wx, sy, sz, 1, tk, wave_dims[COEFF_DIM], wave);
	t2 = timestamp();
	debug_printf(DP_INFO, "\tW: %f seconds.\n", t2 - t1);

	t1 = timestamp();
	const struct linop_s* Fyz = linop_fyz_create(wx, sy, sz, 1, tk, false);
	t2 = timestamp();
	debug_printf(DP_INFO, "\tFyz: %f seconds.\n", t2 - t1);

	t1 = timestamp();
	long single_channel_table_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, single_channel_table_dims, table_dims);
	single_channel_table_dims[1] = 1;
	const struct linop_s* K = linop_kern_create(gpun >= 0,
		reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, single_channel_table_dims);
	t2 = timestamp();
	debug_printf(DP_INFO, "\tK: %f seconds.\n", t2 - t1);

	// Single-channel forward model: K . Fyz . W . Fx . R
	struct linop_s* A_sc = linop_chain_FF(linop_chain_FF(linop_chain_FF(linop_chain_FF(
		R, Fx), W), Fyz), K);

	debug_printf(DP_INFO, "Single channel forward operator information:\n");
	print_opdims(A_sc);

	struct linop_s* A = linop_multc_create(nc, md, maps, A_sc);

	debug_printf(DP_INFO, "Overall forward linear operator information:\n");
	print_opdims(A);

	// -F: apply the (centered-FFT) forward model to given coefficients and exit.
	if (fwd != NULL) {

		debug_printf(DP_INFO, "Going from coefficients to data table... ");

		complex float* coeffs_to_fwd = load_cfl(fwd, DIMS, coeff_dims);
		complex float* table_forward = create_cfl(argv[6], DIMS, table_dims);

		const struct linop_s* R = linop_wavereshape_create(wx, sx, sy, sz, 1, tk);
		const struct linop_s* CFx = linop_fx_create( wx, sy, sz, 1, tk, true);
		const struct linop_s* W = linop_wave_create(wx, sy, sz, 1, tk, wave_dims[COEFF_DIM], wave);
		const struct linop_s* CFyz = linop_fyz_create(wx, sy, sz, 1, tk, true);
		const struct linop_s* K = linop_kern_create(gpun >= 0,
			reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, single_channel_table_dims);

		struct linop_s* AC_sc = linop_chain_FF(linop_chain_FF(linop_chain_FF(linop_chain_FF(
			R, CFx), W), CFyz), K);
		struct linop_s* AC = linop_multc_create(nc, md, maps, AC_sc);

		operator_apply(AC->forward, DIMS, table_dims, table_forward, DIMS, coeff_dims, coeffs_to_fwd);

		debug_printf(DP_INFO, "Done.\n");
		debug_printf(DP_INFO, "Cleaning up... ");

		linop_free(AC);
		linop_free(AC_sc);
		md_free(kernel);
		unmap_cfl(DIMS, maps_dims, maps);
		unmap_cfl(DIMS, wave_dims, wave);
		unmap_cfl(DIMS, phi_dims, phi);
		unmap_cfl(DIMS, reorder_dims, reorder);
		unmap_cfl(DIMS, table_dims, table);
		unmap_cfl(DIMS, table_dims, table_forward);

		debug_printf(DP_INFO, "Done.\n");

		return 0;
	}

	// -v: solve for real and imaginary parts as separate coefficient sets.
	if (dcx) {

		debug_printf(DP_INFO, "\tSplitting result into real and imaginary components.\n");

		struct linop_s* tmp = A;
		struct linop_s* dcxop = linop_decompose_complex_create(DIMS, ITER_DIM, linop_domain(A)->dims);
		A = linop_chain(dcxop, tmp);

		debug_printf(DP_INFO, "New operator information:\n");
		print_opdims(A);

		linop_free(dcxop);
		linop_free(tmp);
	}

	debug_printf(DP_INFO, "Normalizing data table and applying fftmod to table and maps... ");
	float norm = md_znorm(DIMS, table_dims, table);
	md_zsmul(DIMS, table_dims, table, table, 1. / norm);
	fftmod_apply(sy, sz, reorder_dims, reorder, table_dims, table, maps_dims, maps);
	debug_printf(DP_INFO, "Done.\n");

	debug_printf(DP_INFO, "Preparing reconstruction operator... ");
	const struct operator_p_s* thresh_ops[NUM_REGS] = { NULL };
	const struct linop_s* trafos[NUM_REGS] = { NULL };
	opt_reg_configure(DIMS, coeff_dims, &ropts, thresh_ops, trafos, blksize, 1, gpun >= 0);
	int nr_penalties = ropts.r;
	struct reg_s* regs = ropts.regs;
	enum algo_t algo = ALGO_ADMM;
	struct iter it = italgo_config(algo, nr_penalties, regs, maxiter, -1, hgwld, false, admm, 1, false);
	debug_printf(DP_INFO, "Done.\n");

	complex float* init = NULL;
	if (x0 != NULL) {

		debug_printf(DP_INFO, "Loading in initial guess... ");
		init = load_cfl(x0, DIMS, coeff_dims);
		debug_printf(DP_INFO, "Done.\n");
	}

	debug_printf(DP_INFO, "Reconstruction... ");
	complex float* recon = create_cfl(argv[6], DIMS, coeff_dims);
	struct lsqr_conf lsqr_conf = { 0., gpun >= 0 };

	double recon_start = timestamp();
	const struct operator_p_s* J = lsqr2_create(&lsqr_conf, it.italgo, it.iconf, (const float*) init,
		A, NULL, nr_penalties, thresh_ops, trafos, NULL);
	operator_p_apply(J, 1., DIMS, coeff_dims, recon, DIMS, table_dims, table);
	md_zsmul(DIMS, coeff_dims, recon, recon, norm); // undo the normalization
	double recon_end = timestamp();

	debug_printf(DP_INFO, "Done.\nReconstruction time: %f seconds.\n", recon_end - recon_start);

	debug_printf(DP_INFO, "Cleaning up and saving result... ");
	operator_p_free(J);
	linop_free(A);
	linop_free(A_sc);
	md_free(kernel);
	unmap_cfl(DIMS, maps_dims, maps);
	unmap_cfl(DIMS, wave_dims, wave);
	unmap_cfl(DIMS, phi_dims, phi);
	unmap_cfl(DIMS, reorder_dims, reorder);
	unmap_cfl(DIMS, table_dims, table);
	unmap_cfl(DIMS, coeff_dims, recon);
	if (x0 != NULL)
		unmap_cfl(DIMS, coeff_dims, init);
	debug_printf(DP_INFO, "Done.\n");

	double end_time = timestamp();
	debug_printf(DP_INFO, "Total time: %f seconds.\n", end_time - start_time);

	return 0;
}
kmp_csupport.c
/* * kmp_csupport.c -- kfront linkage support for OpenMP. */ //===----------------------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.txt for details. // //===----------------------------------------------------------------------===// #include "omp.h" /* extern "C" declarations of user-visible routines */ #include "kmp.h" #include "kmp_i18n.h" #include "kmp_itt.h" #include "kmp_error.h" #include "kmp_stats.h" #if OMPT_SUPPORT #include "ompt-internal.h" #include "ompt-specific.h" #endif #define MAX_MESSAGE 512 /* ------------------------------------------------------------------------ */ /* ------------------------------------------------------------------------ */ /* flags will be used in future, e.g., to implement */ /* openmp_strict library restrictions */ /*! * @ingroup STARTUP_SHUTDOWN * @param loc in source location information * @param flags in for future use (currently ignored) * * Initialize the runtime library. This call is optional; if it is not made then * it will be implicitly called by attempts to use other library functions. * */ void __kmpc_begin(ident_t *loc, kmp_int32 flags) { // By default __kmp_ignore_mppbeg() returns TRUE. if (__kmp_ignore_mppbeg() == FALSE) { __kmp_internal_begin(); KC_TRACE( 10, ("__kmpc_begin: called\n" ) ); } } /*! * @ingroup STARTUP_SHUTDOWN * @param loc source location information * * Shutdown the runtime library. This is also optional, and even if called will not * do anything unless the `KMP_IGNORE_MPPEND` environment variable is set to zero. */ void __kmpc_end(ident_t *loc) { // By default, __kmp_ignore_mppend() returns TRUE which makes __kmpc_end() call no-op. // However, this can be overridden with KMP_IGNORE_MPPEND environment variable. 
// If KMP_IGNORE_MPPEND is 0, __kmp_ignore_mppend() returns FALSE and __kmpc_end() // will unregister this root (it can cause library shut down). if (__kmp_ignore_mppend() == FALSE) { KC_TRACE( 10, ("__kmpc_end: called\n" ) ); KA_TRACE( 30, ("__kmpc_end\n" )); __kmp_internal_end_thread( -1 ); } } /*! @ingroup THREAD_STATES @param loc Source location information. @return The global thread index of the active thread. This function can be called in any context. If the runtime has ony been entered at the outermost level from a single (necessarily non-OpenMP<sup>*</sup>) thread, then the thread number is that which would be returned by omp_get_thread_num() in the outermost active parallel construct. (Or zero if there is no active parallel construct, since the master thread is necessarily thread zero). If multiple non-OpenMP threads all enter an OpenMP construct then this will be a unique thread identifier among all the threads created by the OpenMP runtime (but the value cannote be defined in terms of OpenMP thread ids returned by omp_get_thread_num()). */ kmp_int32 __kmpc_global_thread_num(ident_t *loc) { kmp_int32 gtid = __kmp_entry_gtid(); KC_TRACE( 10, ("__kmpc_global_thread_num: T#%d\n", gtid ) ); return gtid; } /*! @ingroup THREAD_STATES @param loc Source location information. @return The number of threads under control of the OpenMP<sup>*</sup> runtime This function can be called in any context. It returns the total number of threads under the control of the OpenMP runtime. That is not a number that can be determined by any OpenMP standard calls, since the library may be called from more than one non-OpenMP thread, and this reflects the total over all such calls. Similarly the runtime maintains underlying threads even when they are not active (since the cost of creating and destroying OS threads is high), this call counts all such threads even if they are not waiting for work. 
*/
kmp_int32
__kmpc_global_num_threads(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_global_num_threads: num_threads = %d\n", __kmp_nth ) );

    return TCR_4(__kmp_nth);
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The thread number of the calling thread in the innermost active parallel construct.
*/
kmp_int32
__kmpc_bound_thread_num(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_bound_thread_num: called\n" ) );
    return __kmp_tid_from_gtid( __kmp_entry_gtid() );
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The number of threads in the innermost active parallel construct.
*/
kmp_int32
__kmpc_bound_num_threads(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_bound_num_threads: called\n" ) );

    return __kmp_entry_thread() -> th.th_team -> t.t_nproc;
}

/*!
 * @ingroup DEPRECATED
 * @param loc location description
 *
 * This function need not be called. It always returns TRUE.
 */
kmp_int32
__kmpc_ok_to_fork(ident_t *loc)
{
#ifndef KMP_DEBUG

    return TRUE;

#else

    const char *semi2;
    const char *semi3;
    int line_no;

    if (__kmp_par_range == 0) {
        return TRUE;
    }

    // Parse "file;func;line" out of loc->psource to apply KMP_PAR_RANGE filtering.
    semi2 = loc->psource;
    if (semi2 == NULL) {
        return TRUE;
    }
    semi2 = strchr(semi2, ';');
    if (semi2 == NULL) {
        return TRUE;
    }
    semi2 = strchr(semi2 + 1, ';');
    if (semi2 == NULL) {
        return TRUE;
    }
    if (__kmp_par_range_filename[0]) {
        const char *name = semi2 - 1;
        // Walk back to the last path separator (or field separator) to isolate the base name.
        while ((name > loc->psource) && (*name != '/') && (*name != ';')) {
            name--;
        }
        if ((*name == '/') || (*name == ';')) {
            name++;
        }
        if (strncmp(__kmp_par_range_filename, name, semi2 - name)) {
            return __kmp_par_range < 0;
        }
    }
    semi3 = strchr(semi2 + 1, ';');
    if (__kmp_par_range_routine[0]) {
        if ((semi3 != NULL) && (semi3 > semi2)
          && (strncmp(__kmp_par_range_routine, semi2 + 1, semi3 - semi2 - 1))) {
            return __kmp_par_range < 0;
        }
    }
    if (KMP_SSCANF(semi3 + 1, "%d", &line_no) == 1) {
        if ((line_no >= __kmp_par_range_lb) && (line_no <= __kmp_par_range_ub)) {
            return __kmp_par_range > 0;
        }
        return __kmp_par_range < 0;
    }
    return TRUE;

#endif /* KMP_DEBUG */

}
/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return 1 if this thread is executing inside an active parallel region, zero if not.
*/
kmp_int32
__kmpc_in_parallel( ident_t *loc )
{
    return __kmp_entry_thread() -> th.th_root -> r.r_active;
}

/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
@param num_threads number of threads requested for this parallel construct

Set the number of threads to be used by the next fork spawned by this thread.
This call is only required if the parallel construct has a `num_threads` clause.
*/
void
__kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_threads )
{
    KA_TRACE( 20, ("__kmpc_push_num_threads: enter T#%d num_threads=%d\n",
      global_tid, num_threads ) );

    __kmp_push_num_threads( loc, global_tid, num_threads );
}

void
__kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid )
{
    KA_TRACE( 20, ("__kmpc_pop_num_threads: enter\n" ) );

    /* the num_threads are automatically popped */
}

#if OMP_40_ENABLED

void
__kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid, kmp_int32 proc_bind )
{
    KA_TRACE( 20, ("__kmpc_push_proc_bind: enter T#%d proc_bind=%d\n",
      global_tid, proc_bind ) );

    __kmp_push_proc_bind( loc, global_tid, (kmp_proc_bind_t)proc_bind );
}

#endif /* OMP_40_ENABLED */

/*!
@ingroup PARALLEL
@param loc  source location information
@param argc  total number of arguments in the ellipsis
@param microtask  pointer to callback routine consisting of outlined parallel construct
@param ...  pointers to shared variables that aren't global

Do the actual fork and call the microtask in the relevant number of threads.
*/
void
__kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
{
    KMP_STOP_EXPLICIT_TIMER(OMP_serial);
    KMP_COUNT_BLOCK(OMP_PARALLEL);
    int         gtid = __kmp_entry_gtid();
    // maybe to save thr_state is enough here
    {
        va_list     ap;
        va_start(   ap, microtask );

#if OMPT_SUPPORT
        // Record the re-entry frame for OMPT tools before forking.
        int tid = __kmp_tid_from_gtid( gtid );
        kmp_info_t *master_th = __kmp_threads[ gtid ];
        kmp_team_t *parent_team = master_th->th.th_team;
        if (ompt_status & ompt_status_track) {
            parent_team->t.t_implicit_task_taskdata[tid].
                ompt_task_info.frame.reenter_runtime_frame = __builtin_frame_address(0);
        }
#endif

#if INCLUDE_SSC_MARKS
        SSC_MARK_FORKING();
#endif
        __kmp_fork_call( loc, gtid, fork_context_intel,
                argc,
#if OMPT_SUPPORT
                VOLATILE_CAST(void *) microtask,      // "unwrapped" task
#endif
                VOLATILE_CAST(microtask_t) microtask, // "wrapped" task
                VOLATILE_CAST(launch_t)    __kmp_invoke_task_func,
/* TODO: revert workaround for Intel(R) 64 tracker #96 */
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
                &ap
#else
                ap
#endif
                );
#if INCLUDE_SSC_MARKS
        SSC_MARK_JOINING();
#endif
        __kmp_join_call( loc, gtid, fork_context_intel );

        va_end( ap );

#if OMPT_SUPPORT
        if (ompt_status & ompt_status_track) {
            parent_team->t.t_implicit_task_taskdata[tid].
                ompt_task_info.frame.reenter_runtime_frame = 0;
        }
#endif
    }
    KMP_START_EXPLICIT_TIMER(OMP_serial);
}

#if OMP_40_ENABLED
/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
@param num_teams number of teams requested for the teams construct
@param num_threads number of threads per team requested for the teams construct

Set the number of teams to be used by the teams construct.
This call is only required if the teams construct has a `num_teams` clause
or a `thread_limit` clause (or both).
*/
void
__kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_teams, kmp_int32 num_threads )
{
    KA_TRACE( 20, ("__kmpc_push_num_teams: enter T#%d num_teams=%d num_threads=%d\n",
      global_tid, num_teams, num_threads ) );

    __kmp_push_num_teams( loc, global_tid, num_teams, num_threads );
}

/*!
@ingroup PARALLEL @param loc source location information @param argc total number of arguments in the ellipsis @param microtask pointer to callback routine consisting of outlined teams construct @param ... pointers to shared variables that aren't global Do the actual fork and call the microtask in the relevant number of threads. */ void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...) { int gtid = __kmp_entry_gtid(); kmp_info_t *this_thr = __kmp_threads[ gtid ]; va_list ap; va_start( ap, microtask ); // remember teams entry point and nesting level this_thr->th.th_teams_microtask = microtask; this_thr->th.th_teams_level = this_thr->th.th_team->t.t_level; // AC: can be >0 on host #if OMPT_SUPPORT kmp_team_t *parent_team = this_thr->th.th_team; int tid = __kmp_tid_from_gtid( gtid ); if (ompt_status & ompt_status_track) { parent_team->t.t_implicit_task_taskdata[tid]. ompt_task_info.frame.reenter_runtime_frame = __builtin_frame_address(0); } #endif // check if __kmpc_push_num_teams called, set default number of teams otherwise if ( this_thr->th.th_teams_size.nteams == 0 ) { __kmp_push_num_teams( loc, gtid, 0, 0 ); } KMP_DEBUG_ASSERT(this_thr->th.th_set_nproc >= 1); KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nteams >= 1); KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nth >= 1); __kmp_fork_call( loc, gtid, fork_context_intel, argc, #if OMPT_SUPPORT VOLATILE_CAST(void *) microtask, // "unwrapped" task #endif VOLATILE_CAST(microtask_t) __kmp_teams_master, // "wrapped" task VOLATILE_CAST(launch_t) __kmp_invoke_teams_master, #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX &ap #else ap #endif ); __kmp_join_call( loc, gtid, fork_context_intel ); #if OMPT_SUPPORT if (ompt_status & ompt_status_track) { parent_team->t.t_implicit_task_taskdata[tid]. 
ompt_task_info.frame.reenter_runtime_frame = NULL; } #endif this_thr->th.th_teams_microtask = NULL; this_thr->th.th_teams_level = 0; *(kmp_int64*)(&this_thr->th.th_teams_size) = 0L; va_end( ap ); } #endif /* OMP_40_ENABLED */ // // I don't think this function should ever have been exported. // The __kmpc_ prefix was misapplied. I'm fairly certain that no generated // openmp code ever called it, but it's been exported from the RTL for so // long that I'm afraid to remove the definition. // int __kmpc_invoke_task_func( int gtid ) { return __kmp_invoke_task_func( gtid ); } /*! @ingroup PARALLEL @param loc source location information @param global_tid global thread number Enter a serialized parallel construct. This interface is used to handle a conditional parallel region, like this, @code #pragma omp parallel if (condition) @endcode when the condition is false. */ void __kmpc_serialized_parallel(ident_t *loc, kmp_int32 global_tid) { __kmp_serialized_parallel(loc, global_tid); /* The implementation is now in kmp_runtime.c so that it can share static functions with * kmp_fork_call since the tasks to be done are similar in each case. */ } /*! @ingroup PARALLEL @param loc source location information @param global_tid global thread number Leave a serialized parallel construct. */ void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32 global_tid) { kmp_internal_control_t *top; kmp_info_t *this_thr; kmp_team_t *serial_team; KC_TRACE( 10, ("__kmpc_end_serialized_parallel: called by T#%d\n", global_tid ) ); /* skip all this code for autopar serialized loops since it results in unacceptable overhead */ if( loc != NULL && (loc->flags & KMP_IDENT_AUTOPAR ) ) return; // Not autopar code if( ! 
TCR_4( __kmp_init_parallel ) ) __kmp_parallel_initialize(); this_thr = __kmp_threads[ global_tid ]; serial_team = this_thr->th.th_serial_team; #if OMP_41_ENABLED kmp_task_team_t * task_team = this_thr->th.th_task_team; // we need to wait for the proxy tasks before finishing the thread if ( task_team != NULL && task_team->tt.tt_found_proxy_tasks ) __kmp_task_team_wait(this_thr, serial_team, NULL ); // is an ITT object needed here? #endif KMP_MB(); KMP_DEBUG_ASSERT( serial_team ); KMP_ASSERT( serial_team -> t.t_serialized ); KMP_DEBUG_ASSERT( this_thr -> th.th_team == serial_team ); KMP_DEBUG_ASSERT( serial_team != this_thr->th.th_root->r.r_root_team ); KMP_DEBUG_ASSERT( serial_team -> t.t_threads ); KMP_DEBUG_ASSERT( serial_team -> t.t_threads[0] == this_thr ); /* If necessary, pop the internal control stack values and replace the team values */ top = serial_team -> t.t_control_stack_top; if ( top && top -> serial_nesting_level == serial_team -> t.t_serialized ) { copy_icvs( &serial_team -> t.t_threads[0] -> th.th_current_task -> td_icvs, top ); serial_team -> t.t_control_stack_top = top -> next; __kmp_free(top); } //if( serial_team -> t.t_serialized > 1 ) serial_team -> t.t_level--; /* pop dispatch buffers stack */ KMP_DEBUG_ASSERT(serial_team->t.t_dispatch->th_disp_buffer); { dispatch_private_info_t * disp_buffer = serial_team->t.t_dispatch->th_disp_buffer; serial_team->t.t_dispatch->th_disp_buffer = serial_team->t.t_dispatch->th_disp_buffer->next; __kmp_free( disp_buffer ); } -- serial_team -> t.t_serialized; if ( serial_team -> t.t_serialized == 0 ) { /* return to the parallel section */ #if KMP_ARCH_X86 || KMP_ARCH_X86_64 if ( __kmp_inherit_fp_control && serial_team->t.t_fp_control_saved ) { __kmp_clear_x87_fpu_status_word(); __kmp_load_x87_fpu_control_word( &serial_team->t.t_x87_fpu_control_word ); __kmp_load_mxcsr( &serial_team->t.t_mxcsr ); } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ this_thr -> th.th_team = serial_team -> t.t_parent; this_thr -> 
th.th_info.ds.ds_tid = serial_team -> t.t_master_tid; /* restore values cached in the thread */ this_thr -> th.th_team_nproc = serial_team -> t.t_parent -> t.t_nproc; /* JPH */ this_thr -> th.th_team_master = serial_team -> t.t_parent -> t.t_threads[0]; /* JPH */ this_thr -> th.th_team_serialized = this_thr -> th.th_team -> t.t_serialized; /* TODO the below shouldn't need to be adjusted for serialized teams */ this_thr -> th.th_dispatch = & this_thr -> th.th_team -> t.t_dispatch[ serial_team -> t.t_master_tid ]; __kmp_pop_current_task_from_thread( this_thr ); KMP_ASSERT( this_thr -> th.th_current_task -> td_flags.executing == 0 ); this_thr -> th.th_current_task -> td_flags.executing = 1; if ( __kmp_tasking_mode != tskm_immediate_exec ) { // Copy the task team from the new child / old parent team to the thread. this_thr->th.th_task_team = this_thr->th.th_team->t.t_task_team[this_thr->th.th_task_state]; KA_TRACE( 20, ( "__kmpc_end_serialized_parallel: T#%d restoring task_team %p / team %p\n", global_tid, this_thr -> th.th_task_team, this_thr -> th.th_team ) ); } } else { if ( __kmp_tasking_mode != tskm_immediate_exec ) { KA_TRACE( 20, ( "__kmpc_end_serialized_parallel: T#%d decreasing nesting depth of serial team %p to %d\n", global_tid, serial_team, serial_team -> t.t_serialized ) ); } } #if USE_ITT_BUILD kmp_uint64 cur_time = 0; #if USE_ITT_NOTIFY if ( __itt_get_timestamp_ptr ) { cur_time = __itt_get_timestamp(); } #endif /* USE_ITT_NOTIFY */ if ( this_thr->th.th_team->t.t_level == 0 #if OMP_40_ENABLED && this_thr->th.th_teams_microtask == NULL #endif ) { // Report the barrier this_thr->th.th_ident = loc; if ( ( __itt_frame_submit_v3_ptr || KMP_ITT_DEBUG ) && ( __kmp_forkjoin_frames_mode == 3 || __kmp_forkjoin_frames_mode == 1 ) ) { __kmp_itt_frame_submit( global_tid, this_thr->th.th_frame_time_serialized, cur_time, 0, loc, this_thr->th.th_team_nproc, 0 ); if ( __kmp_forkjoin_frames_mode == 3 ) // Since barrier frame for serialized region is equal to the region we 
use the same begin timestamp as for the barrier. __kmp_itt_frame_submit( global_tid, serial_team->t.t_region_time, cur_time, 0, loc, this_thr->th.th_team_nproc, 2 ); } else if ( ( __itt_frame_end_v3_ptr || KMP_ITT_DEBUG ) && ! __kmp_forkjoin_frames_mode && __kmp_forkjoin_frames ) // Mark the end of the "parallel" region for VTune. Only use one of frame notification scheme at the moment. __kmp_itt_region_joined( global_tid, 1 ); } #endif /* USE_ITT_BUILD */ if ( __kmp_env_consistency_check ) __kmp_pop_parallel( global_tid, NULL ); } /*! @ingroup SYNCHRONIZATION @param loc source location information. Execute <tt>flush</tt>. This is implemented as a full memory fence. (Though depending on the memory ordering convention obeyed by the compiler even that may not be necessary). */ void __kmpc_flush(ident_t *loc) { KC_TRACE( 10, ("__kmpc_flush: called\n" ) ); /* need explicit __mf() here since use volatile instead in library */ KMP_MB(); /* Flush all pending memory write invalidates. */ #if ( KMP_ARCH_X86 || KMP_ARCH_X86_64 ) #if KMP_MIC // fence-style instructions do not exist, but lock; xaddl $0,(%rsp) can be used. // We shouldn't need it, though, since the ABI rules require that // * If the compiler generates NGO stores it also generates the fence // * If users hand-code NGO stores they should insert the fence // therefore no incomplete unordered stores should be visible. #else // C74404 // This is to address non-temporal store instructions (sfence needed). // The clflush instruction is addressed either (mfence needed). // Probably the non-temporal load monvtdqa instruction should also be addressed. // mfence is a SSE2 instruction. Do not execute it if CPU is not SSE2. if ( ! __kmp_cpuinfo.initialized ) { __kmp_query_cpuid( & __kmp_cpuinfo ); }; // if if ( ! __kmp_cpuinfo.sse2 ) { // CPU cannot execute SSE2 instructions. 
            } else {
                #if KMP_COMPILER_ICC || KMP_COMPILER_MSVC
                    _mm_mfence();
                #else
                    __sync_synchronize();
                #endif // KMP_COMPILER_ICC
            }; // if
        #endif // KMP_MIC
    #elif (KMP_ARCH_ARM || KMP_ARCH_AARCH64)
        // Nothing to see here move along
    #elif KMP_ARCH_PPC64
        // Nothing needed here (we have a real MB above).
        #if KMP_OS_CNK
            // The flushing thread needs to yield here; this prevents a
            // busy-waiting thread from saturating the pipeline. flush is
            // often used in loops like this:
            //   while (!flag) {
            //     #pragma omp flush(flag)
            //   }
            // and adding the yield here is good for at least a 10x speedup
            // when running >2 threads per core (on the NAS LU benchmark).
            __kmp_yield(TRUE);
        #endif
    #else
        #error Unknown or unsupported architecture
    #endif
}

/* -------------------------------------------------------------------------- */

/* -------------------------------------------------------------------------- */

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.

Execute a barrier.
*/
void
__kmpc_barrier(ident_t *loc, kmp_int32 global_tid)
{
    KMP_COUNT_BLOCK(OMP_BARRIER);
    KMP_TIME_BLOCK(OMP_barrier);
    KC_TRACE( 10, ("__kmpc_barrier: called T#%d\n", global_tid ) );

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    if ( __kmp_env_consistency_check ) {
        if ( loc == 0 ) {
            KMP_WARNING( ConstructIdentInvalid ); // ??? What does it mean for the user?
        }; // if

        __kmp_check_barrier( global_tid, ct_barrier, loc );
    }

    __kmp_threads[ global_tid ]->th.th_ident = loc;
    // TODO: explicit barrier_wait_id:
    //   this function is called when 'barrier' directive is present or
    //   implicit barrier at the end of a worksharing construct.
    // 1) better to add a per-thread barrier counter to a thread data structure
    // 2) set to 0 when a new team is created
    // 4) no sync is required

    __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );
}

/* The BARRIER for a MASTER section is always explicit   */

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param global_tid  global thread number .
@return 1 if this thread should execute the <tt>master</tt> block, 0 otherwise.
*/
kmp_int32
__kmpc_master(ident_t *loc, kmp_int32 global_tid)
{
    KMP_COUNT_BLOCK(OMP_MASTER);
    int status = 0;

    KC_TRACE( 10, ("__kmpc_master: called T#%d\n", global_tid ) );

    if( ! TCR_4( __kmp_init_parallel ) )
        __kmp_parallel_initialize();

    if( KMP_MASTER_GTID( global_tid ))
        status = 1;

#if OMPT_SUPPORT && OMPT_TRACE
    // Only the master thread (status == 1) reports the master-begin event.
    if (status) {
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_master_begin)) {
            kmp_info_t  *this_thr = __kmp_threads[ global_tid ];
            kmp_team_t  *team = this_thr -> th.th_team;
            int  tid = __kmp_tid_from_gtid( global_tid );
            ompt_callbacks.ompt_callback(ompt_event_master_begin)(
                team->t.ompt_team_info.parallel_id,
                team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
        }
    }
#endif

    if ( __kmp_env_consistency_check ) {
#if KMP_USE_DYNAMIC_LOCK
        if (status)
            __kmp_push_sync( global_tid, ct_master, loc, NULL, 0 );
        else
            __kmp_check_sync( global_tid, ct_master, loc, NULL, 0 );
#else
        if (status)
            __kmp_push_sync( global_tid, ct_master, loc, NULL );
        else
            __kmp_check_sync( global_tid, ct_master, loc, NULL );
#endif
    }

    return status;
}

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param global_tid  global thread number .

Mark the end of a <tt>master</tt> region. This should only be called by the thread
that executes the <tt>master</tt> region.
*/
void
__kmpc_end_master(ident_t *loc, kmp_int32 global_tid)
{
    KC_TRACE( 10, ("__kmpc_end_master: called T#%d\n", global_tid ) );

    KMP_DEBUG_ASSERT( KMP_MASTER_GTID( global_tid ));

#if OMPT_SUPPORT && OMPT_TRACE
    kmp_info_t *this_thr = __kmp_threads[ global_tid ];
    kmp_team_t *team = this_thr -> th.th_team;
    if ((ompt_status == ompt_status_track_callback) &&
        ompt_callbacks.ompt_callback(ompt_event_master_end)) {
        int  tid = __kmp_tid_from_gtid( global_tid );
        ompt_callbacks.ompt_callback(ompt_event_master_end)(
            team->t.ompt_team_info.parallel_id,
            team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
    }
#endif

    if ( __kmp_env_consistency_check ) {
        if( global_tid < 0 )
            KMP_WARNING( ThreadIdentInvalid );

        // Only the master thread pushed the sync frame, so only it pops.
        if( KMP_MASTER_GTID( global_tid ))
            __kmp_pop_sync( global_tid, ct_master, loc );
    }
}

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param gtid  global thread number.

Start execution of an <tt>ordered</tt> construct.
*/
void
__kmpc_ordered( ident_t * loc, kmp_int32 gtid )
{
    int cid = 0;
    kmp_info_t *th;

    KMP_DEBUG_ASSERT( __kmp_init_serial );

    KC_TRACE( 10, ("__kmpc_ordered: called T#%d\n", gtid ));

    if (!
TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

#if USE_ITT_BUILD
    __kmp_itt_ordered_prep( gtid );
    // TODO: ordered_wait_id
#endif /* USE_ITT_BUILD */

    th = __kmp_threads[ gtid ];

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_status & ompt_status_track) {
        /* OMPT state update */
        th->th.ompt_thread_info.wait_id = (uint64_t) loc;
        th->th.ompt_thread_info.state = ompt_state_wait_ordered;

        /* OMPT event callback */
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_wait_ordered)) {
            ompt_callbacks.ompt_callback(ompt_event_wait_ordered)(
                th->th.ompt_thread_info.wait_id);
        }
    }
#endif

    // Dispatch-specific "enter ordered" hook if one is installed, otherwise
    // the generic parallel ordered entry.
    if ( th -> th.th_dispatch -> th_deo_fcn != 0 )
        (*th->th.th_dispatch->th_deo_fcn)( & gtid, & cid, loc );
    else
        __kmp_parallel_deo( & gtid, & cid, loc );

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_status & ompt_status_track) {
        /* OMPT state update */
        th->th.ompt_thread_info.state = ompt_state_work_parallel;
        th->th.ompt_thread_info.wait_id = 0;

        /* OMPT event callback */
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)) {
            ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)(
                th->th.ompt_thread_info.wait_id);
        }
    }
#endif

#if USE_ITT_BUILD
    __kmp_itt_ordered_start( gtid );
#endif /* USE_ITT_BUILD */
}

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param gtid  global thread number.

End execution of an <tt>ordered</tt> construct.
*/
void
__kmpc_end_ordered( ident_t * loc, kmp_int32 gtid )
{
    int cid = 0;
    kmp_info_t *th;

    KC_TRACE( 10, ("__kmpc_end_ordered: called T#%d\n", gtid ) );

#if USE_ITT_BUILD
    __kmp_itt_ordered_end( gtid );
    // TODO: ordered_wait_id
#endif /* USE_ITT_BUILD */

    th = __kmp_threads[ gtid ];

    // Dispatch-specific "exit ordered" hook if installed, else the generic exit.
    if ( th -> th.th_dispatch -> th_dxo_fcn != 0 )
        (*th->th.th_dispatch->th_dxo_fcn)( & gtid, & cid, loc );
    else
        __kmp_parallel_dxo( & gtid, & cid, loc );

#if OMPT_SUPPORT && OMPT_BLAME
    if ((ompt_status == ompt_status_track_callback) &&
        ompt_callbacks.ompt_callback(ompt_event_release_ordered)) {
        ompt_callbacks.ompt_callback(ompt_event_release_ordered)(
            th->th.ompt_thread_info.wait_id);
    }
#endif
}

#if KMP_USE_DYNAMIC_LOCK

// Lazily allocate (and race-safely publish) the indirect lock backing a
// critical section, keyed off the critical name word.
static __forceinline kmp_indirect_lock_t *
__kmp_get_indirect_csptr(kmp_critical_name * crit, ident_t const * loc, kmp_int32 gtid, kmp_dyna_lockseq_t seq)
{
    // Code from __kmp_get_critical_section_ptr
    // This function returns an indirect lock object instead of a user lock.
    kmp_indirect_lock_t **lck, *ret;
    lck = (kmp_indirect_lock_t **)crit;
    ret = (kmp_indirect_lock_t *)TCR_PTR(*lck);
    if (ret == NULL) {
        void *idx;
        kmp_indirect_locktag_t tag = DYNA_GET_I_TAG(seq);
        kmp_indirect_lock_t *ilk = __kmp_allocate_indirect_lock(&idx, gtid, tag);
        ret = ilk;
        DYNA_I_LOCK_FUNC(ilk, init)(ilk->lock);
        DYNA_SET_I_LOCK_LOCATION(ilk, loc);
        DYNA_SET_I_LOCK_FLAGS(ilk, kmp_lf_critical_section);
        KA_TRACE(20, ("__kmp_get_indirect_csptr: initialized indirect lock #%d\n", tag));
#if USE_ITT_BUILD
        __kmp_itt_critical_creating(ilk->lock, loc);
#endif
        // Try to publish our lock; if another thread won the race (status == 0)
        // we discard ours and use theirs.
        int status = KMP_COMPARE_AND_STORE_PTR(lck, 0, ilk);
        if (status == 0) {
#if USE_ITT_BUILD
            __kmp_itt_critical_destroyed(ilk->lock);
#endif
            // Postponing destroy, to avoid costly dispatch here.
            //DYNA_D_LOCK_FUNC(&idx, destroy)((kmp_dyna_lock_t *)&idx);
            // Reload the winner's lock pointer.
            ret = (kmp_indirect_lock_t *)TCR_PTR(*lck);
            KMP_DEBUG_ASSERT(ret != NULL);
        }
    }
    return ret;
}

// Fast-path acquire tas lock: spin with CAS, yielding when oversubscribed.
#define DYNA_ACQUIRE_TAS_LOCK(lock, gtid) {                                                                \
    kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock;                                                            \
    if (l->lk.poll != DYNA_LOCK_FREE(tas) ||                                                               \
        ! KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), DYNA_LOCK_FREE(tas), DYNA_LOCK_BUSY(gtid+1, tas))) {  \
        kmp_uint32 spins;                                                                                  \
        KMP_FSYNC_PREPARE(l);                                                                              \
        KMP_INIT_YIELD(spins);                                                                             \
        if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {                      \
            KMP_YIELD(TRUE);                                                                               \
        } else {                                                                                           \
            KMP_YIELD_SPIN(spins);                                                                         \
        }                                                                                                  \
        while (l->lk.poll != DYNA_LOCK_FREE(tas) ||                                                        \
               ! KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), DYNA_LOCK_FREE(tas), DYNA_LOCK_BUSY(gtid+1, tas))) {  \
            if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {                  \
                KMP_YIELD(TRUE);                                                                           \
            } else {                                                                                       \
                KMP_YIELD_SPIN(spins);                                                                     \
            }                                                                                              \
        }                                                                                                  \
    }                                                                                                      \
    KMP_FSYNC_ACQUIRED(l);                                                                                 \
}

// Fast-path test tas lock: single CAS attempt, no spinning.
#define DYNA_TEST_TAS_LOCK(lock, gtid, rc) {                                                               \
    kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock;                                                            \
    rc = l->lk.poll == DYNA_LOCK_FREE(tas) &&                                                              \
         KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), DYNA_LOCK_FREE(tas), DYNA_LOCK_BUSY(gtid+1, tas));     \
}

// Fast-path release tas lock.
#define DYNA_RELEASE_TAS_LOCK(lock, gtid) {                                                                \
    TCW_4(((kmp_tas_lock_t *)lock)->lk.poll, DYNA_LOCK_FREE(tas));                                         \
    KMP_MB();                                                                                              \
}

#if DYNA_HAS_FUTEX

# include <unistd.h>
# include <sys/syscall.h>
# ifndef FUTEX_WAIT
#  define FUTEX_WAIT 0
# endif
# ifndef FUTEX_WAKE
#  define FUTEX_WAKE 1
# endif

// Fast-path acquire futex lock: CAS in gtid_code; on contention set the
// "waiter present" bit (low bit) and sleep in FUTEX_WAIT.
#define DYNA_ACQUIRE_FUTEX_LOCK(lock, gtid) {                                                              \
    kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock;                                                      \
    kmp_int32 gtid_code = (gtid+1) << 1;                                                                   \
    KMP_MB();                                                                                              \
    KMP_FSYNC_PREPARE(ftx);                                                                                \
    kmp_int32 poll_val;                                                                                    \
    while ((poll_val = KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), DYNA_LOCK_FREE(futex),                 \
                                                   DYNA_LOCK_BUSY(gtid_code, futex))) != DYNA_LOCK_FREE(futex)) {  \
        kmp_int32 cond = DYNA_LOCK_STRIP(poll_val) & 1;                                                    \
        if (!cond) {                                                                                       \
            if (!KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), poll_val, poll_val | DYNA_LOCK_BUSY(1, futex))) {  \
                continue;                                                                                  \
            }                                                                                              \
            poll_val |= DYNA_LOCK_BUSY(1, futex);                                                          \
        }                                                                                                  \
        kmp_int32 rc;                                                                                      \
        if ((rc = syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAIT, poll_val, NULL, NULL, 0)) != 0) {       \
            continue;                                                                                      \
        }                                                                                                  \
        gtid_code |= 1;                                                                                    \
    }                                                                                                      \
    KMP_FSYNC_ACQUIRED(ftx);                                                                               \
}

// Fast-path test futex lock: single CAS attempt.
#define DYNA_TEST_FUTEX_LOCK(lock, gtid, rc) {                                                             \
    kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock;                                                      \
    if (KMP_COMPARE_AND_STORE_ACQ32(&(ftx->lk.poll), DYNA_LOCK_FREE(futex), DYNA_LOCK_BUSY(gtid+1, futex) << 1)) {  \
        KMP_FSYNC_ACQUIRED(ftx);                                                                           \
        rc = TRUE;                                                                                         \
    } else {                                                                                               \
        rc = FALSE;                                                                                        \
    }                                                                                                      \
}

// Fast-path release futex lock: swap in FREE; wake one waiter if the
// waiter bit was set.
#define DYNA_RELEASE_FUTEX_LOCK(lock, gtid) {                                                              \
    kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock;                                                      \
    KMP_MB();                                                                                              \
    KMP_FSYNC_RELEASING(ftx);                                                                              \
    kmp_int32 poll_val = KMP_XCHG_FIXED32(&(ftx->lk.poll), DYNA_LOCK_FREE(futex));                         \
    if (DYNA_LOCK_STRIP(poll_val) & 1) {                                                                   \
        syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAKE, DYNA_LOCK_BUSY(1, futex), NULL, NULL, 0);         \
    }                                                                                                      \
    KMP_MB();                                                                                              \
    KMP_YIELD(TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));                     \
}

#endif // DYNA_HAS_FUTEX

#else // KMP_USE_DYNAMIC_LOCK

// Lazily allocate (and race-safely publish) the user lock backing a
// critical section, keyed off the critical name word.
static kmp_user_lock_p
__kmp_get_critical_section_ptr( kmp_critical_name * crit, ident_t const * loc, kmp_int32 gtid )
{
    kmp_user_lock_p *lck_pp = (kmp_user_lock_p *)crit;

    //
    // Because of the double-check, the following load
    // doesn't need to be volatile.
    //
    kmp_user_lock_p lck = (kmp_user_lock_p)TCR_PTR( *lck_pp );

    if ( lck == NULL ) {
        void * idx;

        // Allocate & initialize the lock.
        // Remember allocated locks in table in order to free them in __kmp_cleanup()
        lck = __kmp_user_lock_allocate( &idx, gtid, kmp_lf_critical_section );
        __kmp_init_user_lock_with_checks( lck );
        __kmp_set_user_lock_location( lck, loc );
#if USE_ITT_BUILD
        __kmp_itt_critical_creating( lck );
            // __kmp_itt_critical_creating() should be called *before* the first usage of underlying
            // lock. It is the only place where we can guarantee it. There are chances the lock will
            // destroyed with no usage, but it is not a problem, because this is not real event seen
            // by user but rather setting name for object (lock). See more details in kmp_itt.h.
#endif /* USE_ITT_BUILD */

        //
        // Use a cmpxchg instruction to slam the start of the critical
        // section with the lock pointer.  If another thread beat us
        // to it, deallocate the lock, and use the lock that the other
        // thread allocated.
        //
        int status = KMP_COMPARE_AND_STORE_PTR( lck_pp, 0, lck );

        if ( status == 0 ) {
            // Deallocate the lock and reload the value.
#if USE_ITT_BUILD
            __kmp_itt_critical_destroyed( lck );
                // Let ITT know the lock is destroyed and the same memory location may be reused for
                // another purpose.
#endif /* USE_ITT_BUILD */
            __kmp_destroy_user_lock_with_checks( lck );
            __kmp_user_lock_free( &idx, gtid, lck );
            lck = (kmp_user_lock_p)TCR_PTR( *lck_pp );
            KMP_DEBUG_ASSERT( lck != NULL );
        }
    }
    return lck;
}

#endif // KMP_USE_DYNAMIC_LOCK

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param global_tid  global thread number .
@param crit identity of the critical  section. This could be a pointer to a lock associated with the critical section, or
some other suitably unique value.

Enter code protected by a `critical` construct.
This function blocks until the executing thread can enter the critical section.
*/
void
__kmpc_critical( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit )
{
    KMP_COUNT_BLOCK(OMP_CRITICAL);

    kmp_user_lock_p lck;

    KC_TRACE( 10, ("__kmpc_critical: called T#%d\n", global_tid ) );

#if KMP_USE_DYNAMIC_LOCK
    // Assumption: all direct locks fit in OMP_CRITICAL_SIZE.
    // The global sequence __kmp_user_lock_seq is used unless compiler pushes a value.
    if (DYNA_IS_D_LOCK(__kmp_user_lock_seq)) {
        lck = (kmp_user_lock_p)crit;
        // The thread that reaches here first needs to tag the lock word.
        if (*((kmp_dyna_lock_t *)lck) == 0) {
            KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)lck, 0, DYNA_GET_D_TAG(__kmp_user_lock_seq));
        }
        if (__kmp_env_consistency_check) {
            __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
        }
# if USE_ITT_BUILD
        __kmp_itt_critical_acquiring(lck);
# endif
        // Inlined fast paths for TAS/futex; anything else goes through the
        // direct-lock function table.
# if DYNA_USE_FAST_TAS
        if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) {
            DYNA_ACQUIRE_TAS_LOCK(lck, global_tid);
        } else
# elif DYNA_USE_FAST_FUTEX
        if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) {
            DYNA_ACQUIRE_FUTEX_LOCK(lck, global_tid);
        } else
# endif
        {
            DYNA_D_LOCK_FUNC(lck, set)((kmp_dyna_lock_t *)lck, global_tid);
        }
    } else {
        // Indirect lock: resolve (allocating on first use) then acquire.
        kmp_indirect_lock_t *ilk = __kmp_get_indirect_csptr(crit, loc, global_tid, __kmp_user_lock_seq);
        lck = ilk->lock;
        if (__kmp_env_consistency_check) {
            __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
        }
# if USE_ITT_BUILD
        __kmp_itt_critical_acquiring(lck);
# endif
        DYNA_I_LOCK_FUNC(ilk, set)(lck, global_tid);
    }

#else // KMP_USE_DYNAMIC_LOCK

    //TODO: add THR_OVHD_STATE

    KMP_CHECK_USER_LOCK_INIT();

    // Small TAS/futex locks live directly in the critical name word; larger
    // lock kinds are allocated and reached through it.
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
        lck = (kmp_user_lock_p)crit;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
        lck = (kmp_user_lock_p)crit;
    }
#endif
    else { // ticket, queuing or drdpa
        lck = __kmp_get_critical_section_ptr( crit, loc, global_tid );
    }

    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_critical, loc, lck );

    /* since the critical directive binds to all threads, not just
     * the current team we have to check this even if we are in a
     * serialized team */
    /* also, even if we are the uber thread, we still have to conduct the lock,
     * as we have to contend with sibling threads */

#if USE_ITT_BUILD
    __kmp_itt_critical_acquiring( lck );
#endif /* USE_ITT_BUILD */
    // Value of 'crit' should be good for using as a critical_id of the critical section directive.
    __kmp_acquire_user_lock_with_checks( lck, global_tid );

#endif // KMP_USE_DYNAMIC_LOCK

#if USE_ITT_BUILD
    __kmp_itt_critical_acquired( lck );
#endif /* USE_ITT_BUILD */

    KA_TRACE( 15, ("__kmpc_critical: done T#%d\n", global_tid ));
} // __kmpc_critical

/*!
@ingroup WORK_SHARING
@param loc  source location information.
@param global_tid  global thread number .
@param crit identity of the critical  section. This could be a pointer to a lock associated with the critical section, or
some other suitably unique value.

Leave a critical section, releasing any lock that was held during its execution.
*/
void
__kmpc_end_critical(ident_t *loc, kmp_int32 global_tid, kmp_critical_name *crit)
{
    kmp_user_lock_p lck;

    KC_TRACE( 10, ("__kmpc_end_critical: called T#%d\n", global_tid ));

#if KMP_USE_DYNAMIC_LOCK
    if (DYNA_IS_D_LOCK(__kmp_user_lock_seq)) {
        lck = (kmp_user_lock_p)crit;
        KMP_ASSERT(lck != NULL);
        if (__kmp_env_consistency_check) {
            __kmp_pop_sync(global_tid, ct_critical, loc);
        }
# if USE_ITT_BUILD
        __kmp_itt_critical_releasing( lck );
# endif
        // Mirror of the acquire fast paths in __kmpc_critical.
# if DYNA_USE_FAST_TAS
        if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) {
            DYNA_RELEASE_TAS_LOCK(lck, global_tid);
        } else
# elif DYNA_USE_FAST_FUTEX
        if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) {
            DYNA_RELEASE_FUTEX_LOCK(lck, global_tid);
        } else
# endif
        {
            DYNA_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
        }
    } else {
        kmp_indirect_lock_t *ilk = (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit));
        KMP_ASSERT(ilk != NULL);
        lck = ilk->lock;
        if (__kmp_env_consistency_check) {
            __kmp_pop_sync(global_tid, ct_critical, loc);
        }
# if USE_ITT_BUILD
        __kmp_itt_critical_releasing( lck );
# endif
        DYNA_I_LOCK_FUNC(ilk, unset)(lck, global_tid);
    }

#else // KMP_USE_DYNAMIC_LOCK

    // Same lock-location logic as __kmpc_critical: small TAS/futex locks are
    // stored in the critical name word itself.
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
        lck = (kmp_user_lock_p)crit;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
        lck = (kmp_user_lock_p)crit;
    }
#endif
    else { // ticket, queuing or drdpa
        lck = (kmp_user_lock_p) TCR_PTR(*((kmp_user_lock_p *)crit));
    }

    KMP_ASSERT(lck != NULL);

    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_critical, loc );

#if USE_ITT_BUILD
    __kmp_itt_critical_releasing( lck );
#endif /* USE_ITT_BUILD */
    // Value of 'crit' should be good for using as a critical_id of the critical section directive.
    __kmp_release_user_lock_with_checks( lck, global_tid );

#if OMPT_SUPPORT && OMPT_BLAME
    if ((ompt_status == ompt_status_track_callback) &&
        ompt_callbacks.ompt_callback(ompt_event_release_critical)) {
        ompt_callbacks.ompt_callback(ompt_event_release_critical)(
            (uint64_t) lck);
    }
#endif

#endif // KMP_USE_DYNAMIC_LOCK

    KA_TRACE( 15, ("__kmpc_end_critical: done T#%d\n", global_tid ));
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
@return one if the thread should execute the master block, zero otherwise

Start execution of a combined barrier and master. The barrier is executed inside this function.
*/
kmp_int32
__kmpc_barrier_master(ident_t *loc, kmp_int32 global_tid)
{
    int status;

    KC_TRACE( 10, ("__kmpc_barrier_master: called T#%d\n", global_tid ) );

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    if ( __kmp_env_consistency_check )
        __kmp_check_barrier( global_tid, ct_barrier, loc );

#if USE_ITT_NOTIFY
    __kmp_threads[global_tid]->th.th_ident = loc;
#endif
    // Gathering barrier (TRUE): __kmp_barrier returns nonzero on worker threads.
    status = __kmp_barrier( bs_plain_barrier, global_tid, TRUE, 0, NULL, NULL );

    return (status != 0) ? 0 : 1;
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.

Complete the execution of a combined barrier and master.
This function should only be called at the completion of the <tt>master</tt> code. Other threads will still be waiting at the
barrier and this call releases them.
*/
void
__kmpc_end_barrier_master(ident_t *loc, kmp_int32 global_tid)
{
    KC_TRACE( 10, ("__kmpc_end_barrier_master: called T#%d\n", global_tid ));

    __kmp_end_split_barrier ( bs_plain_barrier, global_tid );
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
@return one if the thread should execute the master block, zero otherwise

Start execution of a combined barrier and master(nowait) construct.
The barrier is executed inside this function.
There is no equivalent "end" function, since the
consistency-check pop normally done by an "end" call is performed here.
*/
kmp_int32
__kmpc_barrier_master_nowait( ident_t * loc, kmp_int32 global_tid )
{
    kmp_int32 ret;

    KC_TRACE( 10, ("__kmpc_barrier_master_nowait: called T#%d\n", global_tid ));

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    if ( __kmp_env_consistency_check ) {
        if ( loc == 0 ) {
            KMP_WARNING( ConstructIdentInvalid ); // ??? What does it mean for the user?
        }
        __kmp_check_barrier( global_tid, ct_barrier, loc );
    }

#if USE_ITT_NOTIFY
    __kmp_threads[global_tid]->th.th_ident = loc;
#endif
    __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

    ret = __kmpc_master (loc, global_tid);

    if ( __kmp_env_consistency_check ) {
        /*  there's no __kmpc_end_master called; so the (stats) */
        /*  actions of __kmpc_end_master are done here          */

        if ( global_tid < 0 ) {
            KMP_WARNING( ThreadIdentInvalid );
        }
        if (ret) {
            /* only one thread should do the pop since only */
            /* one did the push (see __kmpc_master())        */

            __kmp_pop_sync( global_tid, ct_master, loc );
        }
    }

    return (ret);
}

/* The BARRIER for a SINGLE process section is always explicit   */

/*!
@ingroup WORK_SHARING
@param loc  source location information
@param global_tid  global thread number
@return One if this thread should execute the single construct, zero otherwise.

Test whether to execute a <tt>single</tt> construct.
There are no implicit barriers in the two "single" calls, rather the compiler should
introduce an explicit barrier if it is required.
*/

kmp_int32
__kmpc_single(ident_t *loc, kmp_int32 global_tid)
{
    KMP_COUNT_BLOCK(OMP_SINGLE);
    // rc is nonzero for the one thread that wins the race to execute the block.
    kmp_int32 rc = __kmp_enter_single( global_tid, loc, TRUE );

#if OMPT_SUPPORT && OMPT_TRACE
    kmp_info_t *this_thr = __kmp_threads[ global_tid ];
    kmp_team_t *team = this_thr -> th.th_team;
    int tid = __kmp_tid_from_gtid( global_tid );

    if ((ompt_status == ompt_status_track_callback)) {
        if (rc) {
            if (ompt_callbacks.ompt_callback(ompt_event_single_in_block_begin)) {
                ompt_callbacks.ompt_callback(ompt_event_single_in_block_begin)(
                    team->t.ompt_team_info.parallel_id,
                    team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id,
                    team->t.ompt_team_info.microtask);
            }
        } else {
            if (ompt_callbacks.ompt_callback(ompt_event_single_others_begin)) {
                ompt_callbacks.ompt_callback(ompt_event_single_others_begin)(
                    team->t.ompt_team_info.parallel_id,
                    team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
            }
            this_thr->th.ompt_thread_info.state = ompt_state_wait_single;
        }
    }
#endif

    return rc;
}

/*!
@ingroup WORK_SHARING
@param loc  source location information
@param global_tid  global thread number

Mark the end of a <tt>single</tt> construct.  This function should
only be called by the thread that executed the block of code protected
by the `single` construct.
*/
void
__kmpc_end_single(ident_t *loc, kmp_int32 global_tid)
{
    __kmp_exit_single( global_tid );

#if OMPT_SUPPORT && OMPT_TRACE
    kmp_info_t *this_thr = __kmp_threads[ global_tid ];
    kmp_team_t *team = this_thr -> th.th_team;
    int tid = __kmp_tid_from_gtid( global_tid );

    if ((ompt_status == ompt_status_track_callback) &&
        ompt_callbacks.ompt_callback(ompt_event_single_in_block_end)) {
        ompt_callbacks.ompt_callback(ompt_event_single_in_block_end)(
            team->t.ompt_team_info.parallel_id,
            team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
    }
#endif
}

/*!
@ingroup WORK_SHARING
@param loc Source location
@param global_tid Global thread id

Mark the end of a statically scheduled loop.
*/
void
__kmpc_for_static_fini( ident_t *loc, kmp_int32 global_tid )
{
    KE_TRACE( 10, ("__kmpc_for_static_fini called T#%d\n", global_tid));

#if OMPT_SUPPORT && OMPT_TRACE
    kmp_info_t *this_thr        = __kmp_threads[ global_tid ];
    kmp_team_t *team            = this_thr -> th.th_team;
    int tid = __kmp_tid_from_gtid( global_tid );

    if ((ompt_status == ompt_status_track_callback) &&
        ompt_callbacks.ompt_callback(ompt_event_loop_end)) {
        ompt_callbacks.ompt_callback(ompt_event_loop_end)(
            team->t.ompt_team_info.parallel_id,
            team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
    }
#endif

    if ( __kmp_env_consistency_check )
        __kmp_pop_workshare( global_tid, ct_pdo, loc );
}

/*
 * User routines which take C-style arguments (call by value)
 * different from the Fortran equivalent routines
 */

// C-binding backend of omp_set_num_threads().
void
ompc_set_num_threads( int arg )
{
    // !!!!! TODO: check the per-task binding
    __kmp_set_num_threads( arg, __kmp_entry_gtid() );
}

// C-binding backend of omp_set_dynamic().
void
ompc_set_dynamic( int flag )
{
    kmp_info_t *thread;

    /* For the thread-private implementation of the internal controls */
    thread = __kmp_entry_thread();

    // Snapshot current ICVs before mutating them (restored on region exit).
    __kmp_save_internal_controls( thread );

    set__dynamic( thread, flag ? TRUE : FALSE );
}

// C-binding backend of omp_set_nested().
void
ompc_set_nested( int flag )
{
    kmp_info_t *thread;

    /* For the thread-private internal controls implementation */
    thread = __kmp_entry_thread();

    __kmp_save_internal_controls( thread );

    set__nested( thread, flag ? TRUE : FALSE );
}

void
ompc_set_max_active_levels( int max_active_levels )
{
    /* TO DO */
    /* we want per-task implementation of this internal control */

    /* For the per-thread internal controls implementation */
    __kmp_set_max_active_levels( __kmp_entry_gtid(), max_active_levels );
}

void
ompc_set_schedule( omp_sched_t kind, int modifier )
{
    // !!!!! TODO: check the per-task binding
    __kmp_set_schedule( __kmp_entry_gtid(), ( kmp_sched_t ) kind, modifier );
}

int
ompc_get_ancestor_thread_num( int level )
{
    return __kmp_get_ancestor_thread_num( __kmp_entry_gtid(), level );
}

int
ompc_get_team_size( int level )
{
    return __kmp_get_team_size( __kmp_entry_gtid(), level );
}

// kmp_* extension entry points (kmp_set_stacksize() etc.).
void
kmpc_set_stacksize( int arg )
{
    // __kmp_aux_set_stacksize initializes the library if needed
    __kmp_aux_set_stacksize( arg );
}

void
kmpc_set_stacksize_s( size_t arg )
{
    // __kmp_aux_set_stacksize initializes the library if needed
    __kmp_aux_set_stacksize( arg );
}

void
kmpc_set_blocktime( int arg )
{
    int gtid, tid;
    kmp_info_t *thread;

    gtid = __kmp_entry_gtid();
    tid = __kmp_tid_from_gtid(gtid);
    thread = __kmp_thread_from_gtid(gtid);

    __kmp_aux_set_blocktime( arg, thread, tid );
}

void
kmpc_set_library( int arg )
{
    // __kmp_user_set_library initializes the library if needed
    __kmp_user_set_library( (enum library_type)arg );
}

void
kmpc_set_defaults( char const * str )
{
    // __kmp_aux_set_defaults initializes the library if needed
    __kmp_aux_set_defaults( str, KMP_STRLEN( str ) );
}

// Affinity-mask helpers: return -1 when affinity is stubbed out or
// unsupported on this platform; otherwise defer to the aux routines
// (initializing the middle layer first if necessary).
int
kmpc_set_affinity_mask_proc( int proc, void **mask )
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
    return -1;
#else
    if ( ! TCR_4(__kmp_init_middle) ) {
        __kmp_middle_initialize();
    }
    return __kmp_aux_set_affinity_mask_proc( proc, mask );
#endif
}

int
kmpc_unset_affinity_mask_proc( int proc, void **mask )
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
    return -1;
#else
    if ( ! TCR_4(__kmp_init_middle) ) {
        __kmp_middle_initialize();
    }
    return __kmp_aux_unset_affinity_mask_proc( proc, mask );
#endif
}

int
kmpc_get_affinity_mask_proc( int proc, void **mask )
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
    return -1;
#else
    if ( ! TCR_4(__kmp_init_middle) ) {
        __kmp_middle_initialize();
    }
    return __kmp_aux_get_affinity_mask_proc( proc, mask );
#endif
}

/* -------------------------------------------------------------------------- */
/*!
@ingroup THREADPRIVATE
@param loc       source location information
@param gtid      global thread number
@param cpy_size  size of the cpy_data buffer
@param cpy_data  pointer to data to be copied
@param cpy_func  helper function to call for copying data
@param didit     flag variable: 1=single thread; 0=not single thread

__kmpc_copyprivate implements the interface for the private data broadcast
needed for the copyprivate clause associated with a single region in an
OpenMP<sup>*</sup> program (both C and Fortran).
All threads participating in the parallel region call this routine.
One of the threads (called the single thread) should have the <tt>didit</tt>
variable set to 1 and all other threads should have that variable set to 0.
All threads pass a pointer to a data buffer (cpy_data) that they have built.

The OpenMP specification forbids the use of nowait on the single region when
a copyprivate clause is present. However, @ref __kmpc_copyprivate implements
a barrier internally to avoid race conditions, so the code generation for the
single region should avoid generating a barrier after the call to
@ref __kmpc_copyprivate.

The <tt>gtid</tt> parameter is the global thread id for the current thread.
The <tt>loc</tt> parameter is a pointer to source location information.

Internal implementation: The single thread will first copy its descriptor
address (cpy_data) to a team-private location, then the other threads will
each call the function pointed to by the parameter cpy_func, which carries
out the copy by copying the data using the cpy_data buffer.

The cpy_func routine used for the copy and the contents of the data area
defined by cpy_data and cpy_size may be built in any fashion that will allow
the copy to be done. For instance, the cpy_data buffer can hold the actual
data to be copied or it may hold a list of pointers to the data. The cpy_func
routine must interpret the cpy_data buffer appropriately.

The interface to cpy_func is as follows:
@code
void cpy_func( void *destination, void *source )
@endcode
where void *destination is the cpy_data pointer for the thread being copied
to and void *source is the cpy_data pointer for the thread being copied from.
*/
void
__kmpc_copyprivate( ident_t *loc, kmp_int32 gtid, size_t cpy_size, void *cpy_data, void(*cpy_func)(void*,void*), kmp_int32 didit )
{
    void **data_ptr;

    KC_TRACE( 10, ("__kmpc_copyprivate: called T#%d\n", gtid ));

    KMP_MB();

    // Team-private slot the "single" thread publishes its buffer through.
    data_ptr = & __kmp_team_from_gtid( gtid )->t.t_copypriv_data;

    if ( __kmp_env_consistency_check ) {
        if ( loc == 0 ) {
            KMP_WARNING( ConstructIdentInvalid );
        }
    }

    /* ToDo: Optimize the following two barriers into some kind of split barrier */

    if (didit) *data_ptr = cpy_data;

    /* This barrier is not a barrier region boundary */
#if USE_ITT_NOTIFY
    __kmp_threads[gtid]->th.th_ident = loc;
#endif
    // Barrier #1: ensure the single thread's pointer is visible to everyone.
    __kmp_barrier( bs_plain_barrier, gtid, FALSE , 0, NULL, NULL );

    if (! didit) (*cpy_func)( cpy_data, *data_ptr );

    /* Consider next barrier the user-visible barrier for barrier region boundaries */
    /* Nesting checks are already handled by the single construct checks */

#if USE_ITT_NOTIFY
    __kmp_threads[gtid]->th.th_ident = loc; // TODO: check if it is needed (e.g. tasks can overwrite the location)
#endif
    // Barrier #2: all copies finished before the source buffer can be reused.
    __kmp_barrier( bs_plain_barrier, gtid, FALSE , 0, NULL, NULL );
}

/* -------------------------------------------------------------------------- */

// Consistency-checked user-lock operations (non-dynamic lock path).
#define INIT_LOCK                 __kmp_init_user_lock_with_checks
#define INIT_NESTED_LOCK          __kmp_init_nested_user_lock_with_checks
#define ACQUIRE_LOCK              __kmp_acquire_user_lock_with_checks
#define ACQUIRE_LOCK_TIMED        __kmp_acquire_user_lock_with_checks_timed
#define ACQUIRE_NESTED_LOCK       __kmp_acquire_nested_user_lock_with_checks
#define ACQUIRE_NESTED_LOCK_TIMED __kmp_acquire_nested_user_lock_with_checks_timed
#define RELEASE_LOCK              __kmp_release_user_lock_with_checks
#define RELEASE_NESTED_LOCK       __kmp_release_nested_user_lock_with_checks
#define TEST_LOCK                 __kmp_test_user_lock_with_checks
#define TEST_NESTED_LOCK          __kmp_test_nested_user_lock_with_checks
#define DESTROY_LOCK              __kmp_destroy_user_lock_with_checks
#define DESTROY_NESTED_LOCK       __kmp_destroy_nested_user_lock_with_checks

/*
 * TODO: Make check abort messages use location info & pass it
 * into with_checks routines
 */

/* initialize the lock */
void
__kmpc_init_lock( ident_t * loc, kmp_int32 gtid,  void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
    KMP_DEBUG_ASSERT(__kmp_init_serial);
    if (__kmp_env_consistency_check && user_lock == NULL) {
        KMP_FATAL(LockIsUninitialized, "omp_init_lock");
    }
    if (DYNA_IS_D_LOCK(__kmp_user_lock_seq)) {
        // Direct lock: stored inline in the user's omp_lock_t.
        DYNA_INIT_D_LOCK(user_lock, __kmp_user_lock_seq);
# if USE_ITT_BUILD
        __kmp_itt_lock_creating((kmp_user_lock_p)user_lock, NULL);
# endif
    } else {
        // Indirect lock: user_lock holds a handle into the indirect-lock table.
        DYNA_INIT_I_LOCK(user_lock, __kmp_user_lock_seq);
        kmp_indirect_lock_t *ilk = DYNA_LOOKUP_I_LOCK(user_lock);
        DYNA_SET_I_LOCK_LOCATION(ilk, loc);
# if USE_ITT_BUILD
        __kmp_itt_lock_creating(ilk->lock, loc);
# endif
    }
#else // KMP_USE_DYNAMIC_LOCK
    static char const * const func = "omp_init_lock";
    kmp_user_lock_p lck;
    KMP_DEBUG_ASSERT( __kmp_init_serial );

    if ( __kmp_env_consistency_check ) {
        if ( user_lock == NULL ) {
            KMP_FATAL( LockIsUninitialized, func );
        }
    }
    KMP_CHECK_USER_LOCK_INIT();

    // If the lock fits inside the user's omp_lock_t storage, use it in place;
    // otherwise allocate a runtime-managed lock and store a pointer to it.
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_user_lock_allocate( user_lock, gtid, 0 );
    }
    INIT_LOCK( lck );
    __kmp_set_user_lock_location( lck, loc );

#if USE_ITT_BUILD
    __kmp_itt_lock_creating( lck );
#endif /* USE_ITT_BUILD */
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_init_lock

/* initialize the lock */
void
__kmpc_init_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
    KMP_DEBUG_ASSERT(__kmp_init_serial);
    if (__kmp_env_consistency_check && user_lock == NULL) {
        KMP_FATAL(LockIsUninitialized, "omp_init_nest_lock");
    }
    // Invoke init function after converting to nested version.
    kmp_dyna_lockseq_t nested_seq;
    switch (__kmp_user_lock_seq) {
        case lockseq_tas:     nested_seq = lockseq_nested_tas;     break;
#if DYNA_HAS_FUTEX
        case lockseq_futex:   nested_seq = lockseq_nested_futex;   break;
#endif
        case lockseq_ticket:  nested_seq = lockseq_nested_ticket;  break;
        case lockseq_queuing: nested_seq = lockseq_nested_queuing; break;
        case lockseq_drdpa:   nested_seq = lockseq_nested_drdpa;   break;
        default:              nested_seq = lockseq_nested_queuing; break;
        // Use nested queuing lock for lock kinds without "nested" implementation.
    }
    DYNA_INIT_I_LOCK(user_lock, nested_seq);
    // All nested locks are indirect locks.
    kmp_indirect_lock_t *ilk = DYNA_LOOKUP_I_LOCK(user_lock);
    DYNA_SET_I_LOCK_LOCATION(ilk, loc);
# if USE_ITT_BUILD
    __kmp_itt_lock_creating(ilk->lock, loc);
# endif
#else // KMP_USE_DYNAMIC_LOCK
    static char const * const func = "omp_init_nest_lock";
    kmp_user_lock_p lck;
    KMP_DEBUG_ASSERT( __kmp_init_serial );

    if ( __kmp_env_consistency_check ) {
        if ( user_lock == NULL ) {
            KMP_FATAL( LockIsUninitialized, func );
        }
    }

    KMP_CHECK_USER_LOCK_INIT();

    // Nested locks additionally need room for the recursion depth counter;
    // fall back to an allocated lock when omp_nest_lock_t is too small.
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked )
        <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
        <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_user_lock_allocate( user_lock, gtid, 0 );
    }

    INIT_NESTED_LOCK( lck );
    __kmp_set_user_lock_location( lck, loc );

#if USE_ITT_BUILD
    __kmp_itt_lock_creating( lck );
#endif /* USE_ITT_BUILD */
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_init_nest_lock

/* destroy the lock (implements omp_destroy_lock) */
void
__kmpc_destroy_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
# if USE_ITT_BUILD
    kmp_user_lock_p lck;
    if (DYNA_EXTRACT_D_TAG(user_lock) == 0) {
        // Tag 0 means an indirect lock; fetch the real lock object for ITT.
        lck = ((kmp_indirect_lock_t *)DYNA_LOOKUP_I_LOCK(user_lock))->lock;
    } else {
        lck = (kmp_user_lock_p)user_lock;
    }
    __kmp_itt_lock_destroyed(lck);
# endif
    DYNA_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);
#else
    kmp_user_lock_p lck;

    // Mirror the placement decision made in __kmpc_init_lock.
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck =
              __kmp_lookup_user_lock( user_lock, "omp_destroy_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_destroyed( lck );
#endif /* USE_ITT_BUILD */
    DESTROY_LOCK( lck );

    // Only runtime-allocated locks need to be freed; in-place locks do not.
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        ;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        ;
    }
#endif
    else {
        __kmp_user_lock_free( user_lock, gtid, lck );
    }
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_destroy_lock

/* destroy the lock */
void
__kmpc_destroy_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
# if USE_ITT_BUILD
    // All nested dynamic locks are indirect (see __kmpc_init_nest_lock).
    kmp_indirect_lock_t *ilk = DYNA_LOOKUP_I_LOCK(user_lock);
    __kmp_itt_lock_destroyed(ilk->lock);
# endif
    DYNA_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);
#else // KMP_USE_DYNAMIC_LOCK
    kmp_user_lock_p lck;

    // Mirror the placement decision made in __kmpc_init_nest_lock.
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked )
        <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
        <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_destroy_nest_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_destroyed( lck );
#endif /* USE_ITT_BUILD */

    DESTROY_NESTED_LOCK( lck );

    // Free only if the lock was runtime-allocated (did not fit inline).
    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked )
        <= OMP_NEST_LOCK_T_SIZE ) ) {
        ;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
        <= OMP_NEST_LOCK_T_SIZE ) ) {
        ;
    }
#endif
    else {
        __kmp_user_lock_free( user_lock, gtid, lck );
    }
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_destroy_nest_lock

/* acquire the lock (implements omp_set_lock; blocks until acquired) */
void
__kmpc_set_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
    KMP_COUNT_BLOCK(OMP_set_lock);
#if KMP_USE_DYNAMIC_LOCK
    int tag = DYNA_EXTRACT_D_TAG(user_lock);
# if USE_ITT_BUILD
    __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock); // itt function will get to the right lock object.
# endif
# if DYNA_USE_FAST_TAS
    // Fast inline path for test-and-set locks when consistency checks are off.
    if (tag == locktag_tas && !__kmp_env_consistency_check) {
        DYNA_ACQUIRE_TAS_LOCK(user_lock, gtid);
    } else
# elif DYNA_USE_FAST_FUTEX
    if (tag == locktag_futex && !__kmp_env_consistency_check) {
        DYNA_ACQUIRE_FUTEX_LOCK(user_lock, gtid);
    } else
# endif
    {
        __kmp_direct_set_ops[tag]((kmp_dyna_lock_t *)user_lock, gtid);
    }
# if USE_ITT_BUILD
    __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
# endif
#else // KMP_USE_DYNAMIC_LOCK
    kmp_user_lock_p lck;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_set_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */

    ACQUIRE_LOCK( lck, gtid );

#if USE_ITT_BUILD
    __kmp_itt_lock_acquired( lck );
#endif /* USE_ITT_BUILD */
#endif // KMP_USE_DYNAMIC_LOCK
}

/* acquire a nestable lock (implements omp_set_nest_lock) */
void
__kmpc_set_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
# if USE_ITT_BUILD
    __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
    DYNA_D_LOCK_FUNC(user_lock, set)((kmp_dyna_lock_t *)user_lock, gtid);
# if USE_ITT_BUILD
    __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
#endif
#else // KMP_USE_DYNAMIC_LOCK
    kmp_user_lock_p lck;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) +
           sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
        <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_set_nest_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */

    ACQUIRE_NESTED_LOCK( lck, gtid );

#if USE_ITT_BUILD
    __kmp_itt_lock_acquired( lck );
#endif /* USE_ITT_BUILD */
#endif // KMP_USE_DYNAMIC_LOCK
}

/* release the lock (implements omp_unset_lock) */
void
__kmpc_unset_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
#if KMP_USE_DYNAMIC_LOCK
    int tag = DYNA_EXTRACT_D_TAG(user_lock);
# if USE_ITT_BUILD
    __kmp_itt_lock_releasing((kmp_user_lock_p)user_lock);
# endif
# if DYNA_USE_FAST_TAS
    if (tag == locktag_tas && !__kmp_env_consistency_check) {
        DYNA_RELEASE_TAS_LOCK(user_lock, gtid);
    } else
# elif DYNA_USE_FAST_FUTEX
    if (tag == locktag_futex && !__kmp_env_consistency_check) {
        DYNA_RELEASE_FUTEX_LOCK(user_lock, gtid);
    } else
# endif
    {
        __kmp_direct_unset_ops[tag]((kmp_dyna_lock_t *)user_lock, gtid);
    }
#else // KMP_USE_DYNAMIC_LOCK
    kmp_user_lock_p lck;

    /* Can't use serial interval since not block structured */
    /* release the lock */

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
        // "fast" path implemented to fix customer performance issue
#if USE_ITT_BUILD
        __kmp_itt_lock_releasing( (kmp_user_lock_p)user_lock );
#endif /* USE_ITT_BUILD */
        // Release by clearing the poll word, then fence (TCW_4 + KMP_MB).
        TCW_4(((kmp_user_lock_p)user_lock)->tas.lk.poll, 0);
        KMP_MB();
        return;
#else
        lck = (kmp_user_lock_p)user_lock;
#endif
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_unset_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_releasing( lck );
#endif /* USE_ITT_BUILD */

    RELEASE_LOCK( lck, gtid );

#if OMPT_SUPPORT && OMPT_BLAME
    if ((ompt_status == ompt_status_track_callback) &&
        ompt_callbacks.ompt_callback(ompt_event_release_lock)) {
        ompt_callbacks.ompt_callback(ompt_event_release_lock)((uint64_t) lck);
    }
#endif
#endif // KMP_USE_DYNAMIC_LOCK
}

/* release the lock */
void
__kmpc_unset_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
#if KMP_USE_DYNAMIC_LOCK
# if USE_ITT_BUILD
    __kmp_itt_lock_releasing((kmp_user_lock_p)user_lock);
# endif
    DYNA_D_LOCK_FUNC(user_lock, unset)((kmp_dyna_lock_t *)user_lock, gtid);
#else // KMP_USE_DYNAMIC_LOCK
    kmp_user_lock_p lck;

    /* Can't use serial interval since not block structured */

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked )
        <= OMP_NEST_LOCK_T_SIZE ) ) {
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
        // "fast" path implemented to fix customer performance issue
        kmp_tas_lock_t *tl = (kmp_tas_lock_t*)user_lock;
#if USE_ITT_BUILD
        __kmp_itt_lock_releasing( (kmp_user_lock_p)user_lock );
#endif /* USE_ITT_BUILD */
        // Only clear the poll word when the outermost nesting level is released.
        if ( --(tl->lk.depth_locked) == 0 ) {
            TCW_4(tl->lk.poll, 0);
        }
        KMP_MB();
        return;
#else
        lck = (kmp_user_lock_p)user_lock;
#endif
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
        <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_unset_nest_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_releasing( lck );
#endif /* USE_ITT_BUILD */

    int release_status;
    release_status = RELEASE_NESTED_LOCK( lck, gtid );
#if OMPT_SUPPORT && OMPT_BLAME
    if (ompt_status == ompt_status_track_callback) {
        if (release_status == KMP_LOCK_RELEASED) {
            // Outermost release: the lock is now actually free.
            if (ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_last)) {
                ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_last)(
                    (uint64_t) lck);
            }
        } else if (ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_prev)) {
            // Inner release: owner still holds the lock at a shallower depth.
            ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_prev)(
                (uint64_t) lck);
        }
    }
#endif
#endif // KMP_USE_DYNAMIC_LOCK
}

/* try to acquire the lock (implements omp_test_lock; non-blocking) */
int
__kmpc_test_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
    KMP_COUNT_BLOCK(OMP_test_lock);
    KMP_TIME_BLOCK(OMP_test_lock);
#if KMP_USE_DYNAMIC_LOCK
    int rc;
    int tag = DYNA_EXTRACT_D_TAG(user_lock);
# if USE_ITT_BUILD
    __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
# if DYNA_USE_FAST_TAS
    if (tag == locktag_tas && !__kmp_env_consistency_check) {
        DYNA_TEST_TAS_LOCK(user_lock, gtid, rc);
    } else
# elif DYNA_USE_FAST_FUTEX
    if (tag == locktag_futex && !__kmp_env_consistency_check) {
        DYNA_TEST_FUTEX_LOCK(user_lock, gtid, rc);
    } else
# endif
    {
        rc = __kmp_direct_test_ops[tag]((kmp_dyna_lock_t *)user_lock, gtid);
    }
    if (rc) {
# if USE_ITT_BUILD
        __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
# endif
        return FTN_TRUE;
    } else {
# if USE_ITT_BUILD
        __kmp_itt_lock_cancelled((kmp_user_lock_p)user_lock);
# endif
        return FTN_FALSE;
    }
#else // KMP_USE_DYNAMIC_LOCK
    kmp_user_lock_p lck;
    int          rc;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_test_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */

    rc = TEST_LOCK( lck, gtid );
#if USE_ITT_BUILD
    if ( rc ) {
        __kmp_itt_lock_acquired( lck );
    } else {
        __kmp_itt_lock_cancelled( lck );
    }
#endif /* USE_ITT_BUILD */
    return ( rc ? FTN_TRUE : FTN_FALSE );

    /* Can't use serial interval since not block structured */
#endif // KMP_USE_DYNAMIC_LOCK
}

/* try to acquire the lock (implements omp_test_nest_lock; returns nesting count or 0) */
int
__kmpc_test_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
#if KMP_USE_DYNAMIC_LOCK
    int rc;
# if USE_ITT_BUILD
    __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
    rc = DYNA_D_LOCK_FUNC(user_lock, test)((kmp_dyna_lock_t *)user_lock, gtid);
# if USE_ITT_BUILD
    if (rc) {
        __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
    } else {
        __kmp_itt_lock_cancelled((kmp_user_lock_p)user_lock);
    }
# endif
    return rc;
#else // KMP_USE_DYNAMIC_LOCK
    kmp_user_lock_p lck;
    int          rc;

    if ( ( __kmp_user_lock_kind == lk_tas )
      && ( sizeof( lck->tas.lk.poll ) + sizeof( lck->tas.lk.depth_locked )
        <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    else if ( ( __kmp_user_lock_kind == lk_futex )
      && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
        <= OMP_NEST_LOCK_T_SIZE ) ) {
        lck = (kmp_user_lock_p)user_lock;
    }
#endif
    else {
        lck = __kmp_lookup_user_lock( user_lock, "omp_test_nest_lock" );
    }

#if USE_ITT_BUILD
    __kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */

    rc = TEST_NESTED_LOCK( lck, gtid );
#if USE_ITT_BUILD
    if ( rc ) {
        __kmp_itt_lock_acquired( lck );
    } else {
        __kmp_itt_lock_cancelled( lck );
    }
#endif /* USE_ITT_BUILD */
    return rc;

    /* Can't use serial interval since not block structured */
#endif // KMP_USE_DYNAMIC_LOCK
}

/*--------------------------------------------------------------------------------------------------------------------*/

/*
 * Interface to fast scalable reduce methods routines
 */

// keep the selected method in a thread local structure for cross-function usage:
// will be used in __kmpc_end_reduce* functions;
// another solution: to re-determine the method one more time in
// __kmpc_end_reduce* functions (new prototype required then)
// AT: which solution is better?
#define __KMP_SET_REDUCTION_METHOD(gtid,rmethod) \
                   ( ( __kmp_threads[ ( gtid ) ] -> th.th_local.packed_reduction_method ) = ( rmethod ) )

#define __KMP_GET_REDUCTION_METHOD(gtid) \
                   ( __kmp_threads[ ( gtid ) ] -> th.th_local.packed_reduction_method )

// description of the packed_reduction_method variable: look at the macros in kmp.h

// used in a critical section reduce block
static __forceinline void
__kmp_enter_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit ) {

    // this lock was visible to a customer and to the thread profiler as a serial overhead span
    //            (although it's used for an internal purpose only)
    //            why was it visible in previous implementation?
    //            should we keep it visible in new reduce block?
    kmp_user_lock_p lck;

#if KMP_USE_DYNAMIC_LOCK
    if (DYNA_IS_D_LOCK(__kmp_user_lock_seq)) {
        // Direct lock stored in the critical-name word; lazily tag it on first use.
        lck = (kmp_user_lock_p)crit;
        if (*((kmp_dyna_lock_t *)lck) == 0) {
            KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)lck, 0, DYNA_GET_D_TAG(__kmp_user_lock_seq));
        }
        KMP_DEBUG_ASSERT(lck != NULL);
        if (__kmp_env_consistency_check) {
            __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
        }
        DYNA_D_LOCK_FUNC(lck, set)((kmp_dyna_lock_t *)lck, global_tid);
    } else {
        kmp_indirect_lock_t *ilk = __kmp_get_indirect_csptr(crit, loc, global_tid, __kmp_user_lock_seq);
        KMP_DEBUG_ASSERT(ilk != NULL);
        if (__kmp_env_consistency_check) {
            __kmp_push_sync(global_tid, ct_critical, loc, ilk->lock, __kmp_user_lock_seq);
        }
        DYNA_I_LOCK_FUNC(ilk, set)(ilk->lock, global_tid);
    }
#else // KMP_USE_DYNAMIC_LOCK

    // We know that the fast reduction code is only emitted by Intel compilers
    // with 32 byte critical sections. If there isn't enough space, then we
    // have to use a pointer.
    if ( __kmp_base_user_lock_size <= INTEL_CRITICAL_SIZE ) {
        lck = (kmp_user_lock_p)crit;
    }
    else {
        lck = __kmp_get_critical_section_ptr( crit, loc, global_tid );
    }
    KMP_DEBUG_ASSERT( lck != NULL );

    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_critical, loc, lck );

    __kmp_acquire_user_lock_with_checks( lck, global_tid );
#endif // KMP_USE_DYNAMIC_LOCK
}

// used in a critical section reduce block
static __forceinline void
__kmp_end_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit ) {

    kmp_user_lock_p lck;

#if KMP_USE_DYNAMIC_LOCK
    if (DYNA_IS_D_LOCK(__kmp_user_lock_seq)) {
        lck = (kmp_user_lock_p)crit;
        if (__kmp_env_consistency_check)
            __kmp_pop_sync(global_tid, ct_critical, loc);
        DYNA_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
    } else {
        kmp_indirect_lock_t *ilk = (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit));
        if (__kmp_env_consistency_check)
            __kmp_pop_sync(global_tid, ct_critical, loc);
        DYNA_I_LOCK_FUNC(ilk, unset)(ilk->lock, global_tid);
    }
#else // KMP_USE_DYNAMIC_LOCK

    // We know that the fast reduction code is only emitted by Intel compilers with 32 byte critical
    // sections. If there isn't enough space, then we have to use a pointer.
    if ( __kmp_base_user_lock_size > 32 ) {
        lck = *( (kmp_user_lock_p *) crit );
        KMP_ASSERT( lck != NULL );
    }
    else {
        lck = (kmp_user_lock_p) crit;
    }

    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_critical, loc );

    __kmp_release_user_lock_with_checks( lck, global_tid );
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmp_end_critical_section_reduce_block


/* 2.a.i. Reduce Block without a terminating barrier */
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread number
@param num_vars number of items (variables) to be reduced
@param reduce_size size of data in bytes to be reduced
@param reduce_data pointer to data to be reduced
@param reduce_func callback function providing reduction operation on two operands and returning result of reduction in lhs_data
@param lck pointer to the unique lock data structure
@result 1 for the master thread, 0 for all other team threads, 2 for all team threads if atomic reduction needed

The nowait version is used for a reduce clause with the nowait argument.
*/
kmp_int32
__kmpc_reduce_nowait(
    ident_t *loc, kmp_int32 global_tid,
    kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck ) {

    KMP_COUNT_BLOCK(REDUCE_nowait);

    int retval = 0;
    PACKED_REDUCTION_METHOD_T packed_reduction_method;
#if OMP_40_ENABLED
    kmp_team_t *team;
    kmp_info_t *th;
    int teams_swapped = 0, task_state;
#endif
    KA_TRACE( 10, ( "__kmpc_reduce_nowait() enter: called T#%d\n", global_tid ) );

    // why do we need this initialization here at all?
    // Reduction clause can not be used as a stand-alone directive.

    // do not call __kmp_serial_initialize(), it will be called by __kmp_parallel_initialize() if needed
    // possible detection of false-positive race by the threadchecker ???
    if( ! TCR_4( __kmp_init_parallel ) )
        __kmp_parallel_initialize();

    // check correctness of reduce block nesting
#if KMP_USE_DYNAMIC_LOCK
    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL, 0 );
#else
    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL );
#endif

#if OMP_40_ENABLED
    th = __kmp_thread_from_gtid(global_tid);
    if( th->th.th_teams_microtask ) {   // AC: check if we are inside the teams construct?
        team = th->th.th_team;
        if( team->t.t_level == th->th.th_teams_level ) {
            // this is reduction at teams construct
            KMP_DEBUG_ASSERT(!th->th.th_info.ds.ds_tid);  // AC: check that tid == 0
            // Let's swap teams temporarily for the reduction barrier
            teams_swapped = 1;
            th->th.th_info.ds.ds_tid = team->t.t_master_tid;
            th->th.th_team = team->t.t_parent;
            th->th.th_team_nproc = th->th.th_team->t.t_nproc;
            th->th.th_task_team = th->th.th_team->t.t_task_team[0];
            task_state = th->th.th_task_state;
            th->th.th_task_state = 0;
        }
    }
#endif // OMP_40_ENABLED

    // packed_reduction_method value will be reused by __kmp_end_reduce* function, the value should be kept in a variable
    // the variable should be either a construct-specific or thread-specific property, not a team specific property
    //     (a thread can reach the next reduce block on the next construct, reduce method may differ on the next construct)
    // an ident_t "loc" parameter could be used as a construct-specific property (what if loc == 0?)
    //     (if both construct-specific and team-specific variables were shared, then unness extra syncs should be needed)
    // a thread-specific variable is better regarding two issues above (next construct and extra syncs)
    // a thread-specific "th_local.reduction_method" variable is used currently
    // each thread executes 'determine' and 'set' lines (no need to execute by one thread, to avoid unness extra syncs)

    packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck );
    __KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method );

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_enter_critical_section_reduce_block( loc, global_tid, lck );
        retval = 1;

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( Intel platforms only )
        retval = 1;

    } else if( packed_reduction_method == atomic_reduce_block ) {

        retval = 2;

        // all threads should do this pop here
        //     (because __kmpc_end_reduce_nowait() won't be called by the code gen)
        // (it's not quite good, because the checking block has been closed by this 'pop',
        //  but atomic operation has not been executed yet, will be executed slightly later, literally on next instruction)
        if ( __kmp_env_consistency_check )
            __kmp_pop_sync( global_tid, ct_reduce, loc );

    } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

        //AT: performance issue: a real barrier here
        //AT:     (if master goes slow, other threads are blocked here waiting for the master to come and release them)
        //AT:     (it's not what a customer might expect specifying NOWAIT clause)
        //AT:     (specifying NOWAIT won't result in improvement of performance, it'll be confusing to a customer)
        //AT: another implementation of *barrier_gather*nowait() (or some other design) might go faster
        //         and be more in line with sense of NOWAIT
        //AT: TO DO: do epcc test and compare times

        // this barrier should be invisible to a customer and to the thread profiler
        //     (it's neither a terminating barrier nor customer's code, it's used for an internal purpose)
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc;
#endif
        retval = __kmp_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid, FALSE, reduce_size, reduce_data, reduce_func );
        retval = ( retval != 0 ) ? ( 0 ) : ( 1 );

        // all other workers except master should do this pop here
        //     ( none of other workers will get to __kmpc_end_reduce_nowait() )
        if ( __kmp_env_consistency_check ) {
            if( retval == 0 ) {
                __kmp_pop_sync( global_tid, ct_reduce, loc );
            }
        }

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }
#if OMP_40_ENABLED
    if( teams_swapped ) {
        // Restore thread structure
        th->th.th_info.ds.ds_tid = 0;
        th->th.th_team = team;
        th->th.th_team_nproc = team->t.t_nproc;
        th->th.th_task_team = team->t.t_task_team[task_state];
        th->th.th_task_state = task_state;
    }
#endif
    KA_TRACE( 10, ( "__kmpc_reduce_nowait() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) );

    return retval;
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread id.
@param lck pointer to the unique lock data structure

Finish the execution of a reduce nowait.
*/
void
__kmpc_end_reduce_nowait( ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck ) {

    PACKED_REDUCTION_METHOD_T packed_reduction_method;

    KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() enter: called T#%d\n", global_tid ) );

    // recover the method chosen by the matching __kmpc_reduce_nowait()
    packed_reduction_method = __KMP_GET_REDUCTION_METHOD( global_tid );

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_end_critical_section_reduce_block( loc, global_tid, lck );

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( on Intel platforms only )

    } else if( packed_reduction_method == atomic_reduce_block ) {

        // neither master nor other workers should get here
        //     (code gen does not generate this call in case 2: atomic reduce block)
        // actually it's better to remove this elseif at all;
        // after removal this value will checked by the 'else' and will assert

    } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

        // only master gets here

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }

    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_reduce, loc );

    KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() exit: called T#%d: method %08x\n", global_tid, packed_reduction_method ) );

    return;
}

/* 2.a.ii. Reduce Block with a terminating barrier */

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread number
@param num_vars number of items (variables) to be reduced
@param reduce_size size of data in bytes to be reduced
@param reduce_data pointer to data to be reduced
@param reduce_func callback function providing reduction operation on two operands and returning result of reduction in lhs_data
@param lck pointer to the unique lock data structure
@result 1 for the master thread, 0 for all other team threads, 2 for all team threads if atomic reduction needed

A blocking reduce that includes an implicit barrier.
*/
kmp_int32
__kmpc_reduce(
    ident_t *loc, kmp_int32 global_tid,
    kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
    void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck )
{
    KMP_COUNT_BLOCK(REDUCE_wait);

    int retval = 0;
    PACKED_REDUCTION_METHOD_T packed_reduction_method;

    KA_TRACE( 10, ( "__kmpc_reduce() enter: called T#%d\n", global_tid ) );

    // why do we need this initialization here at all?
    // Reduction clause can not be a stand-alone directive.

    // do not call __kmp_serial_initialize(), it will be called by __kmp_parallel_initialize() if needed
    // possible detection of false-positive race by the threadchecker ???
    if( ! TCR_4( __kmp_init_parallel ) )
        __kmp_parallel_initialize();

    // check correctness of reduce block nesting
#if KMP_USE_DYNAMIC_LOCK
    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL, 0 );
#else
    if ( __kmp_env_consistency_check )
        __kmp_push_sync( global_tid, ct_reduce, loc, NULL );
#endif

    packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck );
    __KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method );

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_enter_critical_section_reduce_block( loc, global_tid, lck );
        retval = 1;

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( Intel platforms only )
        retval = 1;

    } else if( packed_reduction_method == atomic_reduce_block ) {

        retval = 2;

    } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

        //case tree_reduce_block:
        // this barrier should be visible to a customer and to the thread profiler
        //     (it's a terminating barrier on constructs if NOWAIT not specified)
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc; // needed for correct notification of frames
#endif
        retval = __kmp_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid, TRUE, reduce_size, reduce_data, reduce_func );
        retval = ( retval != 0 ) ? ( 0 ) : ( 1 );

        // all other workers except master should do this pop here
        //     ( none of other workers except master will enter __kmpc_end_reduce() )
        if ( __kmp_env_consistency_check ) {
            if( retval == 0 ) { // 0: all other workers; 1: master
                __kmp_pop_sync( global_tid, ct_reduce, loc );
            }
        }

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }

    KA_TRACE( 10, ( "__kmpc_reduce() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) );

    return retval;
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread id.
@param lck pointer to the unique lock data structure

Finish the execution of a blocking reduce.
The <tt>lck</tt> pointer must be the same as that used in the corresponding start function.
*/
void
__kmpc_end_reduce( ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck ) {

    PACKED_REDUCTION_METHOD_T packed_reduction_method;

    KA_TRACE( 10, ( "__kmpc_end_reduce() enter: called T#%d\n", global_tid ) );

    // recover the method chosen by the matching __kmpc_reduce()
    packed_reduction_method = __KMP_GET_REDUCTION_METHOD( global_tid );

    // this barrier should be visible to a customer and to the thread profiler
    //     (it's a terminating barrier on constructs if NOWAIT not specified)

    if( packed_reduction_method == critical_reduce_block ) {

        __kmp_end_critical_section_reduce_block( loc, global_tid, lck );

        // TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc;
#endif
        __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

    } else if( packed_reduction_method == empty_reduce_block ) {

        // usage: if team size == 1, no synchronization is required ( Intel platforms only )

        // TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc;
#endif
        __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

    } else if( packed_reduction_method == atomic_reduce_block ) {

        // TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
        __kmp_threads[global_tid]->th.th_ident = loc;
#endif
        __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );

    } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {

        // only master executes here (master releases all other workers)
        __kmp_end_split_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid );

    } else {

        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"

    }

    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_reduce, loc );

    KA_TRACE( 10, ( "__kmpc_end_reduce() exit: called T#%d: method %08x\n", global_tid, packed_reduction_method ) );

    return;
}

#undef __KMP_GET_REDUCTION_METHOD
#undef __KMP_SET_REDUCTION_METHOD

/*-- end of interface to fast scalable reduce routines ---------------------------------------------------------------*/

// Return the unique id of the current (innermost) task, or 0 when called from
// a thread with no valid gtid.
kmp_uint64
__kmpc_get_taskid() {

    kmp_int32    gtid;
    kmp_info_t * thread;

    gtid = __kmp_get_gtid();
    if ( gtid < 0 ) {
        return 0;
    }; // if
    thread = __kmp_thread_from_gtid( gtid );
    return thread->th.th_current_task->td_task_id;

} // __kmpc_get_taskid

// Return the unique id of the parent of the current task, or 0 when there is
// no parent task (or no valid gtid).
kmp_uint64
__kmpc_get_parent_taskid() {

    kmp_int32        gtid;
    kmp_info_t *     thread;
    kmp_taskdata_t * parent_task;

    gtid = __kmp_get_gtid();
    if ( gtid < 0 ) {
        return 0;
    }; // if
    thread      = __kmp_thread_from_gtid( gtid );
    parent_task = thread->th.th_current_task->td_parent;
    return ( parent_task == NULL ? 0 : parent_task->td_task_id );

} // __kmpc_get_parent_taskid

// Record thread-placement parameters (number of cores, threads per core and
// core offset); forces serial initialization first so the globals exist.
void __kmpc_place_threads(int nC, int nT, int nO)
{
    if ( ! __kmp_init_serial ) {
        __kmp_serial_initialize();
    }
    __kmp_place_num_cores = nC;
    __kmp_place_num_threads_per_core = nT;
    __kmp_place_core_offset = nO;
}

// end of file //
no_omp_cpu.c
/*
 * Copyright (c) 2015 - 2022, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <sched.h>
#include <assert.h>

/*
 * Fill `no_omp` with the set of CPUs that are NOT touched by any OpenMP
 * thread: start with all CPUs [0, num_cpu) marked free, then each OpenMP
 * thread clears the bit of the CPU it is currently running on.
 *
 * `no_omp` must come from CPU_ALLOC(num_cpu), so it is manipulated with the
 * `_S` variants of the CPU_* macros and the size from CPU_ALLOC_SIZE().
 * Note: CPU_ALLOC() does not zero the set, so it is cleared here first
 * (the original left bits >= num_cpu indeterminate).
 */
void no_omp_cpu(int num_cpu, cpu_set_t *no_omp)
{
    size_t set_size = CPU_ALLOC_SIZE(num_cpu);
    int i;

    CPU_ZERO_S(set_size, no_omp);
    for (i = 0; i < num_cpu; ++i) {
        CPU_SET_S(i, set_size, no_omp);
    }
    #pragma omp parallel default(shared)
    {
        #pragma omp critical
        {
            /* serialize: sched_getcpu() result is folded into the shared set */
            int cpu_index = sched_getcpu();
            assert(cpu_index < num_cpu);
            CPU_CLR_S(cpu_index, set_size, no_omp);
        } /* end pragma omp critical */
    } /* end pragma omp parallel */
}

/* Print the list of CPUs left free by the OpenMP runtime. */
int main(int argc, char **argv)
{
    int i, num_cpu = sysconf(_SC_NPROCESSORS_ONLN);
    cpu_set_t *no_omp = CPU_ALLOC(num_cpu);
    if (no_omp == NULL) {
        /* CPU_ALLOC failed; the original dereferenced NULL here */
        fprintf(stderr, "CPU_ALLOC failed\n");
        return 1;
    }
    no_omp_cpu(num_cpu, no_omp);
    printf("Free cpu list: ");
    for (i = 0; i < num_cpu; ++i) {
        if (CPU_ISSET_S(i, CPU_ALLOC_SIZE(num_cpu), no_omp)) {
            printf("%i ", i);
        }
    }
    printf("\n\n");
    CPU_FREE(no_omp);
    return 0;
}
omp_section_firstprivate.c
<ompts:test>
<ompts:testdescription>Test which checks the omp section firstprivate directive by adding a variable which is defined before the parallel region.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp firstprivate</ompts:directive>
<ompts:testcode>
#include <stdio.h>
#include "omp_testsuite.h"

int <ompts:testcode:functionname>omp_section_firstprivate</ompts:testcode:functionname>(FILE * logFile){
    <ompts:orphan:vars>
    /* sum: shared accumulator; sum0: seed value copied into each section
     * via firstprivate (crosscheck uses private, leaving it uninitialized
     * so the negative variant must fail) */
    int sum;
    int sum0;
    </ompts:orphan:vars>
    int known_sum;

    sum0 = 11;
    sum = 7;
#pragma omp parallel
    {
        <ompts:orphan>
#pragma omp sections <ompts:check>firstprivate(sum0)</ompts:check><ompts:crosscheck>private(sum0)</ompts:crosscheck>
        {
#pragma omp section
            {
#pragma omp critical
                {
                    sum = sum + sum0;
                } /*end of critical */
            }
#pragma omp section
            {
#pragma omp critical
                {
                    sum = sum + sum0;
                } /*end of critical */
            }
#pragma omp section
            {
#pragma omp critical
                {
                    sum = sum + sum0;
                } /*end of critical */
            }
        } /*end of sections*/
        </ompts:orphan>
    } /* end of parallel */
    /* each of the three sections must add the firstprivate copy (11) once */
    known_sum = 11 * 3 + 7;
    return (known_sum == sum);
} /* end of check_section_firstprivate*/
</ompts:testcode>
</ompts:test>
sparselu-task.c
/**********************************************************************************************/
/*  This program is part of the Barcelona OpenMP Tasks Suite                                  */
/*  Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion  */
/*  Copyright (C) 2009 Universitat Politecnica de Catalunya                                   */
/*                                                                                            */
/*  This program is free software; you can redistribute it and/or modify                      */
/*  it under the terms of the GNU General Public License as published by                      */
/*  the Free Software Foundation; either version 2 of the License, or                         */
/*  (at your option) any later version.                                                      */
/*                                                                                            */
/*  This program is distributed in the hope that it will be useful,                           */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of                            */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the                             */
/*  GNU General Public License for more details.                                              */
/*                                                                                            */
/*  You should have received a copy of the GNU General Public License                         */
/*  along with this program; if not, write to the Free Software                               */
/*  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA              */
/**********************************************************************************************/

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <libgen.h>
#include "sparselu.h"

/*
 * Task-parallel sparse LU factorization over a matrix_size x matrix_size
 * grid of submatrix_size x submatrix_size blocks; BENCH is the flattened
 * (row-major) array of block pointers, NULL meaning a zero block.
 * A single thread runs the outer kk loop and spawns untied tasks; the
 * taskwaits separate the panel phase (fwd/bdiv) from the update phase (bmod)
 * of each step, since bmod reads the blocks fwd/bdiv produce.
 */
void sparselu_par_call(float **BENCH, int matrix_size, int submatrix_size)
{
    int ii, jj, kk;

#pragma omp parallel
#pragma omp single nowait
    for (kk=0; kk<matrix_size; kk++)
    {
        /* factorize the diagonal block of step kk */
        lu0(BENCH[kk*matrix_size+kk], submatrix_size);
        /* one task per non-empty block of the pivot row */
        for (jj=kk+1; jj<matrix_size; jj++)
            if (BENCH[kk*matrix_size+jj] != NULL)
#pragma omp task untied firstprivate(kk, jj) shared(BENCH)
            {
                fwd(BENCH[kk*matrix_size+kk], BENCH[kk*matrix_size+jj], submatrix_size);
            }
        /* one task per non-empty block of the pivot column */
        for (ii=kk+1; ii<matrix_size; ii++)
            if (BENCH[ii*matrix_size+kk] != NULL)
#pragma omp task untied firstprivate(kk, ii) shared(BENCH)
            {
                bdiv (BENCH[kk*matrix_size+kk], BENCH[ii*matrix_size+kk], submatrix_size);
            }
#pragma omp taskwait
        /* trailing-submatrix update; fill-in allocates new blocks on demand */
        for (ii=kk+1; ii<matrix_size; ii++)
            if (BENCH[ii*matrix_size+kk] != NULL)
                for (jj=kk+1; jj<matrix_size; jj++)
                    if (BENCH[kk*matrix_size+jj] != NULL)
#pragma omp task untied firstprivate(kk, jj, ii) shared(BENCH)
                    {
                        if (BENCH[ii*matrix_size+jj]==NULL) BENCH[ii*matrix_size+jj] = allocate_clean_block(submatrix_size);
                        bmod(BENCH[ii*matrix_size+kk], BENCH[kk*matrix_size+jj], BENCH[ii*matrix_size+jj], submatrix_size);
                    }
#pragma omp taskwait
    }
}
original_block_store_diff_jacobi_omp.c
/*
 * Copyright (c) 2008, BSC (Barcelona Supercomputing Center)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the <organization> nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BSC ''AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <math.h> #include <time.h> #define NB 32 #define B 128 #define FALSE (0) #define TRUE (1) typedef double fp_type; typedef fp_type *vin; typedef fp_type *vout; typedef fp_type *bin; typedef fp_type *binout; fp_type *A[NB][NB]; fp_type *A_new[NB][NB]; fp_type *tmp[NB][NB]; fp_type blockDelta[NB][NB]; void alloc_and_genmat() { int init_val, i, j, ii, jj; fp_type *p, *p_new; init_val = 1325; for (ii = 0; ii < NB; ii++) { for (jj = 0; jj < NB; jj++) { A[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type)); A_new[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type)); tmp[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type)); blockDelta[ii][jj] = 0.0; if (A[ii][jj] == NULL || A_new[ii][jj] == NULL || tmp[ii][jj] == NULL) { printf("Out of memory\n"); exit(1); } p = A[ii][jj]; p_new = A_new[ii][jj]; for (i = 0; i < B; i++) { for (j = 0; j < B; j++) { init_val = (3125 * init_val) % 65536; (*p) = (fp_type)((init_val - 32768.0) / 16384.0); (*p_new) = (*p); p++; p_new++; } } } } } long usecs(void) { struct timeval t; gettimeofday(&t, NULL); return t.tv_sec * 1000000 + t.tv_usec; } void clear(vout v) { int i, j, k; for (i = 0; i < B; i++) v[i] = (fp_type)0.0; } void getlastrow(bin A, vout v) { int j; for (j = 0; j < B; j++) v[j] = A[(B - 1) * B + j]; } void getlastcol(bin A, vout v) { int i; for (i = 0; i < B; i++) v[i] = A[i * B + B - 1]; } void getfirstrow(bin A, vout v) { int j; for (j = 0; j < B; j++) v[j] = A[0 * B + j]; } void getfirstcol(bin A, vout v) { int i; for (i = 0; i < B; i++) v[i] = A[i * B + 0]; } void jacobi(vin lefthalo, vin tophalo, vin righthalo, vin bottomhalo, bin A, binout A_new, fp_type* blockDelta) { int i, j; fp_type tmp; fp_type left, top, right, bottom; fp_type fullResult, deltaErr = 0.0; for (i = 0; (i < B); i++) { for (j = 0; j < B; j++) { tmp = A[i * B + j]; left = (j == 0 ? lefthalo[j] : A[i * B + j - 1]); top = (i == 0 ? 
tophalo[i] : A[(i - 1) * B + j]); right = (j == B - 1 ? righthalo[i] : A[i * B + j + 1]); bottom = (i == B - 1 ? bottomhalo[i] : A[(i + 1) * B + j]); // A_new[i * B + j] = 0.2 * (A[i * B + j] + left + top + right + bottom); fullResult = 0.2 * (A[i * B + j] + left + top + right + bottom); A_new[i * B + j] = fullResult; // record difference between full result and stored value // (*blockDelta) += fabs(fullResult - A_new[i * B + j]) fp_type deltaTmp = (*blockDelta); fp_type y = fabs(fullResult - A_new[i * B + j]) + deltaErr; (*blockDelta) = deltaTmp + y; deltaErr = deltaTmp - (*blockDelta); deltaErr += y; } } } double maxdelta() { double dmax = -__DBL_MAX__; int ii, jj, i, j; #pragma omp parallel for reduction(max: dmax) for (ii = 0; ii < NB; ii++) { for (jj = 0; jj < NB; jj++) { for (i = 0; (i < B); i++) { for (j = 0; j < B; j++) { double diff = fabs(A_new[ii][jj][i * B + j] - A[ii][jj][i * B + j]); if(diff > dmax) dmax = diff; } } } } return dmax; } void compute(int niters) { int iters; int ii, jj; fp_type lefthalo[B], tophalo[B], righthalo[B], bottomhalo[B]; double delta = 2.0; double epsilon = 1e-4; iters = 0; // for (iters = 0; iters < niters; iters++) while(delta > epsilon) { ++iters; #pragma omp parallel \ private(ii, jj, lefthalo, tophalo, righthalo, bottomhalo) \ shared(A, A_new, blockDelta) { #pragma omp for schedule(static) for (ii = 0; ii < NB; ii++) { for (jj = 0; jj < NB; jj++) { if (ii > 0) getlastrow(A[ii - 1][jj], tophalo); else clear(tophalo); if (jj > 0) getlastcol(A[ii][jj - 1], lefthalo); else clear(lefthalo); if (ii < NB - 1) getfirstrow(A[ii + 1][jj], bottomhalo); else clear(bottomhalo); if (jj < NB - 1) getfirstcol(A[ii][jj + 1], righthalo); else clear(lefthalo); jacobi(lefthalo, tophalo, righthalo, bottomhalo, A[ii][jj], A_new[ii][jj], &blockDelta[ii][jj]); } // jj } // ii } // end parallel delta = maxdelta(); printf("iteration %d: delta = %e\n", iters, delta); // yes, this is an inefficient copy // however, the library version requires you to 
do a copy in this way // on all of the component parts to avoid segmentation fault for(int i = 0; i < NB; ++i) { for(int j = 0; j < NB; ++j) { for(int k = 0; k < B; ++k) { for(int l = 0; l < B; ++l) { A[i][j][k * B + l] = A_new[i][j][k * B + l]; } } } } } // iter } int main(int argc, char *argv[]) { int niters; // pp_time_t tm; // memset( &tm, 0, sizeof(tm) ); struct timespec start, end; if (argc > 1) { niters = atoi(argv[1]); } else niters = 1; alloc_and_genmat(); clock_gettime(CLOCK_MONOTONIC, &start); compute(niters); clock_gettime(CLOCK_MONOTONIC, &end); double time_taken = (end.tv_sec - start.tv_sec) * 1e9; time_taken = (time_taken + (end.tv_nsec - start.tv_nsec)) * 1e-9; printf("Running time = %g %s\n", time_taken, "s"); FILE *outFile; outFile = fopen("./jacobi_omp_values.txt", "w"); if (outFile == NULL) { fprintf(stderr, "Error writing to file\n"); } else { int ii, jj, i, j; for (ii = 0; ii < NB; ++ii) for (jj = 0; jj < NB; ++jj) for (i = 0; i < B; ++i) for (j = 0; j < B; ++j) fprintf(outFile, "%.15f\n", A[ii][jj][i * B + j]); fclose(outFile); } return 0; }
_CPULingo.c
/* C implementation of SIML LINGOs on CPUs #============================================================================================= # COPYRIGHT NOTICE # # Written by Imran S. Haque (ihaque@cs.stanford.edu) # # Copyright (c) 2009-2010 Stanford University. # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of Stanford University nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
#============================================================================================= */ #include "Python.h" #include "numpy/arrayobject.h" #include <stdint.h> #include <stdio.h> #ifdef USE_OPENMP #include <omp.h> #endif inline int32_t max(int32_t a,int32_t b) { return a>b ? a : b; } inline int32_t min(int32_t a,int32_t b) { return a<b?a:b; } static float multisetTanimoto3_mags(int32_t* a,int32_t* b,int32_t* asize,int32_t* bsize,int32_t alen,int32_t blen,int32_t amag,int32_t bmag) { // Version of Tanimoto code that uses explicit branches if (amag == 0 || bmag == 0) return 0.0f; int32_t i=0,j=0; int32_t isct=0; while ( i < alen && j < blen) { if (a[i] == b[j]) { isct += min(asize[i],bsize[j]); i++; j++; } else if (a[i] < b[j]) { i++; } else { // b[j] < a[i] j++; } } return isct/((float)amag+bmag-isct); } #define CHECKARRAYTYPE(ary,name) if (PyArray_TYPE(ary) != NPY_INT32) {\ PyErr_SetString(PyExc_ValueError,name" was not of type int32");\ return NULL;\ } #define CHECKARRAYCARRAY(ary,name) if ((PyArray_FLAGS(ary) & NPY_CARRAY) != NPY_CARRAY) {\ PyErr_SetString(PyExc_ValueError,name" was not a contiguous well-behaved array in C order");\ return NULL;\ } static PyObject *_CPULingo_getTanimotoBlock(PyObject *self, PyObject *args) { npy_intp dim2[2]; int32_t *reflingos,*refcounts,*refmags,*reflengths; int32_t *qlingos,*qcounts,*qmags,*qlengths; npy_intp *rldims,*rcdims,*rlstrides,*rcstrides,*qldims,*qcdims,*qlstrides,*qcstrides,*tan_strides; int nrefmols,nqmols; PyArrayObject* ary_tanimotos; float* tanimotos; int32_t *reflingoset,*refcountset; int32_t refmag,reflength; float* outputrow; int32_t *qlingoset,*qcountset; int32_t qmag,qlength; int row,col; float t; int nprocs=1; PyArrayObject *ary_reflingos,*ary_refcounts,*ary_refmags,*ary_reflengths,*ary_qlingos,*ary_qcounts,*ary_qmags,*ary_qlengths; if (!PyArg_ParseTuple(args, "OOOOOOOO|i", &ary_reflingos, &ary_refcounts, &ary_refmags, &ary_reflengths, &ary_qlingos, &ary_qcounts, &ary_qmags, &ary_qlengths, 
&nprocs)) { return NULL; } // This is a serial function. We only accept the argument so that we can // support the interface for gTBParallel if the user did not enable OpenMP if (nprocs != 1) { //fprintf(stderr,"Warning: called _CPULingo.getTanimotoBlocks or getTanimotoBlocksParallel requesting more than one CPU and pysiml not built with OpenMP support. Only using one CPU.\n"); nprocs = 1; } // Get pointers to array data reflingos = (int32_t*) PyArray_DATA(ary_reflingos); refcounts = (int32_t*) PyArray_DATA(ary_refcounts); refmags = (int32_t*) PyArray_DATA(ary_refmags); reflengths = (int32_t*) PyArray_DATA(ary_reflengths); qlingos = (int32_t*) PyArray_DATA(ary_qlingos); qcounts = (int32_t*) PyArray_DATA(ary_qcounts); qmags = (int32_t*) PyArray_DATA(ary_qmags); qlengths = (int32_t*) PyArray_DATA(ary_qlengths); // Get dimensions of arrays (# molecules, maxlingos) rldims = PyArray_DIMS(ary_reflingos); rlstrides = PyArray_STRIDES(ary_reflingos); rcdims = PyArray_DIMS(ary_refcounts); rcstrides = PyArray_STRIDES(ary_refcounts); qldims = PyArray_DIMS(ary_qlingos); qlstrides = PyArray_STRIDES(ary_qlingos); qcdims = PyArray_DIMS(ary_qcounts); qcstrides = PyArray_STRIDES(ary_qcounts); // Do some sanity checking on array dimensions {{{ // - make sure they are of int32 data type CHECKARRAYTYPE(ary_reflingos,"Reference Lingo matrix"); CHECKARRAYTYPE(ary_refcounts,"Reference Lingo count matrix"); CHECKARRAYTYPE(ary_refmags,"Reference magnitude vector"); CHECKARRAYTYPE(ary_reflengths,"Reference length vector"); CHECKARRAYTYPE(ary_qlingos,"Query Lingo matrix"); CHECKARRAYTYPE(ary_qcounts,"Query Lingo count matrix"); CHECKARRAYTYPE(ary_qmags,"Query magnitude vector"); CHECKARRAYTYPE(ary_qlengths,"Query length vector"); // - make sure lingo/count/mag arrays are 2d and are the same size in a set (ref/q) if (ary_reflingos->nd != 2) { PyErr_SetString(PyExc_ValueError,"Reference Lingo matrix did not have dimension 2"); return NULL; } if (ary_refcounts->nd != 2) { 
PyErr_SetString(PyExc_ValueError,"Reference Lingo count matrix did not have dimension 2"); return NULL; } if (rldims[0] != rcdims[0] || rldims[1] != rcdims[1]) { PyErr_SetString(PyExc_ValueError,"Reference Lingo and Lingo count matrix did not have identical shapes"); return NULL; } if (ary_qlingos->nd != 2) { PyErr_SetString(PyExc_ValueError,"Query Lingo matrix did not have dimension 2"); return NULL; } if (ary_qcounts->nd != 2) { PyErr_SetString(PyExc_ValueError,"Query Lingo count matrix did not have dimension 2"); return NULL; } if (qldims[0] != qcdims[0] || qldims[1] != qcdims[1]) { PyErr_SetString(PyExc_ValueError,"Query Lingo and Lingo count matrix did not have identical shapes"); return NULL; } // - make sure stride is 4 in last dimension (ie, is C-style and contiguous) CHECKARRAYCARRAY(ary_reflingos,"Reference Lingo matrix"); CHECKARRAYCARRAY(ary_refcounts,"Reference Lingo count matrix"); CHECKARRAYCARRAY(ary_refmags,"Reference magnitude vector"); CHECKARRAYCARRAY(ary_reflengths,"Reference length vector"); CHECKARRAYCARRAY(ary_qlingos,"Query Lingo matrix"); CHECKARRAYCARRAY(ary_qcounts,"Query Lingo count matrix"); CHECKARRAYCARRAY(ary_qmags,"Query magnitude vector"); CHECKARRAYCARRAY(ary_qlengths,"Query length vector"); // - make sure lengths/mags are 1d or (Nx1) and have same length as #rows of l/c arrays if (!(ary_reflengths->nd == 1 || (ary_reflengths->nd == 2 && ary_reflengths->dimensions[1] == 1))) { PyErr_SetString(PyExc_ValueError,"Reference length vector was not 1-D"); return NULL; } if (ary_reflengths->dimensions[0] != rldims[0]) { PyErr_SetString(PyExc_ValueError,"Reference length vector length did not equal number of rows of reference Lingo matrix"); return NULL; } if (!(ary_refmags->nd == 1 || (ary_refmags->nd == 2 && ary_refmags->dimensions[1] == 1))) { PyErr_SetString(PyExc_ValueError,"Reference magnitude vector was not 1-D"); return NULL; } if (ary_refmags->dimensions[0] != rldims[0]) { PyErr_SetString(PyExc_ValueError,"Reference magnitude 
vector length did not equal number of rows of reference Lingo matrix"); return NULL; } if (!(ary_qlengths->nd == 1 || (ary_qlengths->nd == 2 && ary_qlengths->dimensions[1] == 1))) { PyErr_SetString(PyExc_ValueError,"Query length vector was not 1-D"); return NULL; } if (ary_qlengths->dimensions[0] != qldims[0]) { PyErr_SetString(PyExc_ValueError,"Query length vector length did not equal number of rows of query Lingo matrix"); return NULL; } if (!(ary_qmags->nd == 1 || (ary_qmags->nd == 2 && ary_qmags->dimensions[1] == 1))) { PyErr_SetString(PyExc_ValueError,"Query magnitude vector was not 1-D"); return NULL; } if (ary_qmags->dimensions[0] != qldims[0]) { PyErr_SetString(PyExc_ValueError,"Query magnitude vector length did not equal number of rows of query Lingo matrix"); return NULL; } // }}} /* printf("Got reference matrix of size %ld x %ld and stride (%ld,%ld)\n",rldims[0],rldims[1],rlstrides[0],rlstrides[1]); printf("Got reference lengths of size %ld and stride %ld\n",PyArray_DIMS(ary_reflengths)[0],PyArray_STRIDES(ary_reflengths)[0]); printf("Got reference mags of size %ld and stride %ld\n",PyArray_DIMS(ary_refmags)[0],PyArray_STRIDES(ary_refmags)[0]); printf("Got query matrix of size %ld x %ld and stride (%ld,%ld)\n",qldims[0],qldims[1],qlstrides[0],qlstrides[1]); printf("Got query lengths of size %ld and stride %ld\n",PyArray_DIMS(ary_qlengths)[0],PyArray_STRIDES(ary_qlengths)[0]); printf("Got query mags of size %ld and stride %ld\n",PyArray_DIMS(ary_qmags)[0],PyArray_STRIDES(ary_qmags)[0]); */ nrefmols = rldims[0]; nqmols = qldims[0]; // Create return array containing Tanimotos dim2[0] = nrefmols; dim2[1] = nqmols; ary_tanimotos = (PyArrayObject*) PyArray_SimpleNew(2,dim2,NPY_FLOAT); tanimotos = (float*) PyArray_DATA(ary_tanimotos); tan_strides = PyArray_STRIDES(ary_tanimotos); // Fill this array with Tanimotos, one element at a time... 
for (row = 0; row < nrefmols; row++) { reflingoset = reflingos + row*rlstrides[0]/4; refcountset = refcounts + row*rcstrides[0]/4; refmag = refmags[row]; reflength = reflengths[row]; outputrow = tanimotos + row*tan_strides[0]/4; //printf("Got reference set Lingos:"); //for (i = 0; i < reflength; i++) printf(" %08x",reflingoset[i]); //printf("\n"); //printf("Got reference set counts:"); //for (i = 0; i < reflength; i++) printf(" %08x",refcountset[i]); //printf("\n"); //printf("Got reference set length %d, magnitude %d\n",reflength,refmag); for (col = 0; col < nqmols; col++) { qlingoset = qlingos + col*qlstrides[0]/4; qcountset = qcounts + col*qcstrides[0]/4; qmag = qmags[col]; qlength = qlengths[col]; //printf("\tGot query set Lingos:"); //for (i = 0; i < qlength; i++) printf(" %08x",qlingoset[i]); //printf("\n"); //printf("\tGot query set counts:"); //for (i = 0; i < qlength; i++) printf(" %08x",qcountset[i]); //printf("\n"); //printf("\tGot query set length %d, magnitude %d\n",qlength,qmag); t = multisetTanimoto3_mags(reflingoset,qlingoset,refcountset,qcountset,reflength,qlength,refmag,qmag); outputrow[col] = t; //printf("\tTanimoto = %f\n",t); } } return PyArray_Return(ary_tanimotos); } #ifdef USE_OPENMP static PyObject *_CPULingo_getTanimotoBlockParallel(PyObject *self, PyObject *args) { npy_intp dim2[2]; int32_t *reflingos,*refcounts,*refmags,*reflengths; int32_t *qlingos,*qcounts,*qmags,*qlengths; npy_intp *rldims,*rcdims,*rlstrides,*rcstrides,*qldims,*qcdims,*qlstrides,*qcstrides,*tan_strides; int nrefmols,nqmols; PyArrayObject* ary_tanimotos; float* tanimotos; int32_t *reflingoset,*refcountset; int32_t refmag,reflength; float* outputrow; int32_t *qlingoset,*qcountset; int32_t qmag,qlength; int row,col; float t; int nprocs=0; PyArrayObject *ary_reflingos,*ary_refcounts,*ary_refmags,*ary_reflengths,*ary_qlingos,*ary_qcounts,*ary_qmags,*ary_qlengths; if (!PyArg_ParseTuple(args, "OOOOOOOO|i", &ary_reflingos, &ary_refcounts, &ary_refmags, &ary_reflengths, 
&ary_qlingos, &ary_qcounts, &ary_qmags, &ary_qlengths, &nprocs)) { return NULL; } // Get pointers to array data reflingos = (int32_t*) PyArray_DATA(ary_reflingos); refcounts = (int32_t*) PyArray_DATA(ary_refcounts); refmags = (int32_t*) PyArray_DATA(ary_refmags); reflengths = (int32_t*) PyArray_DATA(ary_reflengths); qlingos = (int32_t*) PyArray_DATA(ary_qlingos); qcounts = (int32_t*) PyArray_DATA(ary_qcounts); qmags = (int32_t*) PyArray_DATA(ary_qmags); qlengths = (int32_t*) PyArray_DATA(ary_qlengths); // Get dimensions of arrays (# molecules, maxlingos) rldims = PyArray_DIMS(ary_reflingos); rlstrides = PyArray_STRIDES(ary_reflingos); rcdims = PyArray_DIMS(ary_refcounts); rcstrides = PyArray_STRIDES(ary_refcounts); qldims = PyArray_DIMS(ary_qlingos); qlstrides = PyArray_STRIDES(ary_qlingos); qcdims = PyArray_DIMS(ary_qcounts); qcstrides = PyArray_STRIDES(ary_qcounts); // Do some sanity checking on array dimensions {{{ // - make sure they are of int32 data type CHECKARRAYTYPE(ary_reflingos,"Reference Lingo matrix"); CHECKARRAYTYPE(ary_refcounts,"Reference Lingo count matrix"); CHECKARRAYTYPE(ary_refmags,"Reference magnitude vector"); CHECKARRAYTYPE(ary_reflengths,"Reference length vector"); CHECKARRAYTYPE(ary_qlingos,"Query Lingo matrix"); CHECKARRAYTYPE(ary_qcounts,"Query Lingo count matrix"); CHECKARRAYTYPE(ary_qmags,"Query magnitude vector"); CHECKARRAYTYPE(ary_qlengths,"Query length vector"); // - make sure lingo/count/mag arrays are 2d and are the same size in a set (ref/q) if (ary_reflingos->nd != 2) { PyErr_SetString(PyExc_TypeError,"Reference Lingo matrix did not have dimension 2"); return NULL; } if (ary_refcounts->nd != 2) { PyErr_SetString(PyExc_TypeError,"Reference Lingo count matrix did not have dimension 2"); return NULL; } if (rldims[0] != rcdims[0] || rldims[1] != rcdims[1]) { PyErr_SetString(PyExc_TypeError,"Reference Lingo and Lingo count matrix did not have identical shapes"); return NULL; } if (ary_qlingos->nd != 2) { 
PyErr_SetString(PyExc_TypeError,"Query Lingo matrix did not have dimension 2"); return NULL; } if (ary_qcounts->nd != 2) { PyErr_SetString(PyExc_TypeError,"Query Lingo count matrix did not have dimension 2"); return NULL; } if (qldims[0] != qcdims[0] || qldims[1] != qcdims[1]) { PyErr_SetString(PyExc_TypeError,"Query Lingo and Lingo count matrix did not have identical shapes"); return NULL; } // - make sure stride is 4 in last dimension (ie, is C-style and contiguous) CHECKARRAYCARRAY(ary_reflingos,"Reference Lingo matrix"); CHECKARRAYCARRAY(ary_refcounts,"Reference Lingo count matrix"); CHECKARRAYCARRAY(ary_refmags,"Reference magnitude vector"); CHECKARRAYCARRAY(ary_reflengths,"Reference length vector"); CHECKARRAYCARRAY(ary_qlingos,"Query Lingo matrix"); CHECKARRAYCARRAY(ary_qcounts,"Query Lingo count matrix"); CHECKARRAYCARRAY(ary_qmags,"Query magnitude vector"); CHECKARRAYCARRAY(ary_qlengths,"Query length vector"); // - make sure lengths/mags are 1d or (Nx1) and have same length as #rows of l/c arrays if (!(ary_reflengths->nd == 1 || (ary_reflengths->nd == 2 && ary_reflengths->dimensions[1] == 1))) { PyErr_SetString(PyExc_TypeError,"Reference length vector was not 1-D"); return NULL; } if (ary_reflengths->dimensions[0] != rldims[0]) { PyErr_SetString(PyExc_TypeError,"Reference length vector length did not equal number of rows of reference Lingo matrix"); return NULL; } if (!(ary_refmags->nd == 1 || (ary_refmags->nd == 2 && ary_refmags->dimensions[1] == 1))) { PyErr_SetString(PyExc_TypeError,"Reference magnitude vector was not 1-D"); return NULL; } if (ary_refmags->dimensions[0] != rldims[0]) { PyErr_SetString(PyExc_TypeError,"Reference magnitude vector length did not equal number of rows of reference Lingo matrix"); return NULL; } if (!(ary_qlengths->nd == 1 || (ary_qlengths->nd == 2 && ary_qlengths->dimensions[1] == 1))) { PyErr_SetString(PyExc_TypeError,"Query length vector was not 1-D"); return NULL; } if (ary_qlengths->dimensions[0] != qldims[0]) { 
PyErr_SetString(PyExc_TypeError,"Query length vector length did not equal number of rows of query Lingo matrix"); return NULL; } if (!(ary_qmags->nd == 1 || (ary_qmags->nd == 2 && ary_qmags->dimensions[1] == 1))) { PyErr_SetString(PyExc_TypeError,"Query magnitude vector was not 1-D"); return NULL; } if (ary_qmags->dimensions[0] != qldims[0]) { PyErr_SetString(PyExc_TypeError,"Query magnitude vector length did not equal number of rows of query Lingo matrix"); return NULL; } // }}} /* printf("Got reference matrix of size %ld x %ld and stride (%ld,%ld)\n",rldims[0],rldims[1],rlstrides[0],rlstrides[1]); printf("Got reference lengths of size %ld and stride %ld\n",PyArray_DIMS(ary_reflengths)[0],PyArray_STRIDES(ary_reflengths)[0]); printf("Got reference mags of size %ld and stride %ld\n",PyArray_DIMS(ary_refmags)[0],PyArray_STRIDES(ary_refmags)[0]); printf("Got query matrix of size %ld x %ld and stride (%ld,%ld)\n",qldims[0],qldims[1],qlstrides[0],qlstrides[1]); printf("Got query lengths of size %ld and stride %ld\n",PyArray_DIMS(ary_qlengths)[0],PyArray_STRIDES(ary_qlengths)[0]); printf("Got query mags of size %ld and stride %ld\n",PyArray_DIMS(ary_qmags)[0],PyArray_STRIDES(ary_qmags)[0]); */ nrefmols = rldims[0]; nqmols = qldims[0]; // Create return array containing Tanimotos dim2[0] = nrefmols; dim2[1] = nqmols; ary_tanimotos = (PyArrayObject*) PyArray_SimpleNew(2,dim2,NPY_FLOAT); tanimotos = (float*) PyArray_DATA(ary_tanimotos); tan_strides = PyArray_STRIDES(ary_tanimotos); // Fill this array with Tanimotos, parallelized over rows if (nprocs > 0) omp_set_num_threads(nprocs); #pragma omp parallel for default(none) shared(nrefmols,nqmols,rlstrides,rcstrides,reflingos,refcounts,refmags,reflengths,tanimotos,tan_strides,qlingos,qlstrides,qcounts,qcstrides,qmags,qlengths) private(row,col,reflingoset,refcountset,refmag,reflength,qlingoset,qcountset,qmag,qlength,t,outputrow) for (row = 0; row < nrefmols; row++) { reflingoset = reflingos + row*rlstrides[0]/4; refcountset = 
refcounts + row*rcstrides[0]/4; refmag = refmags[row]; reflength = reflengths[row]; outputrow = tanimotos + row*tan_strides[0]/4; //printf("Got reference set Lingos:"); //for (i = 0; i < reflength; i++) printf(" %08x",reflingoset[i]); //printf("\n"); //printf("Got reference set counts:"); //for (i = 0; i < reflength; i++) printf(" %08x",refcountset[i]); //printf("\n"); //printf("Got reference set length %d, magnitude %d\n",reflength,refmag); for (col = 0; col < nqmols; col++) { qlingoset = qlingos + col*qlstrides[0]/4; qcountset = qcounts + col*qcstrides[0]/4; qmag = qmags[col]; qlength = qlengths[col]; //printf("\tGot query set Lingos:"); //for (i = 0; i < qlength; i++) printf(" %08x",qlingoset[i]); //printf("\n"); //printf("\tGot query set counts:"); //for (i = 0; i < qlength; i++) printf(" %08x",qcountset[i]); //printf("\n"); //printf("\tGot query set length %d, magnitude %d\n",qlength,qmag); t = multisetTanimoto3_mags(reflingoset,qlingoset,refcountset,qcountset,reflength,qlength,refmag,qmag); outputrow[col] = t; //printf("\tTanimoto = %f\n",t); } } return PyArray_Return(ary_tanimotos); } static PyObject *_CPULingo_supportsParallel(PyObject *self, PyObject *args) { return Py_True; } #else static PyObject *_CPULingo_supportsParallel(PyObject *self, PyObject *args) { return Py_False; } #endif static PyMethodDef _CPULingo_methods[] = { {"getTanimotoBlock", (PyCFunction)_CPULingo_getTanimotoBlock, METH_VARARGS, "Computes a block of Tanimotos using the sparse-vector SIML algorithm"}, {"supportsParallel", (PyCFunction)_CPULingo_supportsParallel, METH_VARARGS, "Returns True if pySIML was built with OpenMP support"}, #ifdef USE_OPENMP {"getTanimotoBlockParallel", (PyCFunction)_CPULingo_getTanimotoBlockParallel, METH_VARARGS, "Computes a block of Tanimotos using the sparse-vector SIML algorithm, parallelized over rows"}, #else {"getTanimotoBlockParallel", (PyCFunction)_CPULingo_getTanimotoBlock, METH_VARARGS, "Computes a block of Tanimotos using the sparse-vector SIML 
algorithm (warning: pysiml built without OpenMP support, this function is not parallelized)"}, #endif {NULL, NULL, 0, NULL} }; DL_EXPORT(void) init_CPULingo(void) { Py_InitModule3("_CPULingo", _CPULingo_methods, "Computes LINGO Tanimotos using the SIML method\n"); import_array(); }
HDF5SubdomainDumperMPI.h
//
//  HDF5SubdomainDumperMPI.h
//  Cubism
//
//  Created by Fabian Wermelinger 2018-08-03
//  Copyright 2018 ETH Zurich. All rights reserved.
//
#ifndef HDF5SUBDOMAINDUMPERMPI_H_UAFPTNPL
#define HDF5SUBDOMAINDUMPERMPI_H_UAFPTNPL

#include <cassert>
#include <iostream>
#include <vector>
#include <string>
#include <sstream>
#include <mpi.h>

#include "HDF5Dumper.h"

CUBISM_NAMESPACE_BEGIN

///////////////////////////////////////////////////////////////////////////////
// helpers
namespace SubdomainTypesMPI
{
// MPI-aware subdomain: extends the serial SubdomainTypes::Subdomain with the
// per-rank file offsets and the global maximum element count that the
// collective HDF5 dump below needs. The constructor performs collective MPI
// communication, so ALL ranks of the grid's Cartesian communicator must
// construct it together.
template <typename TGrid>
class Subdomain : public SubdomainTypes::Subdomain<TGrid>
{
public:
    // Forwards to the serial base-class factory; parses subdomain
    // definitions from the argument parser.
    template <typename TSubdomain>
    static std::vector<TSubdomain> getEntities(ArgumentParser& parser, TGrid& grid)
    {
        return SubdomainTypes::Subdomain<TGrid>::template getEntities<TSubdomain>(parser, grid);
    }

public:
    typedef TGrid GridType;

    // bb_start: cell index within which the bounding box start (lower left) lies
    // bb_end:   cell index within which the bounding box end (upper right) lies
    // Collective constructor: splits the grid communicator by whether this
    // rank intersects the subdomain, then computes per-rank index offsets
    // and the global maximum local element count.
    Subdomain(TGrid* grid, const int id, const double start[3], const double end[3], const double* h[3], const int bb_start[3]=0, const int bb_end[3]=0)
        : SubdomainTypes::Subdomain<TGrid>(grid, id, start, end, h, bb_start, bb_end), m_suboffset{0}
    {
        int myrank;
        // color = 1 for ranks that intersect the subdomain (m_valid set by base)
        int color = static_cast<int>( this->m_valid );
        MPI_Comm comm = this->m_grid->getCartComm();
        MPI_Comm subcomm;
        MPI_Comm_rank(comm, &myrank);
        MPI_Comm_split(comm, color, myrank, &subcomm);
        int pe_coords[3];
        this->m_grid->peindex(pe_coords);

        // compute offsets
        this->m_max_size = 1;
        unsigned long g_max_size = 0;
        if (this->m_valid)
        {
            // 1. determine dimension and create cartesian sub-communicator.
            // Root of subcomm broadcasts its process coordinates; shifting by
            // them rebases all participating ranks to a zero-origin index
            // space (assumes the intersecting ranks form a contiguous box in
            // the process grid with the subcomm root at its lower corner --
            // TODO confirm).
            int pe_shift[3] = { pe_coords[0], pe_coords[1], pe_coords[2] };
            MPI_Bcast(pe_shift, 3, MPI_INT, 0, subcomm);
            for (int i = 0; i < 3; ++i)
                pe_coords[i] -= pe_shift[i];

            int pe_subdims[3];
            for (int i = 0; i < 3; ++i)
            {
                MPI_Allreduce(&pe_coords[i], &pe_subdims[i], 1, MPI_INT, MPI_MAX, subcomm);
                pe_subdims[i] += 1; // shift from index to dimension space
            }
            MPI_Comm subcartcomm;
            // NOTE(review): {true} sets periodicity only for dimension 0; the
            // other two initialize to 0. Periodicity does not influence the
            // Exscan offsets computed below, but confirm whether all-periodic
            // was intended.
            int periodic[3] = {true};
            MPI_Cart_create(subcomm, 3, pe_subdims, periodic, false, &subcartcomm);

            // 2. compute file offsets using reduced 1D communicators.
            // Each 1D communicator runs an exclusive prefix sum of the local
            // cell counts along that dimension.
            int subdims[][3] = {
                {true, false, false},
                {false, true, false},
                {false, false, true}
            };
            for (int i = 0; i < 3; ++i)
            {
                MPI_Comm dimcomm;
                MPI_Cart_sub(subcartcomm, subdims[i], &dimcomm);
                // NOTE(review): MPI_Exscan leaves the receive buffer undefined
                // on the first rank of dimcomm; m_suboffset is pre-zeroed in
                // the init list, presumably relying on the implementation not
                // touching it there -- confirm against the MPI standard.
                MPI_Exscan(&this->m_subcount[i], &m_suboffset[i], 1, MPI_INT, MPI_SUM, dimcomm);
                MPI_Comm_free(&dimcomm);
            }
            MPI_Comm_free(&subcartcomm);

            // 3. reduce maximum element size of subdomain to all
            // others in the sub-communicator
            for (int i = 0; i < 3; ++i)
                this->m_max_size *= static_cast<unsigned long>( this->m_subcount[i] );
            MPI_Allreduce(&(this->m_max_size), &g_max_size, 1, MPI_UNSIGNED_LONG, MPI_MAX, subcomm);
        }
        MPI_Comm_free(&subcomm);

        // 4. update maximum size globally (non-intersecting ranks contribute
        // g_max_size = 0, so the result is the max over intersecting ranks)
        MPI_Allreduce(&g_max_size, &(this->m_max_size), 1, MPI_UNSIGNED_LONG, MPI_MAX, comm);
    }

    Subdomain(const Subdomain& c) = default;

    // Per-rank index offset of this rank's piece within the subdomain.
    inline const int (&offset() const)[3] { return m_suboffset; }

    // Prints the subdomain's geometry/decomposition state to stdout.
    virtual void show(const std::string prefix="") const
    {
        std::cout << prefix << "subdomain" << this->m_id << ":" << std::endl;
        std::cout << prefix << "ID = " << this->m_id << std::endl;
        std::cout << prefix << "START = (" << this->m_start[0] << ", " << this->m_start[1] << ", " << this->m_start[2] << ")" << std::endl;
        std::cout << prefix << "END = (" << this->m_end[0] << ", " << this->m_end[1] << ", " << this->m_end[2] << ")" << std::endl;
        std::cout << prefix << "BBOX_START = (" << this->m_bbox_start[0] << ", " << this->m_bbox_start[1] << ", " << this->m_bbox_start[2] << ")" << std::endl;
        std::cout << prefix << "BBOX_END = (" << this->m_bbox_end[0] << ", " << this->m_bbox_end[1] << ", " << this->m_bbox_end[2] << ")" << std::endl;
        std::cout << prefix << "DIM = (" << this->m_subdim[0] << ", " << this->m_subdim[1] << ", " << this->m_subdim[2] << ")" << std::endl;
        std::cout << prefix << "SUBDIM = (" << this->m_subcount[0] << ", " << this->m_subcount[1] << ", " << this->m_subcount[2] << ")" << std::endl;
        std::cout << prefix << "OFFSET = (" << this->m_suboffset[0] << ", " << this->m_suboffset[1] << ", " << this->m_suboffset[2] << ")" << std::endl;
        std::cout << prefix << "MAXSIZE = " << this->m_max_size << std::endl;
        std::cout << prefix << "VALID = " << this->m_valid << std::endl;
        std::cout << prefix << "NUMBER OF BLOCKS = " << this->m_intersecting_blocks.size() << std::endl;
    }

protected:
    int m_suboffset[3]; // index offset for my subdomain
};
}

///////////////////////////////////////////////////////////////////////////////
// Dumpers
//
// The following requirements for the data TStreamer are required:
// TStreamer::NCHANNELS : Number of data elements (1=Scalar, 3=Vector, 9=Tensor)
// TStreamer::operate : Data access methods for read and write
// 
TStreamer::getAttributeName : Attribute name of the date ("Scalar", "Vector", "Tensor") template<typename TStreamer, typename hdf5Real, typename TSubdomain> void DumpSubdomainHDF5MPI(const TSubdomain& subdomain, const typename TSubdomain::GridType::Real t, const std::string &fileroot, const std::string &dirname = ".", const bool bXMF = true) { #ifdef CUBISM_USE_HDF typedef typename TSubdomain::GridType::BlockType B; std::string filename_h5 = fileroot + ".h5"; std::string fullpath_h5 = dirname + "/" + filename_h5; std::string fullpath_xmf = dirname + "/" + fileroot + ".xmf"; int rank; MPI_Comm comm = subdomain.getGrid()->getCartComm(); MPI_Comm_rank(comm, &rank); herr_t status; hid_t file_id, dataset_id, fspace_id, fapl_id, mspace_id; /////////////////////////////////////////////////////////////////////////// // write mesh std::vector<int> mesh_dims; std::vector<std::string> dset_name; dset_name.push_back("/vx"); dset_name.push_back("/vy"); dset_name.push_back("/vz"); if (0 == rank) { H5open(); fapl_id = H5Pcreate(H5P_FILE_ACCESS); file_id = H5Fcreate(fullpath_h5.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); status = H5Pclose(fapl_id); for (size_t i = 0; i < 3; ++i) { const int nCells = subdomain.dim(i); const double* const h = subdomain.grid_spacing(i); std::vector<double> vertices(nCells+1, subdomain.start(i)); mesh_dims.push_back(vertices.size()); for (int j = 0; j < nCells; ++j) vertices[j+1] = vertices[j] + h[j];; hsize_t dim[1] = {vertices.size()}; fspace_id = H5Screate_simple(1, dim, NULL); #ifndef CUBISM_ON_FERMI dataset_id = H5Dcreate(file_id, dset_name[i].c_str(), H5T_NATIVE_DOUBLE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); #else dataset_id = H5Dcreate2(file_id, dset_name[i].c_str(), H5T_NATIVE_DOUBLE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); #endif status = H5Dwrite(dataset_id, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, vertices.data()); status = H5Sclose(fspace_id); status = H5Dclose(dataset_id); } // shutdown h5 file status = 
H5Fclose(file_id); H5close(); } MPI_Barrier(comm); /////////////////////////////////////////////////////////////////////////// // startup file H5open(); fapl_id = H5Pcreate(H5P_FILE_ACCESS); status = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL); if(status<0) H5Eprint1(stdout); file_id = H5Fopen(fullpath_h5.c_str(), H5F_ACC_RDWR, fapl_id); status = H5Pclose(fapl_id); if(status<0) H5Eprint1(stdout); /////////////////////////////////////////////////////////////////////////// // write data std::vector<BlockInfo> infos_sub = subdomain.getBlocksInfo(); static const unsigned int NCHANNELS = TStreamer::NCHANNELS; const unsigned int NX = subdomain.count()[0]; const unsigned int NY = subdomain.count()[1]; const unsigned int NZ = subdomain.count()[2]; const unsigned int DX = subdomain.dim()[0]; const unsigned int DY = subdomain.dim()[1]; const unsigned int DZ = subdomain.dim()[2]; if (rank==0) { std::cout << "Allocating " << (subdomain.max_size() * NCHANNELS * sizeof(hdf5Real))/(1024.*1024.) << " MB of HDF5 subdomain data"; std::cout << " (Total " << (DX * DY * DZ * NCHANNELS * sizeof(hdf5Real))/(1024.*1024.) 
<< " MB)" << std::endl; } hsize_t count[4] = { NZ, NY, NX, NCHANNELS }; hsize_t dims[4] = { DZ, DY, DX, NCHANNELS }; hsize_t offset[4] = { static_cast<hsize_t>(subdomain.offset()[2]), static_cast<hsize_t>(subdomain.offset()[1]), static_cast<hsize_t>(subdomain.offset()[0]), 0 }; hdf5Real * array_all = NULL; if (subdomain.valid()) { array_all = new hdf5Real[NX * NY * NZ * NCHANNELS]; const int bbox_start[3] = { subdomain.bbox_start()[0], subdomain.bbox_start()[1], subdomain.bbox_start()[2] }; const int bbox_end[3] = { subdomain.bbox_end()[0], subdomain.bbox_end()[1], subdomain.bbox_end()[2] }; #pragma omp parallel for for(int i=0; i<(int)infos_sub.size(); i++) { BlockInfo& info = infos_sub[i]; const B& b = *(B*)info.ptrBlock; const int idx[3] = { info.index[0], info.index[1], info.index[2] }; for(int iz=0; iz<static_cast<int>(B::sizeZ); iz++) for(int iy=0; iy<static_cast<int>(B::sizeY); iy++) for(int ix=0; ix<static_cast<int>(B::sizeX); ix++) { // cell local check: continue if the cell does not // intersect the subdomain bounding box. 
int gx = idx[0]*B::sizeX + ix; int gy = idx[1]*B::sizeY + iy; int gz = idx[2]*B::sizeZ + iz; const bool b_containedX = (bbox_start[0] <= gx) && (gx <= bbox_end[0]); const bool b_containedY = (bbox_start[1] <= gy) && (gy <= bbox_end[1]); const bool b_containedZ = (bbox_start[2] <= gz) && (gz <= bbox_end[2]); if (!(b_containedX && b_containedY && b_containedZ)) continue; hdf5Real output[NCHANNELS]; for(unsigned int j=0; j<NCHANNELS; ++j) output[j] = 0; TStreamer::operate(b, ix, iy, iz, (hdf5Real*)output); // shift the indices to subdomain index space gx -= bbox_start[0]; gy -= bbox_start[1]; gz -= bbox_start[2]; hdf5Real * const ptr = array_all + NCHANNELS*(gx + NX * (gy + NY * gz)); for(unsigned int j=0; j<NCHANNELS; ++j) ptr[j] = output[j]; } } } fapl_id = H5Pcreate(H5P_DATASET_XFER); H5Pset_dxpl_mpio(fapl_id, H5FD_MPIO_COLLECTIVE); fspace_id = H5Screate_simple(4, dims, NULL); #ifndef CUBISM_ON_FERMI dataset_id = H5Dcreate(file_id, "data", get_hdf5_type<hdf5Real>(), fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); #else dataset_id = H5Dcreate2(file_id, "data", get_hdf5_type<hdf5Real>(), fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); #endif fspace_id = H5Dget_space(dataset_id); H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, offset, NULL, count, NULL); mspace_id = H5Screate_simple(4, count, NULL); if (!subdomain.valid()) { H5Sselect_none(fspace_id); H5Sselect_none(mspace_id); } status = H5Dwrite(dataset_id, get_hdf5_type<hdf5Real>(), mspace_id, fspace_id, fapl_id, array_all); if (status < 0) H5Eprint1(stdout); status = H5Sclose(mspace_id); if(status<0) H5Eprint1(stdout); status = H5Sclose(fspace_id); if(status<0) H5Eprint1(stdout); status = H5Dclose(dataset_id); if(status<0) H5Eprint1(stdout); status = H5Pclose(fapl_id); if(status<0) H5Eprint1(stdout); status = H5Fclose(file_id); if(status<0) H5Eprint1(stdout); H5close(); if (subdomain.valid()) delete [] array_all; if (bXMF && rank==0) { FILE *xmf = 0; xmf = fopen(fullpath_xmf.c_str(), "w"); fprintf(xmf, "<?xml 
version=\"1.0\" ?>\n"); fprintf(xmf, "<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\" []>\n"); fprintf(xmf, "<Xdmf Version=\"2.0\">\n"); fprintf(xmf, " <Domain>\n"); fprintf(xmf, " <Grid GridType=\"Uniform\">\n"); fprintf(xmf, " <Time Value=\"%e\"/>\n\n", t); fprintf(xmf, " <Topology TopologyType=\"3DRectMesh\" Dimensions=\"%d %d %d\"/>\n\n", mesh_dims[2], mesh_dims[1], mesh_dims[0]); fprintf(xmf, " <Geometry GeometryType=\"VxVyVz\">\n"); fprintf(xmf, " <DataItem Name=\"mesh_vx\" Dimensions=\"%d\" NumberType=\"Float\" Precision=\"8\" Format=\"HDF\">\n", mesh_dims[0]); fprintf(xmf, " %s:/vx\n", filename_h5.c_str()); fprintf(xmf, " </DataItem>\n"); fprintf(xmf, " <DataItem Name=\"mesh_vy\" Dimensions=\"%d\" NumberType=\"Float\" Precision=\"8\" Format=\"HDF\">\n", mesh_dims[1]); fprintf(xmf, " %s:/vy\n", filename_h5.c_str()); fprintf(xmf, " </DataItem>\n"); fprintf(xmf, " <DataItem Name=\"mesh_vz\" Dimensions=\"%d\" NumberType=\"Float\" Precision=\"8\" Format=\"HDF\">\n", mesh_dims[2]); fprintf(xmf, " %s:/vz\n", filename_h5.c_str()); fprintf(xmf, " </DataItem>\n"); fprintf(xmf, " </Geometry>\n\n"); fprintf(xmf, " <Attribute Name=\"data\" AttributeType=\"%s\" Center=\"Cell\">\n", TStreamer::getAttributeName()); fprintf(xmf, " <DataItem Dimensions=\"%d %d %d %d\" NumberType=\"Float\" Precision=\"%d\" Format=\"HDF\">\n",(int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)sizeof(hdf5Real)); fprintf(xmf, " %s:/data\n", filename_h5.c_str()); fprintf(xmf, " </DataItem>\n"); fprintf(xmf, " </Attribute>\n"); fprintf(xmf, " </Grid>\n"); fprintf(xmf, " </Domain>\n"); fprintf(xmf, "</Xdmf>\n"); fclose(xmf); } #else #warning USE OF HDF WAS DISABLED AT COMPILE TIME #endif } CUBISM_NAMESPACE_END #endif /* HDF5SUBDOMAINDUMPERMPI_H_UAFPTNPL */
vecmul.c
#include "stdio.h" #define N 1024 int main() { int i; float p[N], v1[N], v2[N]; for(i=0; i<N; i++) { v1[i] = 2.0; v2[i] = 3.0; } #pragma omp target map(to:v1, v2) map(from:p) #pragma omp parallel for for(i=0; i<N; i++) { p[i] = v1[i] * v2[i]; } printf("output: p[0]=%f\n", p[0]); printf("output: p[1]=%f\n", p[1]); return 0; }
schur_eliminator_impl.h
// Ceres Solver - A fast non-linear least squares minimizer // Copyright 2010, 2011, 2012 Google Inc. All rights reserved. // http://code.google.com/p/ceres-solver/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Google Inc. nor the names of its contributors may be // used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // // Author: sameeragarwal@google.com (Sameer Agarwal) // // TODO(sameeragarwal): row_block_counter can perhaps be replaced by // Chunk::start ? #ifndef CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_ #define CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_ // Eigen has an internal threshold switching between different matrix // multiplication algorithms. 
// In particular for matrices larger than
// EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD it uses a cache friendly
// matrix matrix product algorithm that has a higher setup cost. For
// matrix sizes close to this threshold, especially when the matrices
// are thin and long, the default choice may not be optimal. This is
// the case for us, as the default choice causes a 30% performance
// regression when we moved from Eigen2 to Eigen3.
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 10

// Threading backend: OpenMP when CERES_USE_OPENMP is defined, TBB otherwise.
#ifdef CERES_USE_OPENMP
#include <omp.h>
#else
#include <tbb/tbb.h>
#endif

#include <algorithm>
#include <map>

#include "ceres/block_random_access_matrix.h"
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/map_util.h"
#include "ceres/schur_eliminator.h"
#include "ceres/small_blas.h"
#include "ceres/stl_util.h"
#include "Eigen/Dense"
#include "glog/logging.h"

namespace ceres {
namespace internal {

// Destructor: rhs_locks_ holds raw owning Mutex pointers (see Init), so
// they must be deleted here.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::~SchurEliminator() {
  STLDeleteElements(&rhs_locks_);
}

// Scan the block structure of A, detect the chunks (maximal runs of rows
// sharing the same e_block), size the per-thread scratch buffers, and set
// up the row layout and locks for the reduced (Schur complement) system.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
Init(int num_eliminate_blocks, const CompressedRowBlockStructure* bs) {
  CHECK_GT(num_eliminate_blocks, 0)
      << "SchurComplementSolver cannot be initialized with "
      << "num_eliminate_blocks = 0.";

  num_eliminate_blocks_ = num_eliminate_blocks;

  const int num_col_blocks = bs->cols.size();
  const int num_row_blocks = bs->rows.size();

  buffer_size_ = 1;
  chunks_.clear();
  lhs_row_layout_.clear();

  int lhs_num_rows = 0;
  // Add a map object for each block in the reduced linear system
  // and build the row/column block structure of the reduced linear
  // system.
  lhs_row_layout_.resize(num_col_blocks - num_eliminate_blocks_);
  for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
    lhs_row_layout_[i - num_eliminate_blocks_] = lhs_num_rows;
    lhs_num_rows += bs->cols[i].size;
  }

  int r = 0;
  // Iterate over the row blocks of A, and detect the chunks. The
  // matrix should already have been ordered so that all rows
  // containing the same y block are vertically contiguous. Along
  // the way also compute the amount of space each chunk will need
  // to perform the elimination.
  while (r < num_row_blocks) {
    const int chunk_block_id = bs->rows[r].cells.front().block_id;
    if (chunk_block_id >= num_eliminate_blocks_) {
      break;
    }

    chunks_.push_back(Chunk());
    Chunk& chunk = chunks_.back();
    chunk.size = 0;
    chunk.start = r;
    int buffer_size = 0;
    const int e_block_size = bs->cols[chunk_block_id].size;

    // Add to the chunk until the first block in the row is
    // different than the one in the first row for the chunk.
    while (r + chunk.size < num_row_blocks) {
      const CompressedRow& row = bs->rows[r + chunk.size];
      if (row.cells.front().block_id != chunk_block_id) {
        break;
      }

      // Iterate over the blocks in the row, ignoring the first
      // block since it is the one to be eliminated.
      for (int c = 1; c < row.cells.size(); ++c) {
        const Cell& cell = row.cells[c];
        if (InsertIfNotPresent(
                &(chunk.buffer_layout), cell.block_id, buffer_size)) {
          buffer_size += e_block_size * bs->cols[cell.block_id].size;
        }
      }

      buffer_size_ = max(buffer_size, buffer_size_);
      ++chunk.size;
    }

    CHECK_GT(chunk.size, 0);
    r += chunk.size;
  }
  const Chunk& chunk = chunks_.back();
  uneliminated_row_begins_ = chunk.start + chunk.size;
  // Shuffle the chunks so that concurrent threads are unlikely to touch
  // the same f_blocks (and thus the same locks) at the same time.
  if (num_threads_ > 1) {
    random_shuffle(chunks_.begin(), chunks_.end());
  }

  buffer_.reset(new double[buffer_size_ * num_threads_]);

  // chunk_outer_product_buffer_ only needs to store e_block_size *
  // f_block_size, which is always less than buffer_size_, so we just
  // allocate buffer_size_ per thread.
  chunk_outer_product_buffer_.reset(new double[buffer_size_ * num_threads_]);

  STLDeleteElements(&rhs_locks_);
  rhs_locks_.resize(num_col_blocks - num_eliminate_blocks_);
  for (int i = 0; i < num_col_blocks - num_eliminate_blocks_; ++i) {
    rhs_locks_[i] = new Mutex;
  }
}

// Form the Schur complement lhs/rhs: eliminate the e_blocks chunk by
// chunk, accumulating S = F'F - F'E(E'E)^{-1}E'F and the matching rhs.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
Eliminate(const BlockSparseMatrix* A,
          const double* b,
          const double* D,
          BlockRandomAccessMatrix* lhs,
          double* rhs) {
  if (lhs->num_rows() > 0) {
    lhs->SetZero();
    VectorRef(rhs, lhs->num_rows()).setZero();
  }

  const CompressedRowBlockStructure* bs = A->block_structure();
  const int num_col_blocks = bs->cols.size();

  // Add the diagonal to the schur complement.
  if (D != NULL) {
#ifdef CERES_USE_OPENMP
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
    for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
#else
    tbb::parallel_for(num_eliminate_blocks_, num_col_blocks, 1, [&](size_t i) {
#endif
      const int block_id = i - num_eliminate_blocks_;
      int r, c, row_stride, col_stride;
      CellInfo* cell_info = lhs->GetCell(block_id, block_id,
                                         &r, &c,
                                         &row_stride, &col_stride);
      if (cell_info != NULL) {
        const int block_size = bs->cols[i].size;
        typename EigenTypes<kFBlockSize>::ConstVectorRef
            diag(D + bs->cols[i].position, block_size);

        CeresMutexLock l(&cell_info->m);
        MatrixRef m(cell_info->values, row_stride, col_stride);
        m.block(r, c, block_size, block_size).diagonal() +=
            diag.array().square().matrix();
      }
    }
#ifndef CERES_USE_OPENMP
    );
#endif
  }

  // Eliminate y blocks one chunk at a time.  For each chunk,
  // compute the entries of the normal equations and the gradient
  // vector block corresponding to the y block and then apply
  // Gaussian elimination to them. The matrix ete stores the normal
  // matrix corresponding to the block being eliminated and array
  // buffer_ contains the non-zero blocks in the row corresponding
  // to this y block in the normal equations. This computation is
  // done in ChunkDiagonalBlockAndGradient. UpdateRhs then applies
  // gaussian elimination to the rhs of the normal equations,
  // updating the rhs of the reduced linear system by modifying rhs
  // blocks for all the z blocks that share a row block/residual
  // term with the y block. EliminateRowOuterProduct does the
  // corresponding operation for the lhs of the reduced linear
  // system.
#ifdef CERES_USE_OPENMP
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
  for (int i = 0; i < chunks_.size(); ++i) {
    int thread_id = omp_get_thread_num();
#else
  // TBB path: thread ids are handed out via a concurrent queue so each
  // task gets a private slice of the scratch buffers.
  concurrent_queue<int> thread_queue(num_threads_);
  tbb::parallel_for(size_t(0), chunks_.size(), [&](size_t i) {
    int thread_id;
    thread_queue.wait_and_pop(thread_id);
#endif
    double* buffer = buffer_.get() + thread_id * buffer_size_;
    const Chunk& chunk = chunks_[i];
    const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
    const int e_block_size = bs->cols[e_block_id].size;

    VectorRef(buffer, buffer_size_).setZero();

    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
        ete(e_block_size, e_block_size);

    if (D != NULL) {
      const typename EigenTypes<kEBlockSize>::ConstVectorRef
          diag(D + bs->cols[e_block_id].position, e_block_size);
      ete = diag.array().square().matrix().asDiagonal();
    } else {
      ete.setZero();
    }

    FixedArray<double, 8> g(e_block_size);
    typename EigenTypes<kEBlockSize>::VectorRef gref(g.get(), e_block_size);
    gref.setZero();

    // We are going to be computing
    //
    //   S += F'F - F'E(E'E)^{-1}E'F
    //
    // for each Chunk. The computation is broken down into a number of
    // function calls as below.

    // Compute the outer product of the e_blocks with themselves (ete
    // = E'E). Compute the product of the e_blocks with the
    // corresponding f_blocks (buffer = E'F), the gradient of the terms
    // in this chunk (g) and add the outer product of the f_blocks to
    // Schur complement (S += F'F).
    ChunkDiagonalBlockAndGradient(
        chunk, A, b, chunk.start, &ete, g.get(), buffer, lhs);

    // Normally one wouldn't compute the inverse explicitly, but
    // e_block_size will typically be a small number like 3, in
    // which case its much faster to compute the inverse once and
    // use it to multiply other matrices/vectors instead of doing a
    // Solve call over and over again.
    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix inverse_ete =
        ete
        .template selfadjointView<Eigen::Upper>()
        .llt()
        .solve(Matrix::Identity(e_block_size, e_block_size));

    // For the current chunk compute and update the rhs of the reduced
    // linear system.
    //
    //   rhs = F'b - F'E(E'E)^(-1) E'b
    FixedArray<double, 8> inverse_ete_g(e_block_size);
    MatrixVectorMultiply<kEBlockSize, kEBlockSize, 0>(
        inverse_ete.data(),
        e_block_size,
        e_block_size,
        g.get(),
        inverse_ete_g.get());

    UpdateRhs(chunk, A, b, chunk.start, inverse_ete_g.get(), rhs);

    // S -= F'E(E'E)^{-1}E'F
    ChunkOuterProduct(bs, inverse_ete, buffer, chunk.buffer_layout, lhs);
#ifndef CERES_USE_OPENMP
    thread_queue.push(thread_id);
#endif
  }
#ifndef CERES_USE_OPENMP
  );
#endif

  // For rows with no e_blocks, the schur complement update reduces to
  // S += F'F.
  NoEBlockRowsUpdate(A, b, uneliminated_row_begins_, lhs, rhs);
}

// Given the solution z of the reduced system, recover the eliminated
// y blocks by solving, per chunk, ete * y = E'(b - F z).
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
BackSubstitute(const BlockSparseMatrix* A,
               const double* b,
               const double* D,
               const double* z,
               double* y) {
  const CompressedRowBlockStructure* bs = A->block_structure();
#ifdef CERES_USE_OPENMP
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
  for (int i = 0; i < chunks_.size(); ++i) {
#else
  tbb::parallel_for(size_t(0), chunks_.size(), [&](size_t i) {
#endif
    const Chunk& chunk = chunks_[i];
    const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
    const int e_block_size = bs->cols[e_block_id].size;

    double* y_ptr = y + bs->cols[e_block_id].position;
    typename EigenTypes<kEBlockSize>::VectorRef y_block(y_ptr, e_block_size);

    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
        ete(e_block_size, e_block_size);
    if (D != NULL) {
      const typename EigenTypes<kEBlockSize>::ConstVectorRef
          diag(D + bs->cols[e_block_id].position, e_block_size);
      ete = diag.array().square().matrix().asDiagonal();
    } else {
      ete.setZero();
    }

    const double* values = A->values();
    for (int j = 0; j < chunk.size; ++j) {
      const CompressedRow& row = bs->rows[chunk.start + j];
      const Cell& e_cell = row.cells.front();
      DCHECK_EQ(e_block_id, e_cell.block_id);

      // sj = b_j - F_j z  (residual for this row with f contributions
      // removed).
      FixedArray<double, 8> sj(row.block.size);
      typename EigenTypes<kRowBlockSize>::VectorRef(sj.get(), row.block.size) =
          typename EigenTypes<kRowBlockSize>::ConstVectorRef
          (b + bs->rows[chunk.start + j].block.position, row.block.size);

      for (int c = 1; c < row.cells.size(); ++c) {
        const int f_block_id = row.cells[c].block_id;
        const int f_block_size = bs->cols[f_block_id].size;
        const int r_block = f_block_id - num_eliminate_blocks_;

        MatrixVectorMultiply<kRowBlockSize, kFBlockSize, -1>(
            values + row.cells[c].position, row.block.size, f_block_size,
            z + lhs_row_layout_[r_block],
            sj.get());
      }

      // y += E_j' sj ; ete += E_j' E_j
      MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
          values + e_cell.position, row.block.size, e_block_size,
          sj.get(),
          y_ptr);

      MatrixTransposeMatrixMultiply
          <kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
              values + e_cell.position, row.block.size, e_block_size,
              values + e_cell.position, row.block.size, e_block_size,
              ete.data(), 0, 0, e_block_size, e_block_size);
    }

    // Solve ete * y_block = accumulated rhs in place.
    ete.llt().solveInPlace(y_block);
  }
#ifndef CERES_USE_OPENMP
  );
#endif
}

// Update the rhs of the reduced linear system. Compute
//
//   F'b - F'E(E'E)^(-1) E'b
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
UpdateRhs(const Chunk& chunk,
          const BlockSparseMatrix* A,
          const double* b,
          int row_block_counter,
          const double* inverse_ete_g,
          double* rhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
  const int e_block_size = bs->cols[e_block_id].size;

  int b_pos = bs->rows[row_block_counter].block.position;
  const double* values = A->values();
  for (int j = 0; j < chunk.size; ++j) {
    const CompressedRow& row = bs->rows[row_block_counter + j];
    const Cell& e_cell = row.cells.front();

    // sj = b_j - E_j (E'E)^{-1} E'b
    typename EigenTypes<kRowBlockSize>::Vector sj =
        typename EigenTypes<kRowBlockSize>::ConstVectorRef
        (b + b_pos, row.block.size);

    MatrixVectorMultiply<kRowBlockSize, kEBlockSize, -1>(
        values + e_cell.position, row.block.size, e_block_size,
        inverse_ete_g, sj.data());

    // Scatter F_j' sj into the reduced rhs, one lock per z block.
    for (int c = 1; c < row.cells.size(); ++c) {
      const int block_id = row.cells[c].block_id;
      const int block_size = bs->cols[block_id].size;
      const int block = block_id - num_eliminate_blocks_;
      CeresMutexLock l(rhs_locks_[block]);
      MatrixTransposeVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
          values + row.cells[c].position,
          row.block.size, block_size,
          sj.data(), rhs + lhs_row_layout_[block]);
    }
    b_pos += row.block.size;
  }
}

// Given a Chunk - set of rows with the same e_block, e.g. in the
// following Chunk with two rows.
//
//                        E                   F
//      [ y11   0   0   0 |  z11     0    0   0    z51]
//      [ y12   0   0   0 |  z12   z22    0   0      0]
//
// this function computes two matrices. The diagonal block matrix
//
//   ete = y11 * y11' + y12 * y12'
//
// and the off diagonal blocks in the Gauss Newton Hessian.
//
//   buffer = [y11'(z11 + z12), y12' * z22, y11' * z51]
//
// which are zero compressed versions of the block sparse matrices E'E
// and E'F.
//
// and the gradient of the e_block, E'b.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
ChunkDiagonalBlockAndGradient(
    const Chunk& chunk,
    const BlockSparseMatrix* A,
    const double* b,
    int row_block_counter,
    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix* ete,
    double* g,
    double* buffer,
    BlockRandomAccessMatrix* lhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();

  int b_pos = bs->rows[row_block_counter].block.position;
  const int e_block_size = ete->rows();

  // Iterate over the rows in this chunk, for each row, compute the
  // contribution of its F blocks to the Schur complement, the
  // contribution of its E block to the matrix EE' (ete), and the
  // corresponding block in the gradient vector.
  const double* values = A->values();
  for (int j = 0; j < chunk.size; ++j) {
    const CompressedRow& row = bs->rows[row_block_counter + j];

    if (row.cells.size() > 1) {
      EBlockRowOuterProduct(A, row_block_counter + j, lhs);
    }

    // Extract the e_block, ETE += E_i' E_i
    const Cell& e_cell = row.cells.front();
    MatrixTransposeMatrixMultiply
        <kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
            values + e_cell.position, row.block.size, e_block_size,
            values + e_cell.position, row.block.size, e_block_size,
            ete->data(), 0, 0, e_block_size, e_block_size);

    // g += E_i' b_i
    MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
        values + e_cell.position, row.block.size, e_block_size,
        b + b_pos,
        g);

    // buffer = E'F. This computation is done by iterating over the
    // f_blocks for each row in the chunk.
    for (int c = 1; c < row.cells.size(); ++c) {
      const int f_block_id = row.cells[c].block_id;
      const int f_block_size = bs->cols[f_block_id].size;
      double* buffer_ptr =
          buffer + FindOrDie(chunk.buffer_layout, f_block_id);
      MatrixTransposeMatrixMultiply
          <kRowBlockSize, kEBlockSize, kRowBlockSize, kFBlockSize, 1>(
              values + e_cell.position, row.block.size, e_block_size,
              values + row.cells[c].position, row.block.size, f_block_size,
              buffer_ptr, 0, 0, e_block_size, f_block_size);
    }
    b_pos += row.block.size;
  }
}

// Compute the outer product F'E(E'E)^{-1}E'F and subtract it from the
// Schur complement matrix, i.e
//
//  S -= F'E(E'E)^{-1}E'F.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
ChunkOuterProduct(const CompressedRowBlockStructure* bs,
                  const Matrix& inverse_ete,
                  const double* buffer,
                  const BufferLayoutType& buffer_layout,
                  BlockRandomAccessMatrix* lhs) {
  // This is the most computationally expensive part of this
  // code. Profiling experiments reveal that the bottleneck is not the
  // computation of the right-hand matrix product, but memory
  // references to the left hand side.
  const int e_block_size = inverse_ete.rows();
  BufferLayoutType::const_iterator it1 = buffer_layout.begin();
#ifdef CERES_USE_OPENMP
  int thread_id = omp_get_thread_num();
#else
  // NOTE(review): in the TBB build this always uses slot 0 of
  // chunk_outer_product_buffer_ -- callers appear to rely on the caller's
  // thread_id partitioning in Eliminate; confirm against the TBB path.
  int thread_id = 0;
#endif
  double* b1_transpose_inverse_ete =
      chunk_outer_product_buffer_.get() + thread_id * buffer_size_;

  // S(i,j) -= bi' * ete^{-1} b_j
  for (; it1 != buffer_layout.end(); ++it1) {
    const int block1 = it1->first - num_eliminate_blocks_;
    const int block1_size = bs->cols[it1->first].size;
    MatrixTransposeMatrixMultiply
        <kEBlockSize, kFBlockSize, kEBlockSize, kEBlockSize, 0>(
        buffer + it1->second, e_block_size, block1_size,
        inverse_ete.data(), e_block_size, e_block_size,
        b1_transpose_inverse_ete, 0, 0, block1_size, e_block_size);

    // Only the upper triangular part (it2 starts at it1) is written;
    // S is symmetric.
    BufferLayoutType::const_iterator it2 = it1;
    for (; it2 != buffer_layout.end(); ++it2) {
      const int block2 = it2->first - num_eliminate_blocks_;

      int r, c, row_stride, col_stride;
      CellInfo* cell_info = lhs->GetCell(block1, block2,
                                         &r, &c,
                                         &row_stride, &col_stride);
      if (cell_info != NULL) {
        const int block2_size = bs->cols[it2->first].size;
        CeresMutexLock l(&cell_info->m);
        MatrixMatrixMultiply
            <kFBlockSize, kEBlockSize, kEBlockSize, kFBlockSize, -1>(
                b1_transpose_inverse_ete, block1_size, e_block_size,
                buffer + it2->second, e_block_size, block2_size,
                cell_info->values, r, c, row_stride, col_stride);
      }
    }
  }
}

// For rows with no e_blocks, the schur complement update reduces to S
// += F'F. This function iterates over the rows of A with no e_block,
// and calls NoEBlockRowOuterProduct on each row.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
NoEBlockRowsUpdate(const BlockSparseMatrix* A,
                   const double* b,
                   int row_block_counter,
                   BlockRandomAccessMatrix* lhs,
                   double* rhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  const double* values = A->values();
  for (; row_block_counter < bs->rows.size(); ++row_block_counter) {
    const CompressedRow& row = bs->rows[row_block_counter];
    for (int c = 0; c < row.cells.size(); ++c) {
      const int block_id = row.cells[c].block_id;
      const int block_size = bs->cols[block_id].size;
      const int block = block_id - num_eliminate_blocks_;
      MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
          values + row.cells[c].position, row.block.size, block_size,
          b + row.block.position,
          rhs + lhs_row_layout_[block]);
    }
    NoEBlockRowOuterProduct(A, row_block_counter, lhs);
  }
}

// A row r of A, which has no e_blocks gets added to the Schur
// Complement as S += r r'. This function is responsible for computing
// the contribution of a single row r to the Schur complement. It is
// very similar in structure to EBlockRowOuterProduct except for
// one difference. It does not use any of the template
// parameters. This is because the algorithm used for detecting the
// static structure of the matrix A only pays attention to rows with
// e_blocks. This is because rows without e_blocks are rare and
// typically arise from regularization terms in the original
// optimization problem, and have a very different structure than the
// rows with e_blocks. Including them in the static structure
// detection will lead to most template parameters being set to
// dynamic. Since the number of rows without e_blocks is small, the
// lack of templating is not an issue.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
NoEBlockRowOuterProduct(const BlockSparseMatrix* A,
                        int row_block_index,
                        BlockRandomAccessMatrix* lhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  const CompressedRow& row = bs->rows[row_block_index];
  const double* values = A->values();
  for (int i = 0; i < row.cells.size(); ++i) {
    const int block1 = row.cells[i].block_id - num_eliminate_blocks_;
    DCHECK_GE(block1, 0);

    const int block1_size = bs->cols[row.cells[i].block_id].size;
    int r, c, row_stride, col_stride;
    CellInfo* cell_info = lhs->GetCell(block1, block1,
                                       &r, &c,
                                       &row_stride, &col_stride);
    if (cell_info != NULL) {
      CeresMutexLock l(&cell_info->m);
      // This multiply currently ignores the fact that this is a
      // symmetric outer product.
      MatrixTransposeMatrixMultiply
          <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
              values + row.cells[i].position, row.block.size, block1_size,
              values + row.cells[i].position, row.block.size, block1_size,
              cell_info->values, r, c, row_stride, col_stride);
    }

    for (int j = i + 1; j < row.cells.size(); ++j) {
      const int block2 = row.cells[j].block_id - num_eliminate_blocks_;
      DCHECK_GE(block2, 0);
      DCHECK_LT(block1, block2);
      int r, c, row_stride, col_stride;
      CellInfo* cell_info = lhs->GetCell(block1, block2,
                                         &r, &c,
                                         &row_stride, &col_stride);
      if (cell_info != NULL) {
        const int block2_size = bs->cols[row.cells[j].block_id].size;
        CeresMutexLock l(&cell_info->m);
        MatrixTransposeMatrixMultiply
            <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
                values + row.cells[i].position, row.block.size, block1_size,
                values + row.cells[j].position, row.block.size, block2_size,
                cell_info->values, r, c, row_stride, col_stride);
      }
    }
  }
}

// For a row with an e_block, compute the contribution S += F'F. This
// function has the same structure as NoEBlockRowOuterProduct, except
// that this function uses the template parameters.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
EBlockRowOuterProduct(const BlockSparseMatrix* A,
                      int row_block_index,
                      BlockRandomAccessMatrix* lhs) {
  const CompressedRowBlockStructure* bs = A->block_structure();
  const CompressedRow& row = bs->rows[row_block_index];
  const double* values = A->values();
  // Note: i starts at 1, skipping the e_block cell at the front.
  for (int i = 1; i < row.cells.size(); ++i) {
    const int block1 = row.cells[i].block_id - num_eliminate_blocks_;
    DCHECK_GE(block1, 0);

    const int block1_size = bs->cols[row.cells[i].block_id].size;
    int r, c, row_stride, col_stride;
    CellInfo* cell_info = lhs->GetCell(block1, block1,
                                       &r, &c,
                                       &row_stride, &col_stride);
    if (cell_info != NULL) {
      CeresMutexLock l(&cell_info->m);
      // block += b1.transpose() * b1;
      MatrixTransposeMatrixMultiply
          <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
              values + row.cells[i].position, row.block.size, block1_size,
              values + row.cells[i].position, row.block.size, block1_size,
              cell_info->values, r, c, row_stride, col_stride);
    }

    for (int j = i + 1; j < row.cells.size(); ++j) {
      const int block2 = row.cells[j].block_id - num_eliminate_blocks_;
      DCHECK_GE(block2, 0);
      DCHECK_LT(block1, block2);
      const int block2_size = bs->cols[row.cells[j].block_id].size;
      int r, c, row_stride, col_stride;
      CellInfo* cell_info = lhs->GetCell(block1, block2,
                                         &r, &c,
                                         &row_stride, &col_stride);
      if (cell_info != NULL) {
        // block += b1.transpose() * b2;
        CeresMutexLock l(&cell_info->m);
        MatrixTransposeMatrixMultiply
            <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
                values + row.cells[i].position, row.block.size, block1_size,
                values + row.cells[j].position, row.block.size, block2_size,
                cell_info->values, r, c, row_stride, col_stride);
      }
    }
  }
}

}  // namespace internal
}  // namespace ceres

#endif  // CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
chi2float.c
// fast chi-squared distance function in x86 compiler intrinsics
// (C) 2007-2008 Christoph Lampert <christoph.lampert@gmail.com>

#include <stdio.h>
#include <float.h>  /* for FLT_MIN -- <float.h> is the standard header;
                       the legacy <values.h> is non-portable */

/* We calculate chi2=(a-b)**2/(a+b+FLT_MIN) to avoid division-by-zero:
   If a+b != 0, then (a+b+FLT_MIN)==(a+b) and nothing changed.
   If a+b == 0, then the numerator is 0 as well, and we don't divide by 0.
*/

/* Using SSE compiler intrinsics can have a huge speedup effect:
   8x for float and 3.5x for double on Intel Core2.
   You have to compile with the right CPU setting, e.g.
   gcc -march=k8 or -march=nocona */
#ifdef __SSE__
#include <xmmintrin.h> // for float
#endif

/* OpenMP allows to achieve almost linear speedup on multiCore CPUs:
   use gcc-4.2 -fopenmp */
#ifdef _OPENMP
#include <omp.h>
#endif

/* Scalar reference implementation; also used for the <4-element
   remainder of the SSE version. */
static inline float chi2_baseline_float(const int n, const float* x,
                                        const float* y) {
    float result = 0.f;
    int i;
    for (i = 0; i < n; i++) {
        const float num = x[i] - y[i];
        /* FLT_MIN in the denominator makes the 0/0 case come out as 0. */
        const float denom = 1./(x[i] + y[i] + FLT_MIN);
        result += num*num*denom;
    }
    return result;
}

/* use compiler intrinsics for 4x parallel processing */
static inline float chi2_intrinsic_float(int n, const float* x,
                                         const float* y) {
    float result = 0;
    const __m128 eps = _mm_set1_ps(FLT_MIN);
    __m128 chi2 = _mm_setzero_ps();

    /* Main loop: 4 lanes of (a-b)^2/(a+b+eps) per iteration. */
    for (; n > 3; n -= 4) {
        const __m128 a = _mm_loadu_ps(x);
        const __m128 b = _mm_loadu_ps(y);
        const __m128 a_plus_eps = _mm_add_ps(a, eps);
        const __m128 a_plus_b_plus_eps = _mm_add_ps(a_plus_eps, b);
        const __m128 a_minus_b = _mm_sub_ps(a, b);
        const __m128 a_minus_b_sq = _mm_mul_ps(a_minus_b, a_minus_b);
        const __m128 prod = _mm_div_ps(a_minus_b_sq, a_plus_b_plus_eps);
        chi2 = _mm_add_ps(chi2, prod);
        x += 4;
        y += 4;
    }

    /* Horizontal sum of the 4 lanes via two shuffle+add steps. */
    const __m128 shuffle1 = _mm_shuffle_ps(chi2, chi2, _MM_SHUFFLE(1,0,3,2));
    const __m128 sum1 = _mm_add_ps(chi2, shuffle1);
    const __m128 shuffle2 = _mm_shuffle_ps(sum1, sum1, _MM_SHUFFLE(2,3,0,1));
    const __m128 sum2 = _mm_add_ps(sum1, shuffle2);
    // with SSE3, we could use hadd_ps, but the difference is negligible
    _mm_store_ss(&result, sum2);
    /* note: the original called _mm_empty() here; EMMS is only needed
       after MMX code, and no MMX registers are used above. */

    if (n)
        result += chi2_baseline_float(n, x, y); // remaining 1-3 entries
    return result;
}

/* calculate the chi2-distance between two vectors/histograms */
float chi2_float(const int dim, const float* const x, const float* const y) {
    /* dispatch to the SSE kernel when available */
    float (*chi2_float)(const int, const float*, const float*) =
        chi2_baseline_float;
#ifdef __SSE__
    chi2_float = chi2_intrinsic_float;
#endif
    return chi2_float(dim, x, y);
}

/* calculate the symmetric chi2-distance matrix K (nx x nx) of one set of
   vectors/histograms; returns the mean of all entries. */
float chi2sym_distance_float(const int dim, const int nx,
                             const float* const x, float* const K) {
    float (*chi2_float)(const int, const float*, const float*) =
        chi2_baseline_float;
#ifdef __SSE__
    chi2_float = chi2_intrinsic_float;
#endif

    float sumK = 0.f;
#pragma omp parallel
    {
        int i, j;
        /* Only the strict lower triangle is computed; the result is
           mirrored, so each off-diagonal chi2 contributes twice. */
#pragma omp for reduction (+:sumK) schedule (dynamic, 2)
        for (i = 0; i < nx; i++) {
            K[i*nx + i] = 0.;
            for (j = 0; j < i; j++) {
                const float chi2 = (*chi2_float)(dim, &x[i*dim], &x[j*dim]);
                K[i*nx + j] = chi2;
                K[j*nx + i] = chi2;
                sumK += 2*chi2;
            }
        }
    }
    return sumK/((float)(nx*nx));
}

/* calculate the chi2-distance matrix K (nx x ny) between two sets of
   vectors/histograms; returns the mean of all entries. */
float chi2_distance_float(const int dim, const int nx, const float* const x,
                          const int ny, const float* const y,
                          float* const K) {
    float (*chi2_float)(const int, const float*, const float*) =
        chi2_baseline_float;
#ifdef __SSE__
    chi2_float = chi2_intrinsic_float;
#endif

    float sumK = 0.f;
#pragma omp parallel
    {
        int i, j;
#pragma omp for reduction (+:sumK) schedule (dynamic, 2)
        for (i = 0; i < nx; i++) {
            for (j = 0; j < ny; j++) {
                float chi2 = (*chi2_float)(dim, &x[i*dim], &y[j*dim]);
                K[i*ny + j] = chi2;
                sumK += chi2;
            }
        }
    }
    return sumK/((float)(nx*ny));
}

#ifdef __MAIN__
#include <stdlib.h>
#include <malloc.h>
#include <time.h>

/* Benchmark driver: times a (n1 x n2) chi2 distance matrix. */
int main()
{
    const int dim = 3000;
    const int n1 = 1000;
    const int n2 = 2000;
    int i, j;

    /* test calculating a kernel with float entries */
    float *data1 = (float*)memalign(16, dim*n1*sizeof(float));
    float *data2 = (float*)memalign(16, dim*n2*sizeof(float));
    float *K = (float*)malloc(n1*n2*sizeof(float));
    if ((!data1) || (!data2) || (!K)) {
        free(data1);
        free(data2);
        free(K);
        return 1;
    }

    const clock_t before_init = clock();
    for (i = 0; i < n1*dim; i++)
        data1[i] = 1./(float)(i + 1.);
    for (i = 0; i < n2*dim; i++)
        data2[i] = 1./(float)(i + 1.);
    const clock_t after_init = clock();
    printf("init time: %8.4f\n", (after_init - before_init)*1./CLOCKS_PER_SEC);

    const clock_t before_chi2 = clock();
    const float mean_K = chi2_distance_float(dim, n1, data1, n2, data2, K);
    const clock_t after_chi2 = clock();
    printf("chi2 time: %8.4f\n", (after_chi2 - before_chi2)*1./CLOCKS_PER_SEC);

    printf("result: %e\n", mean_K);

    free(data1);
    free(data2);
    free(K);
    return 0;
}
#endif
sgetri_aux.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgetri_aux.c, normal z -> s, Fri Sep 28 17:38:09 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_getri
 *
 * Auxiliary routine for plasma_sgetri.
 *
 *******************************************************************************
 *
 * @param[in] n
 *          The number of columns of the matrix B. n >= 0.
 *
 * @param[in,out] pA
 *          On entry, the upper-triangular part contains the inverse of the
 *          U-factor, and the lower-triangular part contains the L-factor,
 *          both factors are computed by plasma_sgetrf.
 *          On exit, the inverse of L*U, overwriting the factors.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,k).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 *
 *******************************************************************************
 *
 * @sa plasma_omp_sgetri
 * @sa plasma_cgetri
 * @sa plasma_dgetri
 * @sa plasma_sgetri
 *
 ******************************************************************************/
int plasma_sgetri_aux(int n, float *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (n < 0) {
        plasma_error("illegal value of n");
        return -1;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -3;
    }

    // quick return
    if (n == 0)
        return PlasmaSuccess;

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices: A is the n x n working copy, W is an n x nb
    // column workspace used by the tile algorithm.
    plasma_desc_t A;
    plasma_desc_t W;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        n, nb, 0, 0, n, nb, &W);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize sequence.
    // NOTE(review): retval from plasma_sequence_init() and
    // plasma_request_init() below is stored but never checked -- confirm
    // whether these can fail in practice.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_sge2desc(pA, lda, A, &sequence, &request);

        // Call the tile async function.
        plasma_omp_sgetri_aux(A, W, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_sdesc2ge(A, pA, lda, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&W);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_getri
 *
 * Computes triangular solve.
 * Non-blocking tile version of plasma_sgetri_aux().
 * Operates on matrices stored by tiles.
 * All matrices are passed through descriptors.
 * All dimensions are taken from the descriptors.
 * Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] A
 *          Descriptor of the matrix.
 *
 * @param[in] W
 *          Workspace of dimension (n, nb).
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes). Check
 *          the sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_sgetri
 * @sa plasma_omp_cgetri
 * @sa plasma_omp_dgetri
 * @sa plasma_omp_sgetri
 *
 ******************************************************************************/
void plasma_omp_sgetri_aux(plasma_desc_t A, plasma_desc_t W,
                           plasma_sequence_t *sequence,
                           plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(W) != PlasmaSuccess) {
        plasma_error("invalid W");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // NOTE(review): sequence/request are passed to plasma_request_fail()
    // above before these NULL checks run -- confirm plasma_request_fail()
    // tolerates NULL arguments.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.n == 0)
        return;

    // Call the parallel function.
    plasma_psgetri_aux(A, W, sequence, request);
}
graphProcessing.h
/* FINISH TEMPFLATPATH CODE AS WRITTEN, THESE FUNCTIONS WILL ONLY WORK WITH GRAPHS THAT ARE IMPLEMENTED IN THE boost NAMESPACE. */
#define LP 1
#define PERFDEBUG 0
//#define FULLDEBUG 1
#ifdef _OPENMP
#include <omp.h>
#endif
#include <boost/regex.hpp>
#include <iostream>
#include <fstream>
#include <string>
#include <assert.h>
#include <staticCFG.h>
/**
*@file graphProcessing.h
*Brief Overview of Algorithm:
***********************
*Current Implementation
***********************
*This implementation uses BOOSTs graph structure to analyze the paths of the graph
*The path analyzer sends the user paths to be evaluated by the "analyzePath" function that is user defined
**************************
*Further Improvements: TODO
**************************
@todo utilize BOOST visitors to take advantage of the BOOST graph structures abilities
***************
*Contact Info
***************
*Finally, blame can be assigned to and questions can be forwarded to the author, though response is not guaranteed
*if I'm still at Lawrence
*hoffman34 AT llnl DOT gov
*@author Michael Hoffman
*/
#include <boost/graph/adjacency_list.hpp>
#include <boost/bind.hpp>
#include <boost/foreach.hpp>
#include <boost/tuple/tuple.hpp>
#include <boost/graph/graphviz.hpp>
#include <boost/graph/dominator_tree.hpp>
#include <boost/graph/reverse_graph.hpp>
#include <boost/graph/transpose_graph.hpp>
#include <boost/algorithm/string.hpp>
#include <vector>
#include <algorithm>
#include <utility>
#include <iostream>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/time.h>

// Abstract base class for exhaustive path enumeration over a BOOST-modeled
// CFG.  Users subclass it, implement analyzePath(), and call
// constructPathAnalyzer(); every enumerated path is delivered to
// analyzePath() as a vector of BOOST vertex descriptors.
template <class CFG>
class SgGraphTraversal
{
public:
    // BOOST descriptor types for the instantiating graph type.
    typedef typename boost::graph_traits<CFG>::vertex_descriptor Vertex;
    typedef typename boost::graph_traits<CFG>:: edge_descriptor Edge;

    // Entry point: enumerates paths of *g (bounded between begin/end, or
    // from every source when unbounded) and feeds each one to analyzePath().
    void constructPathAnalyzer(CFG* g, bool unbounded=false, Vertex end=0, Vertex begin=0, bool ns = true);
    // User-supplied callback, invoked once per enumerated path.
    virtual void analyzePath(std::vector<Vertex>& pth) = 0;
    // Integer-keyed edge queries (the traversal works on int ids internally).
    std::vector<int> getInEdges(int& node, CFG*& g);
    std::vector<int> getOutEdges(int& node, CFG*& g);
    int getTarget(int& n, CFG*& g);
    int getSource(int& n, CFG*& g);
    // Bidirectional mappings between BOOST descriptors and the internal
    // integer ids used by the path algorithms.
    std::map<Vertex, int> vertintmap;
    std::map<Edge, int> edgeintmap;
    std::map<int, Vertex> intvertmap;
    std::map<int, Edge> intedgemap;
    SgGraphTraversal();
    virtual ~SgGraphTraversal();
    SgGraphTraversal( SgGraphTraversal &);
    SgGraphTraversal &operator=( SgGraphTraversal &);
    int pathnum;
    void firstPrepGraph(CFG*& g);

private:
    // Assorted counters/flags used by the traversal internals.
    int normals;
    int abnormals;
    bool needssafety;
    int recursed;
    int checkedfound;
//    typedef typename boost::graph_traits<CFG>::vertex_descriptor Vertex;
//    typedef typename boost::graph_traits<CFG>:: edge_descriptor Edge;
//    std::vector<int> getInEdges(int& node, CFG*& g);
//    std::vector<int> getOutEdges(int& node, CFG*& g);
    void prepareGraph(CFG*& g);
    void findClosuresAndMarkersAndEnumerate(CFG*& g);
//    void constructPathAnalyzer(CFG* g, bool unbounded=false, Vertex end=0, Vertex begin=0, bool ns = true);
//    virtual void analyzePath(std::vector<Vertex>& pth) = 0;
//    void firstPrepGraph(CFG*& g);
    int stoppedpaths;
    // Path-enumeration engines: bfsTraversePath collects loop-free paths and
    // loop bodies; uTraversePath expands loop permutations onto them.
    std::set<std::vector<int> > traversePath(int begin, int end, CFG*& g, bool loop=false);
    std::set<std::vector<int> > uTraversePath(int begin, int end, CFG*& g, bool loop, std::map<int, std::vector<std::vector<int> > >& localLoops);
    std::vector<std::vector<int> > bfsTraversePath(int begin, int end, CFG*& g, bool loop=false);
    // Path compression helpers (zipPath stores only branch decisions;
    // unzipPath reconstitutes the full node sequence).
    std::vector<int> unzipPath(std::vector<int>& path, CFG*& g, int start, int end);
    std::vector<int> zipPath(std::vector<int>& path, CFG*& g, int start, int end);
    std::vector<int> zipPath2(std::vector<int>& path, CFG*& g);
    // DOT-output debugging helpers.
    void printCFGNode(int& cf, std::ofstream& o);
    void printCFGNodeGeneric(int& cf, std::string prop, std::ofstream& o);
    void printCFGEdge(int& cf, CFG*& cfg, std::ofstream& o);
    void printHotness(CFG*& g);
    void printPathDot(CFG*& g);
    void computeOrder(CFG*& g, const int& begin);
    void computeSubGraphs(const int& begin, const int &end, CFG*& g, int depthDifferential);
//int getTarget(int& n, CFG*& g);
//int getSource(int& n, CFG*& g);
    std::vector<int> sources;
    std::vector<int> sinks;
    std::vector<int> recursiveLoops;
    std::vector<int> recurses;
    std::map<int, int> ptsNum;
    bool borrowed;
    std::set<int> badloop;
    std::map<int, std::vector<std::vector<int> > > totalLoops;
//    int pathnum;
    std::map<int, std::string> nodeStrings;
    int sourcenum;
    unsigned long long evaledpaths;
    int badpaths;
    int workingthreadnum;
    bool workingthread;
    // Memoized loop paths keyed by loop-header id (see uTraversePath/LP).
    std::map<int, std::set<std::vector<int> > > loopStore;
    std::vector<std::vector<int> > pathStore;
    std::map<int, std::vector<int> > subpathglobal;
    std::map<std::vector<int>, int> subpathglobalinv;
    int nextsubpath;
    std::vector<int> orderOfNodes;
//    std::map<Vertex, int> vertintmap;
//    std::map<Edge, int> edgeintmap;
//    std::map<int, Vertex> intvertmap;
//    std::map<int, Edge> intedgemap;
    // Deprecated subgraph machinery (see computeSubGraphs).
    std::vector<std::map<Vertex, Vertex> > SubGraphGraphMap;
    std::vector<std::map<Vertex, Vertex> > GraphSubGraphMap;
    std::vector<CFG*> subGraphVector;
    void getVertexPath(std::vector<int> path, CFG*& g, std::vector<Vertex>& vertexPath );
    void storeCompact(std::vector<int> path);
    int nextNode;
    int nextEdge;
    // markers: nodes with >1 out-edge; closures: loop-closing nodes.
    std::vector<int> markers;
    std::vector<int> closures;
    std::map<int, int> markerIndex;
    std::map<int, std::vector<int> > pathsAtMarkers;
    typedef typename boost::graph_traits<CFG>::vertex_iterator vertex_iterator;
    typedef typename boost::graph_traits<CFG>::out_edge_iterator out_edge_iterator;
    typedef typename boost::graph_traits<CFG>::in_edge_iterator in_edge_iterator;
    typedef typename boost::graph_traits<CFG>::edge_iterator edge_iterator;
    // true when enumeration is bounded between a specific begin/end pair.
    bool bound;
//    SgGraphTraversal();
//    virtual ~SgGraphTraversal();
//    SgGraphTraversal( SgGraphTraversal &);
//    SgGraphTraversal &operator=( SgGraphTraversal &);
};

// Default constructor: members are initialized later by prepareGraph()
// and constructPathAnalyzer().
template<class CFG>
SgGraphTraversal<CFG>::
SgGraphTraversal()
{
}

// Assignment is a no-op (no state is copied).
template<class CFG>
SgGraphTraversal<CFG> &
SgGraphTraversal<CFG>::
operator=( SgGraphTraversal &other)
{
    return *this;
}

#ifndef SWIG

template<class CFG>
SgGraphTraversal<CFG>::
~SgGraphTraversal()
{
}

#endif

/** Gets the source of an edge
SgGraphTraversal::getSource
Input:
@param[edge] int& integer representation of edge in question
@param[g] CFG*& the CFG used
*/
template<class CFG>
inline int
SgGraphTraversal<CFG>::
getSource(int& edge, CFG*& g)
{
    // Translate int id -> BOOST edge, query BOOST, translate vertex -> int id.
    Edge e = intedgemap[edge];
    Vertex v = boost::source(e, *g);
    return(vertintmap[v]);
}

/** Gets the target of an edge
SgGraphTraversal::getTarget
Input:
@param[edge] int& integer representation of edge in question
@param[g] the CFG*& CFG used
*/
template<class CFG>
inline int
SgGraphTraversal<CFG>::
getTarget(int& edge, CFG*& g)
{
    Edge e = intedgemap[edge];
    Vertex v = boost::target(e, *g);
    return(vertintmap[v]);
}

/** Gets in edges with integer inputs, internal use only
SgGraphTraversal::getInEdges
Input:
@param[node] int, integer representation of the node to get the in edges from
@param[g] CFG* g, CFG
*/
template<class CFG>
std::vector<int>
SgGraphTraversal<CFG>::
getInEdges(int& node, CFG*& g)
{
    Vertex getIns = intvertmap[node];
    std::vector<int> inedges;
 // DQ (4/11/2017): Fix Klockworks issue of uninitialized variables.
#if 1
    in_edge_iterator i, j;
#else
 // This does not compile.
    in_edge_iterator i = inedges.begin();
    in_edge_iterator j = i;
#endif
    for (boost::tie(i, j) = boost::in_edges(getIns, *g); i != j; ++i)
    {
        inedges.push_back(edgeintmap[*i]);
    }
    return inedges;
}

/** Gets out edges with integer inputs, internal use only
SgGraphTraversal::getOutEdges
Input:
@param[node] int, integer representation of the node to get the out edges from
@param[g] CFG* g, CFG
*/
template<class CFG>
std::vector<int>
SgGraphTraversal<CFG>::
getOutEdges(int &node, CFG*& g)
{
    Vertex getOuts = intvertmap[node];
    std::vector<int> outedges;
 // DQ (4/11/2017): Fix Klockworks issue of uninitialized variables.
#if 1
    out_edge_iterator i, j;
#else
 // This does not compile.
    out_edge_iterator i = outedges.begin();
    out_edge_iterator j = i;
#endif
    for (boost::tie(i, j) = boost::out_edges(getOuts, *g); i != j; ++i)
    {
        outedges.push_back(edgeintmap[*i]);
    }
    return outedges;
}

/** Condenses paths, currently deprecated...
Keeps only the first node, the closure nodes, and the last node of a path.
Input:
@param[pth] std::vector<int> the original path
@param[g] CFG*, the ambient graph
Output: zipped path
NOTE(review): the loop index is a signed int compared against the unsigned
pth.size()-1, and pth[0]/pth.back() are read unconditionally — presumably
callers guarantee pth is non-empty; confirm before reuse.
*/
template<class CFG>
inline std::vector<int>
SgGraphTraversal<CFG>::
zipPath2(std::vector<int>& pth, CFG*& g)
{
    std::vector<int> npth;
    npth.push_back(pth[0]);
    for (int i = 1; i < pth.size()-1; i++)
    {
        if (find(closures.begin(), closures.end(), pth[i]) != closures.end())
        {
            npth.push_back(pth[i]);
        }
    }
    npth.push_back(pth.back());
    return npth;
}

/** Condenses paths to simply the first and last node and the ordered set of edges taken at nodes with more than 1 outedge
Input:
@param[pth] std::vector<int>, the original path
@param[g] CFG*, the ambient graph
@param[start] integer representation of the first node
@param[end] integer representation of the last node
*/
template<class CFG>
std::vector<int>
SgGraphTraversal<CFG>::
zipPath(std::vector<int>& pth, CFG*& g, int start, int end)
{
    std::vector<int> subpath;
    // Layout of the zipped form: [front, back, edge ids taken at markers...].
    std::vector<int> movepath;
    movepath.push_back(pth.front());
    movepath.push_back(pth.back());
    for (unsigned int qw = 0; qw < pth.size()-1; qw++)
    {
        // Only branch points ("markers") need an explicit edge choice stored.
        if (find(markers.begin(), markers.end(), pth[qw]) != markers.end())
        {
            std::vector<int> oeds = getOutEdges(pth[qw], g);
            for (unsigned int i = 0; i < oeds.size(); i++)
            {
                if (getTarget(oeds[i], g) == pth[qw+1])
                {
                    movepath.push_back(oeds[i]);
                }
            }
        }
    }
    return movepath;
}

/** unzips the paths zipped by zipPath
Input:
@param[pzipped] the zipped path
@param[CFG] the ambient graph
@param[start] the integer representation of the first node (used to check that zipPath is working correctly)
@param[end] the integer representation of the end node
*/
template<class CFG>
std::vector<int>
SgGraphTraversal<CFG>::
unzipPath(std::vector<int>& pzipped, CFG*& g, int start, int end)
{
    // Zipped layout: [0]=front, [1]=back, [2..]=edge choices at branch points.
    ROSE_ASSERT(pzipped[0] == start && (pzipped[1] == end || end == -1));
    std::vector<int> zipped;
    for (unsigned int i = 2; i < pzipped.size(); i++)
    {
        zipped.push_back(pzipped[i]);
    }
    std::vector<int> unzipped;
    unzipped.push_back(start);
    std::vector<int> oeds = getOutEdges(start, g);
    if (oeds.size() == 0)
    {
        return unzipped;
    }
    for (unsigned int i = 0; i < zipped.size(); i++)
    {
        // Follow unambiguous (single out-edge) chains without consuming a
        // stored edge choice.
        oeds = getOutEdges(unzipped.back(), g);
        while (oeds.size() == 1)
        {
            if (getTarget(oeds[0], g) == end && unzipped.size() != 1)
            {
                unzipped.push_back(end);
                return unzipped;
            }
            unzipped.push_back(getTarget(oeds[0], g));
            oeds = getOutEdges(unzipped.back(), g);
        }
        if (oeds.size() == 0)
        {
            return unzipped;
        }
        // At a branch point, consume the next recorded edge choice.
        if (oeds.size() > 1 && (unzipped.back() != end || (unzipped.size() == 1 && unzipped.back() == end)))
        {
            ROSE_ASSERT(getSource(zipped[i], g) == unzipped.back());
            unzipped.push_back(getTarget(zipped[i], g));
        }
    }
    // Trailing single-out-edge chain after the last recorded choice.
    std::vector<int> oeds2 = getOutEdges(unzipped.back(), g);
    if (unzipped.back() != end && oeds2.size() != 0)
    {
        while (oeds2.size() == 1 && unzipped.back() != end)
        {
            unzipped.push_back(getTarget(oeds2[0], g));
            oeds2 = getOutEdges(unzipped.back(), g);
        }
    }
    return unzipped;
}

/*
Example Time
Example:
timeval tim;
gettimeofday(&tim, NULL);
double t1=tim.tv_sec+(tim.tv_usec/1000000.0);
do_something_long();
gettimeofday(&tim, NULL);
double t2=tim.tv_sec+(tim.tv_usec/1000000.0);
printf("%.6lf seconds elapsed\n", t2-t1);
*/

/** The function responsible for collecting all paths without loops, and all paths within lops that do not include other loops
then sending those to uTraverse to assemble them into all paths with any combination of loops
Input:
@param[begin] integer representation of the first node
@param[end] integer representation of the last node (or -1 if its not bounded)
@param[g] CFG*, the ambient CFG
@param[loop] boolean expressing whether or not we are calculating paths contained within a loop
NOTE(review): PERFDEBUG is #define'd to 0 at the top of this header, so the
"#ifdef PERFDEBUG" regions below are always compiled in (their contents are
all commented out, so this is currently harmless) — confirm intent.
*/
template<class CFG>
std::vector<std::vector<int> >
SgGraphTraversal<CFG>::
bfsTraversePath(int begin, int end, CFG*& g, bool loop)
{
    //perfdebug allows for examining the speed of traversal
    #ifdef PERFDEBUG
    //timeval tim;
    //gettimeofday(&tim, NULL);
    //double tim1 = tim.tv_sec+(tim.tv_usec/1000000.0);
    #endif
    bool recursedloop = loop;
    // PtP: maps a revisited node to all path prefixes that reached it; used
    // to split the search so shared prefixes are stored only once.
    std::map<int, std::vector<std::vector<int> > > PtP;
    std::set<int> nodes;
    std::vector<std::vector<int> > pathContainer;
    //std::vector<std::vector<int> > oldPaths;
    std::vector<int> completedLoops;
    std::vector<std::vector<int> > npc;
    std::vector<int> bgpath;
    bgpath.push_back(begin);
    pathContainer.push_back(bgpath);
    std::vector<std::vector<int> > newPathContainer;
    std::vector<std::vector<int> > paths;
    std::vector<int> localLoops;
    std::map<int, std::vector<std::vector<int> > > globalLoopPaths;
    //std::cout << "at the while" << std::endl;
    //To keep
    // Breadth-first frontier expansion: grow every partial path by one edge
    // per round until all paths are complete.
    while (pathContainer.size() != 0 /*|| oldPaths.size() != 0*/)
    {
        /*
        unsigned int mpc = 50000;
        if (pathContainer.size() == 0) {
        unsigned int mxl = 0;
        if (oldPaths.size() > mpc) {
        mxl = mpc/2;
        }
        else {
        mxl = oldPaths.size();
        }
        for (unsigned int k = 0; k < mxl; k++) {
        pathContainer.push_back(oldPaths.back());
        oldPaths.pop_back();
        }
        }
        if (pathContainer.size() > mpc) {
        unsigned int j = 0;
        while (j < mpc) {
        npc.push_back(pathContainer.back());
        pathContainer.pop_back();
        j++;
        }
        oldPaths.insert(oldPaths.end(), pathContainer.begin(), pathContainer.end());
        pathContainer = npc;
        npc.clear();
        }
        */
        //iterating through the currently discovered subpaths to build them up
        for (unsigned int i = 0; i < pathContainer.size(); i++)
        {
            std::vector<int> npth = pathContainer[i];
            std::vector<int> oeds = getOutEdges(npth.back(), g);
            std::vector<int> ieds = getInEdges(npth.back(), g);
            npth = pathContainer[i];
            oeds = getOutEdges(npth.back(), g);
            // Completion test: bounded runs finish at 'end', unbounded runs
            // finish at sinks, loop recursion finishes back at the header.
            if ((!recursedloop && ((bound && npth.back() == end && npth.size() != 1) || (!bound && oeds.size() == 0))) || (recursedloop && npth.back() == end && npth.size() != 1))
            {
                std::vector<int> newpth;
                newpth = (pathContainer[i]);
                std::vector<int> movepath = newpth;//zipPath(newpth, g);
                if (recursedloop && newpth.back() == end && newpth.size() != 1)
                {
                    paths.push_back(movepath);
                }
                else if (!recursedloop)
                {
                    if (bound && newpth.size() != 1 && newpth.back() == end)
                    {
                        paths.push_back(movepath);
                    }
                    else if (!bound)
                    {
                        paths.push_back(movepath);
                    }
                }
            }
            else
            {
                std::vector<int> oeds = getOutEdges(pathContainer[i].back(), g);
                for (unsigned int j = 0; j < oeds.size(); j++)
                {
                    int tg = getTarget(oeds[j], g);
                    std::vector<int> newpath = (pathContainer[i]);
                    //we split up paths into pieces so that they don't take up a lot of memory, basically this is when we run into a path
                    //more than once, so we attach all paths that go to that path to that particular node via PtP
                    if (nodes.find(tg) != nodes.end() && find(newpath.begin(), newpath.end(), tg) == newpath.end() && tg != end)
                    {
                        if (PtP.find(tg) == PtP.end())
                        {
                            std::vector<int> nv;
                            nv.push_back(tg);
                            newPathContainer.push_back(nv);
                            PtP[tg].push_back(/*zipPath(*(*/newpath);//, g, newpath.front(), newpath.back()));
                        }
                        else
                        {
                            PtP[tg].push_back(/*zipPath(*/newpath);//, g, newpath.front(), newpath.back()));
                        }
                    }
                    else if (find(newpath.begin(), newpath.end(), getTarget(oeds[j], g)) == newpath.end() || getTarget(oeds[j], g) == end)
                    {
                        // Normal extension: the target is not yet on this path.
                        newpath.push_back(tg);
                        std::vector<int> ieds = getInEdges(tg, g);
                        if (ieds.size() > 1)
                        {//find(closures.begin(), closures.end(), tg) != closures.end()) {
                            nodes.insert(tg);
                        }
                        newPathContainer.push_back(newpath);
                    }
                    else if (tg == end && recursedloop)
                    {
                        newpath.push_back(tg);
                        newPathContainer.push_back(newpath);
                    }
                    else
                    {//if (find(newpath.begin(), newpath.end(), tg) != newpath.end() && tg != end) {
                        // Revisiting a node on the current path: record it as
                        // a loop header to be expanded recursively later.
                        std::vector<int> ieds = getInEdges(tg, g);
                        if (ieds.size() > 1/*find(closures.begin(), closures.end(), tg) != closures.end()*/ && find(completedLoops.begin(), completedLoops.end(), tg) == completedLoops.end() /*&& find(localLoops.begin(), localLoops.end(), tg) == localLoops.end()*/ && find(recurses.begin(), recurses.end(), tg) == recurses.end())
                        {
                            localLoops.push_back(tg);
                            nodes.insert(tg);
                        }
                        // else if (find(recurses.begin(), recurses.end(), tg) != recurses.end()) {
                        // }
                    }
                    //else {
                    // std::cout << "problem" << std::endl;
                    // ROSE_ASSERT(false);
                    // }
                }
            }
        }
        pathContainer = newPathContainer;
        newPathContainer.clear();
    }
    // std::cout << "done while" << std::endl;
    pathContainer.clear();
    // Stitch the PtP prefix fragments back onto the completed suffixes.
    std::vector<std::vector<int> > finnpts;
    std::vector<std::vector<int> > npts;
    while (true)
    {
        if (paths.size() > 1000000)
        {
            std::cout << "too many paths, consider a subgraph" << std::endl;
            ROSE_ASSERT(false);
        }
        //#pragma omp parallel for schedule(guided)
        for (unsigned int qq = 0; qq < paths.size(); qq++)
        {
            std::vector<int> pq = paths[qq];
            std::vector<int> qp;
            int ppf = paths[qq].front();
            if (PtP.find(ppf) != PtP.end())
            {
                for (unsigned int kk = 0; kk < PtP[ppf].size(); kk++)
                {
                    std::vector<int> newpath = /*unzipPath(*/PtP[ppf][kk];//, g, PtP[ppf][kk][0], PtP[ppf][kk][1]);
                    bool good = true;
                    // Reject prefix/suffix combinations that would revisit a
                    // node (i.e. smuggle a loop into a loop-free path).
                    if (newpath.back() == newpath.front() && newpath.front() != begin && newpath.size() > 1)
                    {
                        good = false;
                    }
                    else
                    {
                        // if (find(pq.begin(), pq.end(), newpath.front()) != pq.end() && newpath.front() != begin) {
                        // good = false;
                        // }
                        // else {
                        for (unsigned int kk1 = 0; kk1 < newpath.size(); kk1++)
                        {
                            /* if (newpath.front() == newpath.back()) {
                            good = false;
                            break;
                            }
                            else */if (find(pq.begin(), pq.end(), newpath[kk1]) != pq.end() && newpath[kk1] != begin)
                            {
                                good = false;
                                break;
                            }
                        }
                        //}
                    }
                    if (good)
                    {
                        newpath.insert(newpath.end(), pq.begin(), pq.end());
                        // NOTE(review): the enclosing parallel-for is commented
                        // out, so this critical section is currently uncontended.
                        #pragma omp critical
                        {
                            npts.push_back(newpath);
                        }
                    }
                }
            }
            else
            {
                std::vector<int> ppq = pq;// zipPath(pq, g, pq.front(), pq.back());
                #pragma omp critical
                {
                    finnpts.push_back(ppq);
                }
            }
        }
        if (npts.size() == 0)
        {
            break;
        }
        else
        {
            paths = npts;
            npts.clear();
        }
    }
    paths = finnpts;
    finnpts.clear();
    // Recursively enumerate the body paths of each discovered loop header
    // (memoized in loopStore when available).
    for (unsigned int k = 0; k < localLoops.size(); k++)
    {
        int lk = localLoops[k];
        std::vector<std::vector<int> > loopp;
        if (loopStore.find(localLoops[k]) != loopStore.end())
        {
            loopp.insert(loopp.end(), loopStore[localLoops[k]].begin(), loopStore[localLoops[k]].end());
        }
        else
        {
            std::map<int, std::vector<std::vector<int> > > localLoopPaths;
            completedLoops.push_back(lk);
            recurses.push_back(lk);
            loopp = bfsTraversePath(lk, lk, g, true);
            recurses.pop_back();
        }
        for (unsigned int ik = 0; ik < loopp.size(); ik++)
        {
            if (find(globalLoopPaths[lk].begin(), globalLoopPaths[lk].end(), loopp[ik]) == globalLoopPaths[lk].end())
            {
                globalLoopPaths[localLoops[k]].push_back(loopp[ik]);
            }
        }
    }
    borrowed = true;
    std::vector<std::vector<int> > lps2;
    //unsigned int maxpaths = 1000;
    //unsigned int pathdivisor = 1;//paths.size()/maxpaths;///paths.size();
    //if (pathdivisor < 1) {
    //pathdivisor = 1;
    //maxpaths = paths.size();
    // }
    /*
    for (unsigned int j = 0; j < pathdivisor+1; j++) {
    std::vector<std::vector<int> > npaths;
    std::vector<int> dummyvec;
    unsigned int mxpths;
    if (j < pathdivisor) {
    mxpths = maxpaths;
    }
    else {
    mxpths = paths.size() % pathdivisor;
    }
    for (unsigned int k = 0; k < mxpths; k++) {
    npaths.push_back(paths.back());//unzipPath(paths.back(), g, begin, end));
    paths.pop_back();
    }
    */
    // Hand the loop-free paths to uTraversePath via the pathStore/borrowed
    // side channel; it expands all loop permutations onto them.
    pathStore = paths;
    paths.clear();
    if (!recursedloop)
    {
        uTraversePath(begin, end, g, false, globalLoopPaths);
    }
    else
    {
        recursed++;
        std::set<std::vector<int> > lps = uTraversePath(begin, end, g, true, globalLoopPaths);
        recursed--;
        for (std::set<std::vector<int> >::iterator ij = lps.begin(); ij != lps.end(); ij++)
        {
            std::vector<int> ijk = (*ij);
            lps2.push_back(*ij);
        }
    }
    //}
    #ifdef PERFDEBUG
    // timeval tim;
    //std::cout << "begin: " << begin << " end: " << end << std::endl;
    //gettimeofday(&tim, NULL);
    //double tim2 = tim.tv_sec+(tim.tv_usec/1000000);
    //double timeRet = tim2 - tim1;
    //std::cout << "bfs time elapsed: " << timeRet << std::endl;
    #endif
    return lps2;
}

/**
This function calculates all the permutations of loops on paths
it also throws away duplicate paths
Input:
@param[begin] integer representation of first node
@param[end] integer representation of the final node
@param[g] ambient CFG
@param[globalLoopPaths] connects an integer representation of a node to all possible
loops starting at that node */ template<class CFG> std::set<std::vector<int> > SgGraphTraversal<CFG>:: uTraversePath(int begin, int end, CFG*& g, bool loop, std::map<int, std::vector<std::vector<int> > >& globalLoopPaths) { //std::cout << "uTraverse" << std::endl; //int doubledpaths = 0; int newmil = 1; //#ifdef LP //if (loop && loopStore.find(begin) != loopStore.end()) { // return loopStore[begin]; //} //#endif #ifdef PERFDEBUG //timeval tim; //gettimeofday(&tim, NULL); //double t1 = tim.tv_sec+(tim.tv_usec/1000000); #endif std::set<std::vector<int> > newpaths; std::set<std::vector<int> > npaths; pathnum = 0; std::vector<int> path; std::vector<std::vector<int> > paths; int truepaths = 0; std::vector<std::vector<int> > checkpaths; std::vector<std::vector<int> > npathchecker; std::map<int, int> currents; //int nnumpaths = 0; std::set<std::vector<int> > loopPaths; //bool threadsafe = true; bool done = false; std::set<std::vector<int> > fts; //double ttfors = 0; //double tperms = 0; while (true) { //std::cout << "paths.size() " << paths.size() << std::endl; if (paths.size() > 1000000) { std::cout << "nearly 1 million paths with no loops, stopping" << std::endl; return loopPaths; std::cout << "ended early" << std::endl; } if (done || borrowed) { if (borrowed) { paths = pathStore; pathStore.clear(); } //std::cout << "paths.size(): " << paths.size() << std::endl; if (paths.size() != 0) { } else { return loopPaths; } // #pragma omp parallel // { #pragma omp parallel for schedule(guided) for (unsigned int qqq = 0; qqq < paths.size(); qqq++) { // std::cout << "pathcheck" << std::endl; //int pathevals = 0; //std::vector<int> zpt = zipPath2(paths[qqq], g); //std::set<std::vector<int> > boxpaths; std::set<std::vector<int> > movepaths; std::vector<int> path;// = paths[qqq]; path = paths[qqq];//unzipPath(paths[qqq], g, begin, end); truepaths++; int permnums = 1; std::vector<int> perms; std::vector<unsigned int> qs; std::map<int, std::vector<std::vector<int> > > localLoops; 
std::vector<int> takenLoops; takenLoops.push_back(path[0]); bool taken = false; //timeval timfor; int lost = 0; //gettimeofday(&timfor, NULL); //double t1for = timfor.tv_sec + (timfor.tv_usec/1000000); for (unsigned int q = 1; q < path.size()-1; q++) { //if (find(closures.begin(), closures.end(), path[q]) != closures.end()) { if (globalLoopPaths.find(path[q]) != globalLoopPaths.end() /*&& find(lloops.begin(), lloops.end(), path[q]) != lloops.end()*/ && globalLoopPaths[path[q]].size() != 0 /*&& path[q] != begin && path[q] != end*/) { for (unsigned int qp1 = 0; qp1 < globalLoopPaths[path[q]].size(); qp1++) { std::vector<int> gp = globalLoopPaths[path[q]][qp1]; //unzipPath(globalLoopPaths[path[q]][qp1],g,path[q],path[q]); // std::vector<int> zgp = zipPath2(globalLoopPaths[zpt[q]][qp1], g); for (unsigned int qp2 = 0; qp2 < takenLoops.size(); qp2++) { if (find(gp.begin(),gp.end(), takenLoops[qp2]) != gp.end()) { taken = true; } } if (!taken) { localLoops[path[q]].push_back(gp); } else { lost++; taken = false; } } if (localLoops[path[q]].size() != 0) { takenLoops.push_back(path[q]); permnums *= (localLoops[path[q]].size()+1); perms.push_back(permnums); qs.push_back(path[q]); } } } //} //if (loop) { //std::cout << "lostloop: " << lost << std::endl; //} //else { //std::cout << "lostpath: " << lost << std::endl; //} //std::cout << "endpathcheck" << std::endl; //std::cout << "rest" << std::endl; //std::cout << "permnums: " << permnums << std::endl; //gettimeofday(&timfor, NULL); //double t2for = timfor.tv_sec + (timfor.tv_usec/1000000); //double ttfor = t2for - t1for; //#pragma omp atomic //ttfors += ttfor; //std::set<std::vector<int> > movepaths2; std::set<std::vector<int> > movepathscheck; //timeval timperms; //gettimeofday(&timperms, NULL); // double t1perm = timperms.tv_sec + (timperms.tv_usec/1000000); std::vector<int> nvec; std::vector<std::vector<int> > boxpaths(permnums, nvec); //#pragma omp parallel for schedule(guided) for (int i = 1; i <= permnums; i++) { //bool 
goodthread = false; std::vector<int> loopsTaken; //bool stop = false; unsigned int j = 0; std::vector<int> npath; while (true) { if (j == perms.size() || perms[j] > i) { break; } else { j++; } } int pn = i; std::vector<int> pL; for (unsigned int j1 = 0; j1 <= j; j1++) { pL.push_back(-1); } for (unsigned int k = j; k > 0; k--) { int l = 1; while (perms[k-1]*l < pn) { l++; } pL[k] = l-2; pn -= (perms[k-1]*(l-1)); } pL[0] = pn-2; unsigned int q2 = 0; for (unsigned int q1 = 0; q1 < path.size(); q1++) { if (q2 < qs.size()) { if (qs.size() != 0 && (unsigned)path[q1] == qs[q2] && (size_t)q2 != pL.size()) { if (pL[q2] == -1) { npath.push_back(path[q1]); } else { // if (!stop) { npath.insert(npath.end(), localLoops[path[q1]][pL[q2]].begin(), localLoops[path[q1]][pL[q2]].end()); // } } q2++; } else { npath.push_back(path[q1]); } } else { npath.push_back(path[q1]); } } #ifdef FULLDEBUG std::cout << "path: " << std::endl; for (int qe = 0; qe < npath.size(); qe++) { std::cout << ", " << npath[qe]; } std::cout << std::endl; std::cout << "permnum: " << i << std::endl; #endif // bool addit = false; //if (!stop) { // if (loop && npath.front() == npath.back()) { // addit = true; // } // else if (!loop && bound && npath.front() == begin && npath.back() == end && npath.size() != 1) { // addit = true; // } // else if (!loop && !bound) { // addit = true; // } // if (!addit) { // std::cout << "bad path" << std::endl; // } //bool extra = false; //if (addit && !loop) { //if (movepathscheck.find(npath) == movepathscheck.end()) { //int mpc = movepathscheck.size(); //std::set<std::vector<int> > movepathspre = movepathscheck; // movepaths2.insert(npath); //movepathscheck.insert(npath); //ROSE_ASSERT(movepathscheck.size() == mpc || movepathspre.find(npath) == movepathspre.end()); //if (movepathscheck.size() == mpc) { // extra = true; // } //} //else { //#pragma omp atomic // doubledpaths++; // } //} //if (!workingthread || threadsafe) { //if ((newpaths.size() > 1 || i == permnums || 
threadsafe)) { // } // } // } //if (!extra) // { //if (movepaths2.size() > 0) //|| i == permnums || threadsafe) // #pragma omp critical // { boxpaths[i-1] = npath; // } // } //std::cout << "endrest" << std::endl; } evaledpaths += boxpaths.size(); if (evaledpaths > newmil*100000ull) { //std::cout << "evaledpaths: " << evaledpaths << std::endl; newmil++; } // #pragma omp critical // { if (!loop) { for (std::vector<std::vector<int> >::iterator box = boxpaths.begin(); box != boxpaths.end(); box++) { std::vector<Vertex> verts; getVertexPath((*box), g, verts); #pragma omp critical { analyzePath(verts); } } } else { #pragma omp critical { loopPaths.insert(boxpaths.begin(), boxpaths.end());; } } } } //} /* #pragma omp atomic evaledpaths++; //pathevals++; if (evaledpaths % 10000 == 0 && evaledpaths != 0) { std::cout << "evaled paths: " << evaledpaths << std::endl; } if (!loop) { std::vector<Vertex> verts; getVertexPath(npath, g, verts); #pragma omp critical { #ifdef FULLDEBUG for (unsigned int aa = 0; aa < npath.size(); aa++) { if (ptsNum.find(npath[aa]) != ptsNum.end()) { ptsNum[npath[aa]] += 1; } else { ptsNum[npath[aa]] = 1; } } #endif analyzePath(verts); } } else if (loop) { //std::vector<int> zpth = zipPath(npath, g, npath.front(), npath.back()); #pragma omp critical { loopPaths.insert(npath);//zipPath(npath, g, npath.front(), npath.back())); } } else { } } */ // movepaths2.clear(); // std::cout << "permnums: " << permnums << std::endl; // std::cout << "evaledpaths final: " << pathevals << std::endl; //gettimeofday(&timperms, NULL); //double t2perm = timperms.tv_sec+(timperms.tv_usec/1000000); //#pragma omp atomic //tperms += t2perm - t1perm; // } //} //} //} #ifdef PERFDEBUG //gettimeofday(&tim, NULL); // double t2 = tim.tv_sec+(tim.tv_usec/1000000.0); // double tperm = t2 - t1perm //double tX = t2 - t1; //std::cout << "begin: " << begin << " end: " << end << std::endl; // std::cout << "uTraverse time: " << tX << std::endl; // std::cout << "tperms: " << tperms << 
std::endl; // std::cout << "ttfors: " << ttfors << std::endl; // std::cout << "doubledpaths: " << doubledpaths << std::endl; #endif #ifdef LP if (loop) { #ifdef PERFDEBUG // std::cout << "loopPaths: " << loopPaths.size() << std::endl; #endif loopStore[begin] = loopPaths; } #endif return loopPaths; } } /** This is the function that is used by the user directly to start the algorithm. It is immediately available to the user SgGraphTraversal::constructPathAnalyzer Input: @param[begin] Vertex, starting node @param[end] Vertex, endnode @param[g] CFG* g, CFG calculated previously */ template<class CFG> void SgGraphTraversal<CFG>:: constructPathAnalyzer(CFG* g, bool unbounded, Vertex begin, Vertex end, bool ns) { abnormals = 0; normals = 0; if (ns) { needssafety = true; } else { needssafety = false; } checkedfound = 0; recursed = 0; nextsubpath = 0; borrowed = true; stoppedpaths = 0; evaledpaths = 0; badpaths = 0; sourcenum = 0; prepareGraph(g); workingthread = false; workingthreadnum = -1; //std::cout << "markers: " << markers.size() << std::endl; //std::cout << "closures: " << closures.size() << std::endl; //std::cout << "sources: " << sources.size() << std::endl; //std::cout << "sinks" << sinks.size() << std::endl; // printHotness(g); bool subgraph = false; if (!subgraph) { if (!unbounded) { bound = true; recursiveLoops.clear(); recurses.clear(); std::vector<std::vector<int> > spaths = bfsTraversePath(vertintmap[begin], vertintmap[end], g); // std::cout << "spaths: " << spaths.size() << std::endl; } else { std::set<int> usedsources; bound = false; std::vector<int> localLps; for (unsigned int j = 0; j < sources.size(); j++) { sourcenum = sources[j]; recursiveLoops.clear(); recurses.clear(); std::vector<std::vector<int> > spaths = bfsTraversePath(sources[j], -1, g); } } } //std::cout << "checkedfound: " << checkedfound << std::endl; printHotness(g); } /** DEPRECATED This is a function to construct subgraphs for parallelization SgGraphTraversal::computeSubGraphs Input: 
@param[begin] const int, starting point
@param[end] const int ending point
@param[g] const CFG*, control flow graph to compute
@param[depthDifferential] int, used to specify how large the subgraph should be
*/
template<class CFG>
void
SgGraphTraversal<CFG>::
computeSubGraphs(const int& begin, const int &end, CFG*& g, int depthDifferential) {
    /* Walks orderOfNodes in windows of depthDifferential nodes, copying each
       window (plus its outgoing edges) into its own subgraph in subGraphVector.
       Nodes are mapped back and forth via SubGraphGraphMap/GraphSubGraphMap. */
    int minDepth = 0;
    int maxDepth = minDepth + depthDifferential;
    int currSubGraph = 0;
    CFG* subGraph;  /* NOTE(review): declared but never used */
    std::set<int> foundNodes;
    while (true) {
        /* NOTE(review): this local 'begin' shadows the 'begin' parameter */
        Vertex begin = boost::add_vertex(*subGraphVector[currSubGraph]);
        GraphSubGraphMap[currSubGraph][intvertmap[orderOfNodes[minDepth]]] = intvertmap[begin];
        SubGraphGraphMap[currSubGraph][intvertmap[begin]] = intvertmap[orderOfNodes[minDepth]];
        for (int i = minDepth; i <= maxDepth; i++) {
            Vertex v = GraphSubGraphMap[currSubGraph][intvertmap[orderOfNodes[i]]];
            std::vector<int> outEdges = getOutEdges(orderOfNodes[i], g);
            for (unsigned int j = 0; j < outEdges.size(); j++) {
                Vertex u;
                if (foundNodes.find(getTarget(outEdges[j], g)) == foundNodes.end()) {
                    u = GraphSubGraphMap[currSubGraph][intvertmap[getTarget(outEdges[j], g)]];
                }
                else {
                    /* target not yet copied into this subgraph: add it and record the mapping */
                    u = boost::add_vertex(*subGraphVector[currSubGraph]);
                    foundNodes.insert(getTarget(outEdges[j], g));
                    SubGraphGraphMap[currSubGraph][u] = intvertmap[getTarget(outEdges[j], g)];
                    GraphSubGraphMap[currSubGraph][intvertmap[getTarget(outEdges[j], g)]] = u;
                }
                Edge edge;
                bool ok;
                boost::tie(edge, ok) = boost::add_edge(v,u,*subGraphVector[currSubGraph]);
            }
        }
        minDepth = maxDepth;
        if ((unsigned int) minDepth == orderOfNodes.size()-1) {
            break;
        }
        maxDepth += depthDifferential;
        if ((unsigned int) maxDepth > orderOfNodes.size()-1) {
            maxDepth = orderOfNodes.size()-1;
        }
        /* NOTE(review): newSubGraph is pushed uninitialized -- the pointer stored in
           subGraphVector is indeterminate; verify callers allocate before use
           (function is marked DEPRECATED above) */
        CFG* newSubGraph;
        subGraphVector.push_back(newSubGraph);
        currSubGraph++;
    }
    return;
}

/* These should NOT be used by the user.
   They are simply for writing interesting information on the DOT graphs of the CFG */

/* Emit one DOT node statement for node 'cf', labelled with its number and the
   given property string. */
template<class CFG>
void
SgGraphTraversal<CFG>::
printCFGNodeGeneric(int &cf, std::string prop, std::ofstream& o) {
    std::string nodeColor = "black";
    o << cf << " [label=\"" << " num:" << cf << " prop: " << prop << "\", color=\"" << nodeColor << "\", style=\"" << "solid" << "\"];\n";
}

/* Emit one DOT node statement for node 'cf'; under FULLDEBUG the label shows
   the visit count from ptsNum instead of the node number. */
template<class CFG>
void
SgGraphTraversal<CFG>::
printCFGNode(int& cf, std::ofstream& o) {
#ifdef FULLDEBUG
    int pts = ptsNum[cf];
    std::string nodeColor = "black";
    o << cf << " [label=\"" << " pts: " << pts << "\", color=\"" << nodeColor << "\", style=\"" << "solid" << "\"];\n";
#endif
#ifndef FULLDEBUG
    std::string nodeColor = "black";
    o << cf << " [label=\"" << " num:" << cf << "\", color=\"" << nodeColor << "\", style=\"" << "solid" << "\"];\n";
#endif
}

/* Emit one DOT edge statement for edge 'cf' (source -> target). */
template<class CFG>
void
SgGraphTraversal<CFG>::
printCFGEdge(int& cf, CFG*& cfg, std::ofstream& o) {
    int src = getSource(cf, cfg);
    int tar = getTarget(cf, cfg);
    o << src << " -> " << tar << " [label=\"" << src << " " << tar << "\", style=\"" << "solid" << "\"];\n";
}

/* Dump the whole CFG to "hotness<N>.dot" using the node/edge printers above.
   NOTE(review): currhot is a local reset to 0 every call, so the filename is
   always "hotness0.dot" -- confirm whether a per-call counter was intended. */
template<class CFG>
void
SgGraphTraversal<CFG>::
printHotness(CFG*& g) {
    const CFG* gc = g;
    int currhot = 0;
    std::ofstream mf;
    std::stringstream filenam;
    filenam << "hotness" << currhot << ".dot";
    currhot++;
    std::string fn = filenam.str();
    mf.open(fn.c_str());
    mf << "digraph defaultName { \n";
// DQ (4/11/2017): Fix Klockworks issue of uninitialized variables.
#if 1
    vertex_iterator v, vend;
    edge_iterator e, eend;
#else
 // This does not compile.
    vertex_iterator v = vertices(*gc).begin();
    vertex_iterator vend = v;
    edge_iterator e = edges(*gc).begin();
    edge_iterator eend = e;
#endif
    for (boost::tie(v, vend) = vertices(*gc); v != vend; ++v) {
        printCFGNode(vertintmap[*v], mf);
    }
    for (tie(e, eend) = edges(*gc); e != eend; ++e) {
        printCFGEdge(edgeintmap[*e], g, mf);
    }
    mf.close();
}

/* Dump the CFG to "pathnums.dot"; nodes with an entry in nodeStrings get that
   string as their property label, all others get "noprop". */
template<class CFG>
void
SgGraphTraversal<CFG>::
printPathDot(CFG*& g) {
    const CFG* gc = g;
    std::ofstream mf;
    std::stringstream filenam;
    filenam << "pathnums.dot";
    std::string fn = filenam.str();
    mf.open(fn.c_str());
    mf << "digraph defaultName { \n";
    vertex_iterator v, vend;
    edge_iterator e, eend;
    for (tie(v, vend) = vertices(*gc); v != vend; ++v) {
        if (nodeStrings.find(vertintmap[*v]) != nodeStrings.end()) {
            int nn = vertintmap[*v];
            printCFGNodeGeneric(vertintmap[*v], nodeStrings[nn], mf);
        }
        else {
            printCFGNodeGeneric(vertintmap[*v], "noprop", mf);
        }
    }
    for (tie(e, eend) = edges(*gc); e != eend; ++e) {
        printCFGEdge(edgeintmap[*e], g, mf);
    }
    mf.close();
}

/** This is the function that preps the graph for traversal
    (resets the node/edge numbering counters, then enumerates the graph)
SgGraphTraversal::prepareGraph
Input:
@param[g] CFG*& g, CFG calculated previously
*/
template<class CFG>
void
SgGraphTraversal<CFG>::
prepareGraph(CFG*& g) {
    nextNode = 1;
    nextEdge = 1;
    findClosuresAndMarkersAndEnumerate(g);
}

/** DEPRECATED
This is the function that preps the graph for traversal, currently this one
isn't used but for many traversals on one visitor may necessitate
(identical body to prepareGraph above)
SgGraphTraversal::firstPrepGraph
Input:
@param[g] CFG*& g, CFG calculated previously
*/
template<class CFG>
void
SgGraphTraversal<CFG>::
firstPrepGraph(CFG*& g) {
    nextNode = 1;
    nextEdge = 1;
    findClosuresAndMarkersAndEnumerate(g);
}

/** This calculates nodes with more than one in edge or more than one out edge.
It also assigns integer ids to every edge and vertex (edgeintmap/intedgemap,
vertintmap/intvertmap) and classifies nodes into markers (fan-out > 1),
closures (fan-in > 1), sinks (no out edges) and sources (no in edges).
SgGraphTraversal::findClosuresAndMarkers
Input:
@param[g] CFG*& g, CFG calculated previously
*/
template<class CFG>
void
SgGraphTraversal<CFG>::
findClosuresAndMarkersAndEnumerate(CFG*& g) {
// DQ (4/11/2017): Fix Klockworks issue of uninitialized variables.
#if 1
    edge_iterator e, eend;
#else
    edge_iterator e = edges(*g).begin();
    edge_iterator eend = e;
#endif
    /* number every edge, starting at nextEdge */
    for (tie(e, eend) = edges(*g); e != eend; ++e) {
        intedgemap[nextEdge] = *e;
        edgeintmap[*e] = nextEdge;
        nextEdge++;
    }
// DQ (4/11/2017): Fix Klockworks issue of uninitialized variables.
#if 1
    vertex_iterator v1, vend1;
#else
    vertex_iterator v1 = vertices(*g).begin();
    vertex_iterator vend1 = v1;
#endif
    /* number every vertex, starting at nextNode */
    for (boost::tie(v1, vend1) = vertices(*g); v1 != vend1; ++v1) {
        vertintmap[*v1] = nextNode;
        intvertmap[nextNode] = *v1;
        nextNode++;
    }
// DQ (4/11/2017): Fix Klockworks issue of uninitialized variables.
#if 1
    vertex_iterator v, vend;
#else
    vertex_iterator v = vertices(*g).begin();
    vertex_iterator vend = v;
#endif
    /* classify each vertex by its in/out degree */
    for (boost::tie(v, vend) = vertices(*g); v != vend; ++v) {
        std::vector<int> outs = getOutEdges(vertintmap[*v], g);
        std::vector<int> ins = getInEdges(vertintmap[*v], g);
        if (outs.size() > 1) {
            markers.push_back(vertintmap[*v]);
            markerIndex[vertintmap[*v]] = markers.size()-1;
            for (unsigned int i = 0; i < outs.size(); i++) {
                pathsAtMarkers[vertintmap[*v]].push_back(getTarget(outs[i], g));
            }
        }
        if (ins.size() > 1) {
            closures.push_back(vertintmap[*v]);
        }
        if (outs.size() == 0) {
            sinks.push_back(vertintmap[*v]);
        }
        if (ins.size() == 0) {
            sources.push_back(vertintmap[*v]);
        }
    }
    return;
}

/** DEPRECATED
Currently unused but will be necessary for parallelization in progress.
Breadth-first-like ordering of nodes into orderOfNodes: a node with several
in-edges is "held back" (counted in reverseCurrents) until enough of its
predecessors have been visited.
SgGraphTraversal::computeOrder
@param[g] CFG* cfg in question
@parm[begin] const int, integer representation of source node
*/
template<class CFG>
void
SgGraphTraversal<CFG>::
computeOrder(CFG*& g, const int& begin) {
    std::vector<int> currentNodes;
    std::vector<int> newCurrentNodes;
    currentNodes.push_back(begin);
    std::map<int, int> reverseCurrents;
    orderOfNodes.push_back(begin);
    std::set<int> heldBackNodes;
    while (currentNodes.size() != 0) {
        for (unsigned int j = 0; j < currentNodes.size(); j++) {
            std::vector<int> inEdges = getInEdges(currentNodes[j], g);
            if (inEdges.size() > 1) {
                if (reverseCurrents.find(currentNodes[j]) == reverseCurrents.end()) {
                    reverseCurrents[currentNodes[j]] = 0;
                }
                if ((unsigned int) reverseCurrents[currentNodes[j]] == inEdges.size() - 1) {
                    /* all predecessors seen: release the node and expand its successors */
                    heldBackNodes.erase(currentNodes[j]);
                    reverseCurrents[currentNodes[j]]++;
                    std::vector<int> outEdges = getOutEdges(currentNodes[j], g);
                    for (unsigned int k = 0; k < outEdges.size(); k++) {
                        newCurrentNodes.push_back(getTarget(outEdges[k], g));
                        orderOfNodes.push_back(getTarget(outEdges[k], g));
                    }
                }
                /* NOTE(review): comparing the counter against reverseCurrents.size()
                   (number of map entries) looks suspicious -- inEdges.size() seems
                   intended; function is marked DEPRECATED, confirm before reuse */
                else if (reverseCurrents[currentNodes[j]] < reverseCurrents.size()) {
                    reverseCurrents[currentNodes[j]]++;
                    if (heldBackNodes.find(currentNodes[j]) == heldBackNodes.end()) {
                        heldBackNodes.insert(currentNodes[j]);
                    }
                }
            }
            else {
                std::vector<int> outEdges = getOutEdges(currentNodes[j], g);
                for (unsigned int k = 0; k < outEdges.size(); k++) {
                    newCurrentNodes.push_back(getTarget(outEdges[k], g));
                    orderOfNodes.push_back(getTarget(outEdges[k], g));
                }
            }
        }
        /* nothing progressed this round: force-release all held-back nodes */
        if (newCurrentNodes.size() == 0 && heldBackNodes.size() != 0) {
            for (std::set<int>::iterator q = heldBackNodes.begin(); q != heldBackNodes.end(); q++) {
                int qint = *q;
                std::vector<int> heldBackOutEdges = getOutEdges(qint, g);
                for (unsigned int p = 0; p < heldBackOutEdges.size(); p++) {
                    newCurrentNodes.push_back(getTarget(heldBackOutEdges[p], g));
                }
            }
            heldBackNodes.clear();
        }
        currentNodes = newCurrentNodes;
        newCurrentNodes.clear();
    }
    return;
}

/** Converts the path calculated by this algorithm to Vertices so users can access data
SgGraphTraversal::getVertexPath
@param[path] integer representation of path
@param[g] CFG*, cfg in question
@param[vertexPath] for some reason this can't be a return value so it is changed
via pass by reference
*/
template<class CFG>
void
SgGraphTraversal<CFG>::
getVertexPath(std::vector<int> path, CFG*& g, std::vector<Vertex>& vertexPath) {
    /* translate each integer node id back to its Boost vertex descriptor */
    for (unsigned int i = 0; i < path.size(); i++) {
        vertexPath.push_back(intvertmap[path[i]]);
    }
}

/** DEPRECATED
Currently unused, may eventually be modified for optimal storage purposes.
Intentionally a no-op.
SgGraphTraversal::storeCompact
@param[compactPath] path to be compactified
*/
template<class CFG>
void
SgGraphTraversal<CFG>::
storeCompact(std::vector<int> compactPath) {
    return;
}
convolution_5x5.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 5x5 convolution, stride 1, scalar implementation.
// Kernel layout is [outch][inch][25] (kernel0 = kernel + p*inch*25 + q*25);
// each output channel is initialized with its bias and then accumulated over
// all input channels. The main loop computes two output rows per iteration
// (sum for row i, sum2 for row i+1), sharing the input rows r1..r4 between
// the two accumulations; a scalar tail loop handles an odd last row.
static void conv5x5s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        // seed the whole output channel with the bias (0 when no bias tensor)
        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        for (int q=0; q<inch; q++)
        {
            // outptr/outptr2: the two output rows currently being produced
            float* outptr = out;
            float* outptr2 = outptr + outw;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch*25 + q*25;

            // six consecutive input rows feed two output rows of a 5x5 window
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;
            const float* r4 = img0 + w*4;
            const float* r5 = img0 + w*5;

            // the five rows of the 5x5 kernel
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 5;
            const float* k2 = kernel0 + 10;
            const float* k3 = kernel0 + 15;
            const float* k4 = kernel0 + 20;

            int i = 0;

            // two output rows per iteration
            for (; i+1 < outh; i+=2)
            {
                int remain = outw;

                for (; remain>0; remain--)
                {
                    float sum = 0;
                    float sum2 = 0;

                    // output row i: rows r0..r4 against kernel rows k0..k4
                    sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4];
                    sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4];
                    sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4];
                    sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4];
                    sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4];

                    // output row i+1: rows r1..r5 against kernel rows k0..k4
                    sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r1[3] * k0[3]; sum2 += r1[4] * k0[4];
                    sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r2[3] * k1[3]; sum2 += r2[4] * k1[4];
                    sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; sum2 += r3[3] * k2[3]; sum2 += r3[4] * k2[4];
                    sum2 += r4[0] * k3[0]; sum2 += r4[1] * k3[1]; sum2 += r4[2] * k3[2]; sum2 += r4[3] * k3[3]; sum2 += r4[4] * k3[4];
                    sum2 += r5[0] * k4[0]; sum2 += r5[1] * k4[1]; sum2 += r5[2] * k4[2]; sum2 += r5[3] * k4[3]; sum2 += r5[4] * k4[4];

                    // accumulate on top of bias / previous input channels
                    *outptr += sum;
                    *outptr2 += sum2;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    r4++;
                    r5++;
                    outptr++;
                    outptr2++;
                }

                // after outw steps, advance 4 (= w - outw) to the row end, plus
                // one extra row: the window moves down two rows per iteration
                r0 += 4 + w;
                r1 += 4 + w;
                r2 += 4 + w;
                r3 += 4 + w;
                r4 += 4 + w;
                r5 += 4 + w;

                // skip the row written via outptr2
                outptr += outw;
                outptr2 += outw;
            }

            // remaining single output row (odd outh)
            for (; i < outh; i++)
            {
                int remain = outw;

                for (; remain>0; remain--)
                {
                    float sum = 0;

                    sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4];
                    sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4];
                    sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4];
                    sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4];
                    sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4];

                    *outptr += sum;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    r4++;
                    outptr++;
                }

                // advance past the 4-pixel border to the start of the next row
                r0 += 4;
                r1 += 4;
                r2 += 4;
                r3 += 4;
                r4 += 4;
            }
        }
    }
}
do_kahan_sum_omp.c
/*
 * Compensated (Kahan-style) summation of var[0..ncells-1], parallelized with
 * OpenMP. Each thread keeps a private running total plus a compensation term
 * that carries the bits lost to rounding; both partials are folded into the
 * reduction variable at the end, so the result is far more accurate than a
 * naive parallel sum. In this variant the carry holds the not-yet-absorbed
 * term, so the final value is running + carry.
 */
double do_kahan_sum_omp(double* restrict var, long ncells)
{
   double sum = 0.0;

#pragma omp parallel reduction(+:sum)
   {
      /* per-thread running total and compensation term */
      double running = 0.0;
      double carry   = 0.0;

#pragma omp for
      for (long i = 0; i < ncells; i++) {
         double term_plus_carry = var[i] + carry;
         double updated         = running + carry;
         /* recover the low-order bits the addition above dropped */
         carry   = term_plus_carry - (updated - running);
         running = updated;
      }

      /* fold this thread's partial result into the reduction variable */
      sum += carry;
#pragma omp barrier
      sum += running;
   }

   return sum;
}
csr_block_matvec.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Matvec functions for hypre_CSRBlockMatrix class.
 *
 *****************************************************************************/

#include "csr_block_matrix.h"
#include "../seq_mv/seq_mv.h"

/*--------------------------------------------------------------------------
 * hypre_CSRBlockMatrixMatvec
 *
 * Performs y <- alpha * A * x + beta * y for a block CSR matrix: each stored
 * entry is a dense blk_size x blk_size block (row-major, bnnz floats each).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRBlockMatrixMatvec(HYPRE_Complex alpha, hypre_CSRBlockMatrix *A,
                           hypre_Vector *x, HYPRE_Complex beta, hypre_Vector *y)
{
   HYPRE_Complex *A_data   = hypre_CSRBlockMatrixData(A);
   HYPRE_Int     *A_i      = hypre_CSRBlockMatrixI(A);
   HYPRE_Int     *A_j      = hypre_CSRBlockMatrixJ(A);
   HYPRE_Int      num_rows = hypre_CSRBlockMatrixNumRows(A);
   HYPRE_Int      num_cols = hypre_CSRBlockMatrixNumCols(A);
   HYPRE_Int      blk_size = hypre_CSRBlockMatrixBlockSize(A);
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      x_size = hypre_VectorSize(x);
   HYPRE_Int      y_size = hypre_VectorSize(y);
   HYPRE_Int      i, b1, b2, jj, bnnz=blk_size*blk_size;
   HYPRE_Int      ierr = 0;
   HYPRE_Complex  temp;

   /*---------------------------------------------------------------------
    *  Check for size compatibility.  Matvec returns ierr = 1 if
    *  length of X doesn't equal the number of columns of A,
    *  ierr = 2 if the length of Y doesn't equal the number of rows
    *  of A, and ierr = 3 if both are true.
    *
    *  Because temporary vectors are often used in Matvec, none of
    *  these conditions terminates processing, and the ierr flag
    *  is informational only.
    *--------------------------------------------------------------------*/
   if (num_cols*blk_size != x_size) ierr = 1;
   if (num_rows*blk_size != y_size) ierr = 2;
   if (num_cols*blk_size != x_size && num_rows*blk_size != y_size) ierr = 3;

   /*-----------------------------------------------------------------------
    * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
    *-----------------------------------------------------------------------*/
   if (alpha == 0.0)
   {
      /* A does not contribute: only scale y by beta */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_rows*blk_size; i++) y_data[i] *= beta;
      return ierr;
   }

   /*-----------------------------------------------------------------------
    * y = (beta/alpha)*y
    * (pre-scale so the final pass can multiply everything by alpha once)
    *-----------------------------------------------------------------------*/
   temp = beta / alpha;
   if (temp != 1.0)
   {
      if (temp == 0.0)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rows*blk_size; i++) y_data[i] = 0.0;
      }
      else
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rows*blk_size; i++) y_data[i] *= temp;
      }
   }

   /*-----------------------------------------------------------------
    * y += A*x
    * Safe to parallelize over rows: each iteration writes only the
    * y entries of its own block row.
    *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,jj,b1,b2,temp) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < num_rows; i++)
   {
      for (jj = A_i[i]; jj < A_i[i+1]; jj++)
      {
         for (b1 = 0; b1 < blk_size; b1++)
         {
            temp = y_data[i*blk_size+b1];
            for (b2 = 0; b2 < blk_size; b2++)
               temp += A_data[jj*bnnz+b1*blk_size+b2] * x_data[A_j[jj]*blk_size+b2];
            y_data[i*blk_size+b1] = temp;
         }
      }
   }

   /*-----------------------------------------------------------------
    * y = alpha*y
    *-----------------------------------------------------------------*/
   if (alpha != 1.0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_rows*blk_size; i++) y_data[i] *= alpha;
   }

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_CSRBlockMatrixMatvecT
 *
 *   Performs y <- alpha * A^T * x + beta * y
 *
 *   From Van Henson's modification of hypre_CSRMatrixMatvec.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRBlockMatrixMatvecT( HYPRE_Complex alpha, hypre_CSRBlockMatrix *A,
                             hypre_Vector *x, HYPRE_Complex beta, hypre_Vector *y )
{
   HYPRE_Complex *A_data   = hypre_CSRBlockMatrixData(A);
   HYPRE_Int     *A_i      = hypre_CSRBlockMatrixI(A);
   HYPRE_Int     *A_j      = hypre_CSRBlockMatrixJ(A);
   HYPRE_Int      num_rows = hypre_CSRBlockMatrixNumRows(A);
   HYPRE_Int      num_cols = hypre_CSRBlockMatrixNumCols(A);
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      x_size = hypre_VectorSize(x);
   HYPRE_Int      y_size = hypre_VectorSize(y);
   HYPRE_Complex  temp;
   HYPRE_Int      i, j, jj;
   HYPRE_Int      ierr = 0;
   HYPRE_Int      b1, b2;
   HYPRE_Int      blk_size = hypre_CSRBlockMatrixBlockSize(A);
   HYPRE_Int      bnnz = blk_size*blk_size;

   /*---------------------------------------------------------------------
    *  Check for size compatibility.  MatvecT returns ierr = 1 if
    *  length of X doesn't equal the number of rows of A,
    *  ierr = 2 if the length of Y doesn't equal the number of
    *  columns of A, and ierr = 3 if both are true.
    *
    *  Because temporary vectors are often used in MatvecT, none of
    *  these conditions terminates processing, and the ierr flag
    *  is informational only.
    *--------------------------------------------------------------------*/
   if (num_rows*blk_size != x_size) ierr = 1;
   if (num_cols*blk_size != y_size) ierr = 2;
   if (num_rows*blk_size != x_size && num_cols*blk_size != y_size) ierr = 3;

   /*-----------------------------------------------------------------------
    * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
    *-----------------------------------------------------------------------*/
   if (alpha == 0.0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_cols*blk_size; i++) y_data[i] *= beta;
      return ierr;
   }

   /*-----------------------------------------------------------------------
    * y = (beta/alpha)*y
    *-----------------------------------------------------------------------*/
   temp = beta / alpha;
   if (temp != 1.0)
   {
      if (temp == 0.0)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_cols*blk_size; i++) y_data[i] = 0.0;
      }
      else
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_cols*blk_size; i++) y_data[i] *= temp;
      }
   }

   /*-----------------------------------------------------------------
    * y += A^T*x
    * NOTE(review): this loop parallelizes over rows i but scatters its
    * writes by column (y_data[j*blk_size+b2] with j = A_j[jj]); two rows
    * sharing a column would update the same y entries concurrently,
    * which looks like a data race under OpenMP -- confirm against the
    * upstream hypre sources before relying on threaded MatvecT.
    *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i, jj,j, b1, b2) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < num_rows; i++)
   {
      for (jj = A_i[i]; jj < A_i[i+1]; jj++) /*each nonzero in that row*/
      {
         for (b1 = 0; b1 < blk_size; b1++) /*row */
         {
            for (b2 = 0; b2 < blk_size; b2++) /*col*/
            {
               j = A_j[jj]; /*col */
               y_data[j*blk_size+b2] +=
                  A_data[jj*bnnz+b1*blk_size+b2] * x_data[i*blk_size + b1];
            }
         }
      }
   }

   /*-----------------------------------------------------------------
    * y = alpha*y
    *-----------------------------------------------------------------*/
   if (alpha != 1.0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_cols*blk_size; i++) y_data[i] *= alpha;
   }

   return ierr;
}
setfeenv.c
/* Fortran name-mangling: pick the symbol name the Fortran compiler expects
   (setfeenv, setfeenv__, or setfeenv_) unless on CRAY where SETFEENV is used
   as-is. */
#ifndef CRAY
# ifdef NOUNDERSCORE
#      define SETFEENV setfeenv
# else
#   ifdef F2CSTYLE
#      define SETFEENV setfeenv__
#   else
#      define SETFEENV setfeenv_
#   endif
# endif
#endif

#include <fenv.h>
#include <stdio.h>

/*
 * Fortran-callable helper: capture the calling thread's floating-point
 * environment and install it on every OpenMP thread, so all threads share the
 * same rounding mode / FP exception settings.  A no-op when compiled without
 * OpenMP.  Errors are reported to stderr but are not fatal.
 */
void SETFEENV()
{
#ifdef _OPENMP
    fenv_t envp;

    /* fegetenv/fesetenv return nonzero on failure; previously the status was
       stored in an unused variable and errors were silently ignored. */
    if (fegetenv(&envp) != 0) {
        perror("Error getting fp env");
        return;                 /* nothing sensible to propagate to the team */
    }

#pragma omp parallel shared(envp)
    {
        if (fesetenv(&envp) != 0) {
            perror("Error setting fp env");
        }
    }
#endif
}
TGV_core.c
/*
 * This work is part of the Core Imaging Library developed by
 * Visual Analytics and Imaging System Group of the Science Technology
 * Facilities Council, STFC
 *
 * Copyright 2019 Daniil Kazantsev
 * Copyright 2019 Srikanth Nagella, Edoardo Pasca
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "TGV_core.h"

/* C-OMP implementation of Primal-Dual denoising method for
 * Total Generilized Variation (TGV)-L2 model [1] (2D/3D case)
 *
 * Input Parameters:
 * 1. Noisy image/volume (2D/3D)
 * 2. lambda - regularisation parameter
 * 3. parameter to control the first-order term (alpha1)
 * 4. parameter to control the second-order term (alpha0)
 * 5. Number of Chambolle-Pock (Primal-Dual) iterations
 * 6. Lipshitz constant (default is 12)
 * 7. eplsilon: tolerance constant
 *
 * Output:
 * [1] Filtered/regularized image/volume
 * [2] Information vector which contains [iteration no., reached tolerance]
 *
 * References:
 * [1] K. Bredies "Total Generalized Variation"
 *
 */

float TGV_main(float *U0, float *U, float *infovector, float lambda, float alpha1, float alpha0, int iter, float L2, float epsil, int dimX, int dimY, int dimZ)
{
    long DimTotal;
    int ll, j;
    float re, re1;
    re = 0.0f; re1 = 0.0f;
    int count = 0;
    float *U_old, *P1, *P2, *Q1, *Q2, *Q3, *V1, *V1_old, *V2, *V2_old, tau, sigma;

    /* NOTE(review): the product dimX*dimY*dimZ is evaluated in int before the
       cast, so very large volumes could overflow; also the tolerance loops
       below index with 'int j' over the long DimTotal -- confirm intended
       size limits. */
    DimTotal = (long)(dimX*dimY*dimZ);
    copyIm(U0, U, (long)(dimX), (long)(dimY), (long)(dimZ));

    /* initialize: primal/dual step sizes from the Lipschitz constant */
    tau = pow(L2,-0.5);
    sigma = pow(L2,-0.5);

    /* dual variables (calloc zero-initializes; return values are unchecked) */
    P1 = calloc(DimTotal, sizeof(float));
    P2 = calloc(DimTotal, sizeof(float));

    Q1 = calloc(DimTotal, sizeof(float));
    Q2 = calloc(DimTotal, sizeof(float));
    Q3 = calloc(DimTotal, sizeof(float));

    U_old = calloc(DimTotal, sizeof(float));

    V1 = calloc(DimTotal, sizeof(float));
    V1_old = calloc(DimTotal, sizeof(float));
    V2 = calloc(DimTotal, sizeof(float));
    V2_old = calloc(DimTotal, sizeof(float));

    if (dimZ == 1) {
        /*2D case*/
        /* Primal-dual iterations begin here */
        for(ll = 0; ll < iter; ll++) {

            /* Calculate Dual Variable P */
            DualP_2D(U, V1, V2, P1, P2, (long)(dimX), (long)(dimY), sigma);

            /*Projection onto convex set for P*/
            ProjP_2D(P1, P2, (long)(dimX), (long)(dimY), alpha1);

            /* Calculate Dual Variable Q */
            DualQ_2D(V1, V2, Q1, Q2, Q3, (long)(dimX), (long)(dimY), sigma);

            /*Projection onto convex set for Q*/
            ProjQ_2D(Q1, Q2, Q3, (long)(dimX), (long)(dimY), alpha0);

            /*saving U into U_old*/
            copyIm(U, U_old, (long)(dimX), (long)(dimY), 1l);

            /*adjoint operation -> divergence and projection of P*/
            DivProjP_2D(U, U0, P1, P2, (long)(dimX), (long)(dimY), lambda, tau);

            /*get updated solution U (over-relaxation: U = 2*U - U_old)*/
            newU(U, U_old, (long)(dimX), (long)(dimY));

            /*saving V into V_old*/
            copyIm(V1, V1_old, (long)(dimX), (long)(dimY), 1l);
            copyIm(V2, V2_old, (long)(dimX), (long)(dimY), 1l);

            /* upd V*/
            UpdV_2D(V1, V2, P1, P2, Q1, Q2, Q3, (long)(dimX), (long)(dimY), tau);

            /*get new V*/
            newU(V1, V1_old, (long)(dimX), (long)(dimY));
            newU(V2, V2_old, (long)(dimX), (long)(dimY));

            /* check early stopping criteria: relative change of U, every 5th
               iteration; stop after the tolerance is met more than 3 times */
            if ((epsil != 0.0f) && (ll % 5 == 0)) {
                re = 0.0f; re1 = 0.0f;
                for(j=0; j<DimTotal; j++)
                {
                    re += powf(U[j] - U_old[j],2);
                    re1 += powf(U[j],2);
                }
                re = sqrtf(re)/sqrtf(re1);
                if (re < epsil)  count++;
                if (count > 3) break;
            }
        } /*end of iterations*/
    }
    else {
        /*3D case*/
        float *P3, *Q4, *Q5, *Q6, *V3, *V3_old;

        P3 = calloc(DimTotal, sizeof(float));
        Q4 = calloc(DimTotal, sizeof(float));
        Q5 = calloc(DimTotal, sizeof(float));
        Q6 = calloc(DimTotal, sizeof(float));
        V3 = calloc(DimTotal, sizeof(float));
        V3_old = calloc(DimTotal, sizeof(float));

        /* Primal-dual iterations begin here */
        for(ll = 0; ll < iter; ll++) {

            /* Calculate Dual Variable P */
            DualP_3D(U, V1, V2, V3, P1, P2, P3, (long)(dimX), (long)(dimY), (long)(dimZ), sigma);

            /*Projection onto convex set for P*/
            ProjP_3D(P1, P2, P3, (long)(dimX), (long)(dimY), (long)(dimZ), alpha1);

            /* Calculate Dual Variable Q */
            DualQ_3D(V1, V2, V3, Q1, Q2, Q3, Q4, Q5, Q6, (long)(dimX), (long)(dimY), (long)(dimZ), sigma);

            /*Projection onto convex set for Q*/
            ProjQ_3D(Q1, Q2, Q3, Q4, Q5, Q6, (long)(dimX), (long)(dimY), (long)(dimZ), alpha0);

            /*saving U into U_old*/
            copyIm(U, U_old, (long)(dimX), (long)(dimY), (long)(dimZ));

            /*adjoint operation -> divergence and projection of P*/
            DivProjP_3D(U, U0, P1, P2, P3, (long)(dimX), (long)(dimY), (long)(dimZ), lambda, tau);

            /*get updated solution U*/
            newU3D(U, U_old, (long)(dimX), (long)(dimY), (long)(dimZ));

            /*saving V into V_old*/
            copyIm_3Ar(V1, V2, V3, V1_old, V2_old, V3_old, (long)(dimX), (long)(dimY), (long)(dimZ));

            /* upd V*/
            UpdV_3D(V1, V2, V3, P1, P2, P3, Q1, Q2, Q3, Q4, Q5, Q6, (long)(dimX), (long)(dimY), (long)(dimZ), tau);

            /*get new V*/
            newU3D_3Ar(V1, V2, V3, V1_old, V2_old, V3_old, (long)(dimX), (long)(dimY), (long)(dimZ));

            /* check early stopping criteria */
            if ((epsil != 0.0f) && (ll % 5 == 0)) {
                re = 0.0f; re1 = 0.0f;
                for(j=0; j<DimTotal; j++)
                {
                    re += powf(U[j] - U_old[j],2);
                    re1 += powf(U[j],2);
                }
                re = sqrtf(re)/sqrtf(re1);
                if (re < epsil)  count++;
                if (count > 3) break;
            }
        } /*end of iterations*/
        free(P3);free(Q4);free(Q5);free(Q6);free(V3);free(V3_old);
    }

    /*freeing*/
    free(P1);free(P2);free(Q1);free(Q2);free(Q3);free(U_old);
    free(V1);free(V2);free(V1_old);free(V2_old);

    /*adding info into info_vector */
    infovector[0] = (float)(ll);  /*iterations number (if stopped earlier based on tolerance)*/
    infovector[1] = re;  /* reached tolerance */

    return 0;
}

/********************************************************************/
/***************************2D Functions*****************************/
/********************************************************************/

/*Calculating dual variable P (using forward differences)*/
float DualP_2D(float *U, float *V1, float *V2, float *P1, float *P2, long dimX, long dimY, float sigma)
{
    long i,j, index;
#pragma omp parallel for shared(U,V1,V2,P1,P2) private(i,j,index)
    for(j=0; j<dimY; j++) {
        for(i=0; i<dimX; i++) {
            index = j*dimX+i;
            /* symmetric boundary conditions (Neuman) */
            if (i == dimX-1) P1[index] += sigma*(-V1[index]);
            else P1[index] += sigma*((U[j*dimX+(i+1)] - U[index])  - V1[index]);
            if (j == dimY-1) P2[index] += sigma*(-V2[index]);
            else  P2[index] += sigma*((U[(j+1)*dimX+i] - U[index])  - V2[index]);
        }}
    return 1;
}

/*Projection onto convex set for P (pointwise scaling when |P|/alpha1 > 1)*/
float ProjP_2D(float *P1, float *P2, long dimX, long dimY, float alpha1)
{
    float grad_magn;
    long i,j,index;
#pragma omp parallel for shared(P1,P2) private(i,j,index,grad_magn)
    for(j=0; j<dimY; j++) {
        for(i=0; i<dimX; i++) {
            index = j*dimX+i;
            grad_magn = (sqrtf(P1[index]*P1[index] + P2[index]*P2[index]))/alpha1;
            if (grad_magn > 1.0f) {
                P1[index] /= grad_magn;
                P2[index] /= grad_magn;
            }
        }}
    return 1;
}

/*Calculating dual variable Q (using forward differences);
  Q3 holds the symmetrized off-diagonal component 0.5*(dV1/dy + dV2/dx)*/
float DualQ_2D(float *V1, float *V2, float *Q1, float *Q2, float *Q3, long dimX, long dimY, float sigma)
{
    long i,j,index;
    float q1, q2, q11, q22;
#pragma omp parallel for shared(Q1,Q2,Q3,V1,V2) private(i,j,index,q1,q2,q11,q22)
    for(j=0; j<dimY; j++) {
        for(i=0; i<dimX; i++) {
            index = j*dimX+i;
            q1 = 0.0f; q11 = 0.0f; q2 = 0.0f; q22 = 0.0f;
            /* boundary conditions (Neuman) */
            if (i != dimX-1){
                q1 = V1[j*dimX+(i+1)] - V1[index];
                q11 = V2[j*dimX+(i+1)] - V2[index];
            }
            if (j != dimY-1) {
                q2 = V2[(j+1)*dimX+i] - V2[index];
                q22 = V1[(j+1)*dimX+i] - V1[index];
            }
            Q1[index] += sigma*(q1);
            Q2[index] += sigma*(q2);
            Q3[index] += sigma*(0.5f*(q11 + q22));
        }}
    return 1;
}

/*Projection onto convex set for Q (off-diagonal Q3 counted twice in the norm)*/
float ProjQ_2D(float *Q1, float *Q2, float *Q3, long dimX, long dimY, float alpha0)
{
    float grad_magn;
    long i,j,index;
#pragma omp parallel for shared(Q1,Q2,Q3) private(i,j,index,grad_magn)
    for(j=0; j<dimY; j++) {
        for(i=0; i<dimX; i++) {
            index = j*dimX+i;
            grad_magn = sqrtf(Q1[index]*Q1[index] + Q2[index]*Q2[index] + 2*Q3[index]*Q3[index]);
            grad_magn = grad_magn/alpha0;
            if (grad_magn > 1.0f) {
                Q1[index] /= grad_magn;
                Q2[index] /= grad_magn;
                Q3[index] /= grad_magn;
            }
        }}
    return 1;
}

/* Divergence and projection for P (backward differences);
   proximal step mixing the divergence of P with the data term U0 */
float DivProjP_2D(float *U, float *U0, float *P1, float *P2, long dimX, long dimY, float lambda, float tau)
{
    long i,j,index;
    float P_v1, P_v2, div;
#pragma omp parallel for shared(U,U0,P1,P2) private(i,j,index,P_v1,P_v2,div)
    for(j=0; j<dimY; j++) {
        for(i=0; i<dimX; i++) {
            index = j*dimX+i;
            if (i == 0) P_v1 = P1[index];
            else if (i == dimX-1) P_v1 = -P1[j*dimX+(i-1)];
            else P_v1 = P1[index] - P1[j*dimX+(i-1)];
            if (j == 0) P_v2 = P2[index];
            else if (j == dimY-1) P_v2 = -P2[(j-1)*dimX+i];
            else  P_v2 = P2[index] - P2[(j-1)*dimX+i];
            div = P_v1 + P_v2;
            U[index] = (lambda*(U[index] + tau*div) + tau*U0[index])/(lambda + tau);
        }}
    return *U;
}

/*get updated solution U (over-relaxation U = 2*U - U_old)*/
float newU(float *U, float *U_old, long dimX, long dimY)
{
    long i;
#pragma omp parallel for shared(U,U_old) private(i)
    for(i=0; i<dimX*dimY; i++) U[i] = 2*U[i] - U_old[i];
    return *U;
}

/*get update for V (backward differences)*/
float UpdV_2D(float *V1, float *V2, float *P1, float *P2, float *Q1, float *Q2, float *Q3, long dimX, long dimY, float tau)
{
    long i, j, index;
    float q1, q3_x, q3_y, q2, div1, div2;
#pragma omp parallel for shared(V1,V2,P1,P2,Q1,Q2,Q3) private(i, j, index, q1, q3_x, q3_y, q2, div1, div2)
    for(j=0; j<dimY; j++) {
        for(i=0; i<dimX; i++) {
            index = j*dimX+i;
            /* boundary conditions (Neuman) */
            if (i == 0) {
                q1 = Q1[index];
                q3_x = Q3[index];
            }
            else if (i == dimX-1) {
                q1 = -Q1[j*dimX+(i-1)];
                q3_x = -Q3[j*dimX+(i-1)];
            }
            else {
                q1 = Q1[index] - Q1[j*dimX+(i-1)];
                q3_x = Q3[index] - Q3[j*dimX+(i-1)];
            }
            if (j == 0) {
                q2 = Q2[index];
                q3_y = Q3[index];
            }
            else if (j == dimY-1) {
                q2 = -Q2[(j-1)*dimX+i];
                q3_y = -Q3[(j-1)*dimX+i];
            }
            else {
                q2 = Q2[index] - Q2[(j-1)*dimX+i];
                q3_y = Q3[index] - Q3[(j-1)*dimX+i];
            }
            div1 = q1 + q3_y;
            div2 = q3_x + q2;
            V1[index] += tau*(P1[index] + div1);
            V2[index] += tau*(P2[index] + div2);
        }}
    return 1;
}

/********************************************************************/
/***************************3D Functions*****************************/
/********************************************************************/

/*Calculating dual variable P (using forward differences)*/
float DualP_3D(float *U, float *V1, float *V2, float *V3, float *P1, float *P2, float *P3, long dimX, long dimY, long dimZ, float sigma)
{
    long i,j,k, index;
#pragma omp parallel for shared(U,V1,V2,V3,P1,P2,P3) private(i,j,k,index)
    for(k=0; k<dimZ; k++) {
        for(j=0; j<dimY; j++) {
            for(i=0; i<dimX; i++) {
                index = (dimX*dimY)*k + j*dimX+i;
                /* symmetric boundary conditions (Neuman) */
                if (i == dimX-1) P1[index] += sigma*(-V1[index]);
                else P1[index] += sigma*((U[(dimX*dimY)*k + j*dimX+(i+1)] - U[index])  - V1[index]);
                if (j == dimY-1) P2[index] += sigma*(-V2[index]);
                else  P2[index] += sigma*((U[(dimX*dimY)*k + (j+1)*dimX+i] - U[index])  - V2[index]);
                if (k == dimZ-1) P3[index] += sigma*(-V3[index]);
                else  P3[index] += sigma*((U[(dimX*dimY)*(k+1) + j*dimX+i] - U[index])  - V3[index]);
            }}}
    return 1;
}

/*Projection onto convex set for P*/
float ProjP_3D(float *P1, float *P2, float *P3, long dimX, long dimY, long dimZ, float alpha1)
{
    float grad_magn;
    long i,j,k,index;
#pragma omp parallel for shared(P1,P2,P3) private(i,j,k,index,grad_magn)
    for(k=0; k<dimZ; k++) {
        for(j=0; j<dimY; j++) {
            for(i=0; i<dimX; i++) {
                index = (dimX*dimY)*k + j*dimX+i;
                grad_magn = (sqrtf(P1[index]*P1[index] + P2[index]*P2[index]+ P3[index]*P3[index]))/alpha1;
                if (grad_magn > 1.0f) {
                    P1[index] /= grad_magn;
                    P2[index] /= grad_magn;
                    P3[index] /= grad_magn;
                }
            }}}
    return 1;
}

/*Calculating dual variable Q (using forward differences);
  Q1..Q3 are the diagonal entries, Q4..Q6 the symmetrized off-diagonals*/
float DualQ_3D(float *V1, float *V2, float *V3, float *Q1, float *Q2, float *Q3, float *Q4, float *Q5, float *Q6, long dimX, long dimY, long dimZ, float sigma)
{
    long i,j,k,index;
    float q1, q2, q3, q11, q22, q33, q44, q55, q66;
#pragma omp parallel for shared(Q1,Q2,Q3,Q4,Q5,Q6,V1,V2,V3) private(i,j,k,index,q1,q2,q3,q11,q22,q33,q44,q55,q66)
    for(k=0; k<dimZ; k++) {
        for(j=0; j<dimY; j++) {
            for(i=0; i<dimX; i++) {
                index = (dimX*dimY)*k + j*dimX+i;
                q1 = 0.0f; q11 = 0.0f; q33 = 0.0f; q2 = 0.0f; q22 = 0.0f; q55 = 0.0f; q3 = 0.0f; q44 = 0.0f; q66 = 0.0f;
                /* symmetric boundary conditions (Neuman) */
                if (i != dimX-1){
                    q1 = V1[(dimX*dimY)*k + j*dimX+(i+1)] - V1[index];
                    q11 = V2[(dimX*dimY)*k + j*dimX+(i+1)] - V2[index];
                    q33 = V3[(dimX*dimY)*k + j*dimX+(i+1)] - V3[index];
                }
                if (j != dimY-1) {
                    q2 = V2[(dimX*dimY)*k + (j+1)*dimX+i] - V2[index];
                    q22 = V1[(dimX*dimY)*k + (j+1)*dimX+i] - V1[index];
                    q55 = V3[(dimX*dimY)*k + (j+1)*dimX+i] - V3[index];
                }
                if (k != dimZ-1) {
                    q3 = V3[(dimX*dimY)*(k+1) + j*dimX+i] - V3[index];
                    q44 = V1[(dimX*dimY)*(k+1) + j*dimX+i] - V1[index];
                    q66 = V2[(dimX*dimY)*(k+1) + j*dimX+i] - V2[index];
                }
                Q1[index] += sigma*(q1); /*Q11*/
                Q2[index] += sigma*(q2); /*Q22*/
                Q3[index] += sigma*(q3); /*Q33*/
                Q4[index] += sigma*(0.5f*(q11 + q22)); /* Q21 / Q12 */
                Q5[index] += sigma*(0.5f*(q33 + q44)); /* Q31 / Q13 */
                Q6[index] += sigma*(0.5f*(q55 + q66)); /* Q32 / Q23 */
            }}}
    return 1;
}

float ProjQ_3D(float *Q1, float *Q2, float *Q3, float *Q4, float *Q5, float *Q6, long dimX, long dimY, long dimZ, float alpha0)
{
    float
grad_magn; long i,j,k,index; #pragma omp parallel for shared(Q1,Q2,Q3,Q4,Q5,Q6) private(i,j,k,index,grad_magn) for(k=0; k<dimZ; k++) { for(j=0; j<dimY; j++) { for(i=0; i<dimX; i++) { index = (dimX*dimY)*k + j*dimX+i; grad_magn = sqrtf(Q1[index]*Q1[index] + Q2[index]*Q2[index] + Q3[index]*Q3[index] + 2.0f*Q4[index]*Q4[index] + 2.0f*Q5[index]*Q5[index] + 2.0f*Q6[index]*Q6[index]); grad_magn = grad_magn/alpha0; if (grad_magn > 1.0f) { Q1[index] /= grad_magn; Q2[index] /= grad_magn; Q3[index] /= grad_magn; Q4[index] /= grad_magn; Q5[index] /= grad_magn; Q6[index] /= grad_magn; } }}} return 1; } /* Divergence and projection for P*/ float DivProjP_3D(float *U, float *U0, float *P1, float *P2, float *P3, long dimX, long dimY, long dimZ, float lambda, float tau) { long i,j,k,index; float P_v1, P_v2, P_v3, div; #pragma omp parallel for shared(U,U0,P1,P2,P3) private(i,j,k,index,P_v1,P_v2,P_v3,div) for(k=0; k<dimZ; k++) { for(j=0; j<dimY; j++) { for(i=0; i<dimX; i++) { index = (dimX*dimY)*k + j*dimX+i; if (i == 0) P_v1 = P1[index]; else if (i == dimX-1) P_v1 = -P1[(dimX*dimY)*k + j*dimX+(i-1)]; else P_v1 = P1[index] - P1[(dimX*dimY)*k + j*dimX+(i-1)]; if (j == 0) P_v2 = P2[index]; else if (j == dimY-1) P_v2 = -P2[(dimX*dimY)*k + (j-1)*dimX+i]; else P_v2 = P2[index] - P2[(dimX*dimY)*k + (j-1)*dimX+i]; if (k == 0) P_v3 = P3[index]; else if (k == dimZ-1) P_v3 = -P3[(dimX*dimY)*(k-1) + (j)*dimX+i]; else P_v3 = P3[index] - P3[(dimX*dimY)*(k-1) + (j)*dimX+i]; div = P_v1 + P_v2 + P_v3; U[index] = (lambda*(U[index] + tau*div) + tau*U0[index])/(lambda + tau); }}} return *U; } /*get update for V*/ float UpdV_3D(float *V1, float *V2, float *V3, float *P1, float *P2, float *P3, float *Q1, float *Q2, float *Q3, float *Q4, float *Q5, float *Q6, long dimX, long dimY, long dimZ, float tau) { long i,j,k,index; float q1, q4x, q5x, q2, q4y, q6y, q6z, q5z, q3, div1, div2, div3; #pragma omp parallel for shared(V1,V2,V3,P1,P2,P3,Q1,Q2,Q3,Q4,Q5,Q6) 
private(i,j,k,index,q1,q4x,q5x,q2,q4y,q6y,q6z,q5z,q3,div1,div2,div3) for(k=0; k<dimZ; k++) { for(j=0; j<dimY; j++) { for(i=0; i<dimX; i++) { index = (dimX*dimY)*k + j*dimX+i; q1 = 0.0f; q4x= 0.0f; q5x= 0.0f; q2= 0.0f; q4y= 0.0f; q6y= 0.0f; q6z= 0.0f; q5z= 0.0f; q3= 0.0f; /* Q1 - Q11, Q2 - Q22, Q3 - Q33, Q4 - Q21/Q12, Q5 - Q31/Q13, Q6 - Q32/Q23*/ /* symmetric boundary conditions (Neuman) */ if (i == 0) { q1 = Q1[index]; q4x = Q4[index]; q5x = Q5[index]; } else if (i == dimX-1) { q1 = -Q1[(dimX*dimY)*k + j*dimX+(i-1)]; q4x = -Q4[(dimX*dimY)*k + j*dimX+(i-1)]; q5x = -Q5[(dimX*dimY)*k + j*dimX+(i-1)]; } else { q1 = Q1[index] - Q1[(dimX*dimY)*k + j*dimX+(i-1)]; q4x = Q4[index] - Q4[(dimX*dimY)*k + j*dimX+(i-1)]; q5x = Q5[index] - Q5[(dimX*dimY)*k + j*dimX+(i-1)]; } if (j == 0) { q2 = Q2[index]; q4y = Q4[index]; q6y = Q6[index]; } else if (j == dimY-1) { q2 = -Q2[(dimX*dimY)*k + (j-1)*dimX+i]; q4y = -Q4[(dimX*dimY)*k + (j-1)*dimX+i]; q6y = -Q6[(dimX*dimY)*k + (j-1)*dimX+i]; } else { q2 = Q2[index] - Q2[(dimX*dimY)*k + (j-1)*dimX+i]; q4y = Q4[index] - Q4[(dimX*dimY)*k + (j-1)*dimX+i]; q6y = Q6[index] - Q6[(dimX*dimY)*k + (j-1)*dimX+i]; } if (k == 0) { q6z = Q6[index]; q5z = Q5[index]; q3 = Q3[index]; } else if (k == dimZ-1) { q6z = -Q6[(dimX*dimY)*(k-1) + (j)*dimX+i]; q5z = -Q5[(dimX*dimY)*(k-1) + (j)*dimX+i]; q3 = -Q3[(dimX*dimY)*(k-1) + (j)*dimX+i]; } else { q6z = Q6[index] - Q6[(dimX*dimY)*(k-1) + (j)*dimX+i]; q5z = Q5[index] - Q5[(dimX*dimY)*(k-1) + (j)*dimX+i]; q3 = Q3[index] - Q3[(dimX*dimY)*(k-1) + (j)*dimX+i]; } div1 = q1 + q4y + q5z; div2 = q4x + q2 + q6z; div3 = q5x + q6y + q3; V1[index] += tau*(P1[index] + div1); V2[index] += tau*(P2[index] + div2); V3[index] += tau*(P3[index] + div3); }}} return 1; } float copyIm_3Ar(float *V1, float *V2, float *V3, float *V1_old, float *V2_old, float *V3_old, long dimX, long dimY, long dimZ) { long j; #pragma omp parallel for shared(V1, V2, V3, V1_old, V2_old, V3_old) private(j) for (j = 0; j<dimX*dimY*dimZ; j++) { V1_old[j] 
= V1[j]; V2_old[j] = V2[j]; V3_old[j] = V3[j]; } return 1; } /*get updated solution U*/ float newU3D(float *U, float *U_old, long dimX, long dimY, long dimZ) { long i; #pragma omp parallel for shared(U, U_old) private(i) for(i=0; i<dimX*dimY*dimZ; i++) U[i] = 2.0f*U[i] - U_old[i]; return *U; } /*get updated solution U*/ float newU3D_3Ar(float *V1, float *V2, float *V3, float *V1_old, float *V2_old, float *V3_old, long dimX, long dimY, long dimZ) { long i; #pragma omp parallel for shared(V1, V2, V3, V1_old, V2_old, V3_old) private(i) for(i=0; i<dimX*dimY*dimZ; i++) { V1[i] = 2.0f*V1[i] - V1_old[i]; V2[i] = 2.0f*V2[i] - V2_old[i]; V3[i] = 2.0f*V3[i] - V3_old[i]; } return 1; }
phpassMD5_fmt_plug.c
/*
 * This software was written by Jim Fougeron jfoug AT cox dot net in 2009.
 * No copyright is claimed, and the software is hereby placed in the public
 * domain. In case this attempt to disclaim copyright and place the software in
 * the public domain is deemed null and void, then the software is Copyright
 * (c) 2009 Jim Fougeron and it is hereby released to the general public under
 * the following terms:
 *
 * This software may be modified, redistributed, and used for any purpose,
 * in source and binary forms, with or without modification.
 *
 * Cracks phpass 'portable' hashes, and phpBBv3 hashes, which are simply phpass
 * portable, with a slightly different signature. These are 8 byte salted
 * hashes, with a 1 byte 'salt' that defines the number of loops to compute.
 * Internally we work with 8 byte salt (the 'real' salt), but let john track
 * it as 9 byte salts to also pass in the loop count. Code works even if
 * multiple loop count values within the input. PHPv5 kicked up the loop
 * count, Wordpress uses same format, but even higher loop count. The loop
 * count can be used to 'tune' the format, by asking to process only
 * hashes of a specific count.
 *
 * Uses OpenSSL's MD5 and SIMD MD5.
 *
 * Code was pretty much rewritten to re-enable this format, and to deprecate
 * dynamic_17. It required porting to use the new intrinsic SIMD code, including
 * AVX2, AVX-512, and others, and the overall starting point for this older
 * code was pretty bad. This port done August 2015, Jim Fougeron.
*/

#if FMT_EXTERNS_H
extern struct fmt_main fmt_phpassmd5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_phpassmd5);
#else

#include <string.h>

#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "md5.h"
#include "phpass_common.h"

//#undef _OPENMP
//#undef SIMD_COEF_32
//#undef SIMD_PARA_MD5

#ifdef _OPENMP
#define OMP_SCALE 32
#include <omp.h>
#endif

#include "simd-intrinsics.h"
#include "memdbg.h"

#define FORMAT_LABEL "phpass"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "phpass ($P$ or $H$) " MD5_ALGORITHM_NAME

#ifdef SIMD_COEF_32
/* number of keys processed by one SIMD MD5 call */
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD5)
#endif

#define BENCHMARK_COMMENT " ($P$9)"
#ifndef MD5_BUF_SIZ
#define MD5_BUF_SIZ 16
#endif
#define DIGEST_SIZE 16
#define SALT_SIZE 8
// NOTE salts are only 8 bytes, but we tell john they are 9.
// We then take the 8 bytes of salt, and append the 1 byte of
// loop count data, making it 9.

#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
// Byte offset of logical byte i of key `index` inside the interleaved SIMD
// MD5 input buffers (lanes of 32-bit words, SIMD_COEF_32 lanes per block).
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*MD5_BUF_SIZ*4*SIMD_COEF_32 )
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

#ifdef SIMD_COEF_32
// hash with key appended (used on all steps other than first)
static ARCH_WORD_32 (*hash_key)[MD5_BUF_SIZ*NBKEYS];
// salt with key appended (only used in 1st step).
static ARCH_WORD_32 (*cursalt)[MD5_BUF_SIZ*NBKEYS];
// final MD5 digests, one DIGEST_SIZE block per key, SIMD-interleaved
static ARCH_WORD_32 (*crypt_key)[DIGEST_SIZE/4*NBKEYS];
static unsigned max_keys;
#else
// scalar path: digest followed by the key, so the iterated MD5(hash||key)
// can be computed from a single contiguous buffer
static char (*crypt_key)[PHPASS_CPU_PLAINTEXT_LENGTH+1+PHPASS_BINARY_SIZE];
static char (*saved_key)[PHPASS_CPU_PLAINTEXT_LENGTH + 1];
static unsigned (*saved_len);
static unsigned char cursalt[SALT_SIZE];
#endif

// iteration count decoded from the 9th salt byte by set_salt()
static unsigned loopCnt;

/* Allocate per-key buffers; scales key count by OMP_SCALE under OpenMP. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_32
	crypt_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
	                             sizeof(*crypt_key), MEM_ALIGN_SIMD);
	hash_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
	                            sizeof(*hash_key), MEM_ALIGN_SIMD);
	cursalt = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
	                           sizeof(*cursalt), MEM_ALIGN_SIMD);
	max_keys = self->params.max_keys_per_crypt;
#else
	saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key));
	saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
#endif
}

/* Release everything init() allocated. */
static void done(void)
{
	MEM_FREE(crypt_key);
#ifndef SIMD_COEF_32
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
#else
	MEM_FREE(hash_key);
	MEM_FREE(cursalt);
#endif
}

/* Install the 8-byte salt into every key slot and decode the loop count
 * from the 9th salt byte. */
static void set_salt(void *salt)
{
#ifdef SIMD_COEF_32
	int i;
	ARCH_WORD_32 *p;
	p = cursalt[0];
	for (i = 0; i < max_keys; ++i) {
		// step over one SIMD block once all its lanes are filled
		if (i && (i&(SIMD_COEF_32-1)) == 0)
			p += 15*SIMD_COEF_32;
		p[0] = ((ARCH_WORD_32 *)salt)[0];
		p[SIMD_COEF_32] = ((ARCH_WORD_32 *)salt)[1];
		++p;
	}
#else // !SIMD_COEF_32
	memcpy(cursalt, salt, 8);
#endif
	// compute the loop count for this salt
	loopCnt = (1 << (atoi64[ARCH_INDEX(((char*)salt)[8])]));
}

static void set_key(char *key, int index)
{
#ifdef SIMD_COEF_32
	// in SIMD, we put the key into the cursalt (at offset 8),
	// and into hash_key (at offset 16). We also clean both
	// buffers, and put the 0x80, and the length into them.
	int len = strlen(key), i, j;
	unsigned char *co1 = (unsigned char*)cursalt;
	unsigned char *co2 = (unsigned char*)hash_key;
	for (i = 0; i < len; ++i) {
		// byte by byte. Slow but easy to follow, and the
		// speed here does not really matter.
		co1[GETPOS(i+8,index)] = key[i];
		co2[GETPOS(i+16,index)] = key[i];
	}
	// Place the end of string marker
	co1[GETPOS(i+8,index)] = 0x80;
	co2[GETPOS(i+16,index)] = 0x80;
	// clean out both buffers top parts.
	for (j = i+9; j < 56; ++j)
		co1[GETPOS(j,index)] = 0;
	for (j = i+17; j < 56; ++j)
		co2[GETPOS(j,index)] = 0;
	// set the length in bits of salt and hash
	co1[GETPOS(56,index)] = ((len+8)<<3)&0xFF;
	co2[GETPOS(56,index)] = ((len+16)<<3)&0xFF;
	co1[GETPOS(57,index)] = ((len+8)<<3)>>8;
	co2[GETPOS(57,index)] = ((len+16)<<3)>>8;
#else
	int len= strlen(key);
	saved_len[index]=len;
	strcpy(saved_key[index], key);
#endif
}

/* Recover the plaintext for `index`; in SIMD mode it is re-read from the
 * interleaved salt buffer using the stored bit length. */
static char *get_key(int index)
{
#ifdef SIMD_COEF_32
	unsigned char *saltb8 = (unsigned char*)cursalt;
	static char out[PHPASS_CPU_PLAINTEXT_LENGTH+1];
	int len, i;
	// get salt length (in bits)
	len = saltb8[GETPOS(57,index)];
	len <<= 8;
	len |= saltb8[GETPOS(56,index)];
	// convert to bytes.
	len >>= 3;
	// we skip the 8 bytes of salt (to get to password).
	len -= 8;
	// now grab the password.
	for (i = 0; i < len; ++i)
		out[i] = saltb8[GETPOS(8+i,index)];
	out[i] = 0;
	return out;
#else
	return saved_key[index];
#endif
}

/* Quick scan: does any computed hash match the first 32 bits of binary? */
static int cmp_all(void *binary, int count)
{
	unsigned i = 0;
#ifdef SIMD_COEF_32
	ARCH_WORD_32 *p;
	ARCH_WORD_32 bin = *(ARCH_WORD_32 *)binary;
	p = crypt_key[0];
	for (i = 0; i < count; ++i) {
		// skip the remaining 3 digest words of a full SIMD block
		if (i && (i&(SIMD_COEF_32-1)) == 0)
			p += 3*SIMD_COEF_32;
		if (bin == *p++)
			return 1;
	}
	return 0;
#else
	for (i = 0; i < count; i++)
		if (!memcmp(binary, crypt_key[i], PHPASS_BINARY_SIZE))
			return 1;
	return 0;
#endif
}

/* cmp_one already compares the full binary, so nothing more to check. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Compare the full 128-bit digest of one candidate against binary. */
static int cmp_one(void * binary, int index)
{
#ifdef SIMD_COEF_32
	int idx = index&(SIMD_COEF_32-1);
	int off = (index/SIMD_COEF_32)*(4*SIMD_COEF_32);
	return((((ARCH_WORD_32 *)binary)[0] == ((ARCH_WORD_32 *)crypt_key)[off+0*SIMD_COEF_32+idx]) &&
	       (((ARCH_WORD_32 *)binary)[1] == ((ARCH_WORD_32 *)crypt_key)[off+1*SIMD_COEF_32+idx]) &&
	       (((ARCH_WORD_32 *)binary)[2] == ((ARCH_WORD_32 *)crypt_key)[off+2*SIMD_COEF_32+idx]) &&
	       (((ARCH_WORD_32 *)binary)[3] == ((ARCH_WORD_32 *)crypt_key)[off+3*SIMD_COEF_32+idx]));
#else
	return !memcmp(binary, crypt_key[index], PHPASS_BINARY_SIZE);
#endif
}

/* phpass core: crypt = MD5(salt||pw); repeat loopCnt times: crypt = MD5(crypt||pw). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int loops = 1, index;

#ifdef _OPENMP
	loops = (count + MAX_KEYS_PER_CRYPT - 1) / MAX_KEYS_PER_CRYPT;
	// NOTE(review): without OpenMP, loops stays 1 — this presumably relies on
	// count == MAX_KEYS_PER_CRYPT in non-OpenMP builds; confirm against core.
#pragma omp parallel for
#endif
	for (index = 0; index < loops; index++) {
		unsigned Lcount;
#ifdef SIMD_COEF_32
		// first step: MD5(salt||key) -> hash_key (key already appended there)
		SIMDmd5body(cursalt[index], hash_key[index], NULL, SSEi_OUTPUT_AS_INP_FMT);
		Lcount = loopCnt-1;
		do {
			SIMDmd5body(hash_key[index], hash_key[index], NULL, SSEi_OUTPUT_AS_INP_FMT);
		} while (--Lcount);
		// last hash goes into crypt_key
		SIMDmd5body(hash_key[index], crypt_key[index], NULL, 0);
#else
		MD5_CTX ctx;
		MD5_Init( &ctx );
		MD5_Update( &ctx, cursalt, 8 );
		MD5_Update( &ctx, saved_key[index], saved_len[index] );
		MD5_Final( (unsigned char *) crypt_key[index], &ctx);
		// keep the key right after the digest so each iteration hashes
		// the contiguous block digest||key
		strcpy(((char*)&(crypt_key[index]))+PHPASS_BINARY_SIZE, saved_key[index]);
		Lcount = loopCnt;
		do {
			MD5_Init( &ctx );
			MD5_Update( &ctx, crypt_key[index], PHPASS_BINARY_SIZE+saved_len[index]);
			MD5_Final( (unsigned char *)&(crypt_key[index]), &ctx);
		} while (--Lcount);
#endif
	}
	return count;
}

/* Build the 9-byte internal salt: 8 real salt bytes + 1 loop-count byte. */
static void * salt(char *ciphertext)
{
	static union {
		unsigned char salt[SALT_SIZE+2];
		ARCH_WORD_32 x;
	} x;
	unsigned char *salt = x.salt;
	// store off the 'real' 8 bytes of salt
	memcpy(salt, &ciphertext[4], 8);
	// append the 1 byte of loop count information.
	salt[8] = ciphertext[3];
	salt[9]=0;
	return salt;
}

#ifdef SIMD_COEF_32
// index of the first digest word of key `index` in the interleaved crypt_key
#define SIMD_INDEX (index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*SIMD_COEF_32*4
static int get_hash_0(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_0; }
static int get_hash_1(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_1; }
static int get_hash_2(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_2; }
static int get_hash_3(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_3; }
static int get_hash_4(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_4; }
static int get_hash_5(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_5; }
static int get_hash_6(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return ((ARCH_WORD_32*)(crypt_key[index]))[0] & PH_MASK_0; }
static int get_hash_1(int index) { return ((ARCH_WORD_32*)(crypt_key[index]))[0] & PH_MASK_1; }
static int get_hash_2(int index) { return ((ARCH_WORD_32*)(crypt_key[index]))[0] & PH_MASK_2; }
static int get_hash_3(int index) { return ((ARCH_WORD_32*)(crypt_key[index]))[0] & PH_MASK_3; }
static int get_hash_4(int index) { return ((ARCH_WORD_32*)(crypt_key[index]))[0] & PH_MASK_4; }
static int get_hash_5(int index) { return ((ARCH_WORD_32*)(crypt_key[index]))[0] & PH_MASK_5; }
static int get_hash_6(int index) { return ((ARCH_WORD_32*)(crypt_key[index]))[0] & PH_MASK_6; }
#endif

/* 10-bit bucket hash over the first machine word of the salt. */
static int salt_hash(void *salt)
{
	return *((ARCH_WORD *)salt) & 0x3FF;
}

struct fmt_main fmt_phpassmd5 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PHPASS_CPU_PLAINTEXT_LENGTH,
		PHPASS_BINARY_SIZE,
		PHPASS_BINARY_ALIGN,
		SALT_SIZE+1,  // 8 salt bytes + 1 loop-count byte
		PHPASS_SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP |
#endif
		FMT_CASE | FMT_8_BIT,
		{ "iteration count", },
		{ FORMAT_TAG, FORMAT_TAG2, FORMAT_TAG3 },
		phpass_common_tests_39
	}, {
		init,
		done,
		fmt_default_reset,
		phpass_common_prepare,
		phpass_common_valid,
		phpass_common_split,
		phpass_common_binary,
		salt,
		{ phpass_common_iteration_count, },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
oddeven.h
#ifndef ODDEVEN_H
#define ODDEVEN_H

#include <stdio.h>  /* printvec uses printf; header must be self-contained */

typedef int numeric;

/* Swap *a and *b if they are out of order.
 * Returns 1 if a swap happened, 0 otherwise. */
int compare_exchange(numeric *a, numeric *b){
    if (*a > *b) {
        numeric tmp = *a;
        *a = *b;
        *b = tmp;
        return 1;
    }
    return 0;
}

/* Odd-even transposition sort for EVEN n (the even phase deliberately
 * stops one pair early, so a trailing element of an odd-length array is
 * not fully sorted — use oddeven_anylenght for arbitrary n).
 * Fix: loop indices are int, not size_t — with size_t, `j < n/2 - 1`
 * underflowed for n < 2 and read/wrote out of bounds.  Returns 0. */
int oddeven(numeric *a, int n){
    for (int i = 0; i < n; i++) {
        if (i % 2 == 1) {
            for (int j = 0; j < n / 2; j++)
                compare_exchange(&a[2*j], &a[2*j + 1]);
        } else {
            for (int j = 0; j < n / 2 - 1; j++)
                compare_exchange(&a[2*j + 1], &a[2*j + 2]);
        }
    }
    return 0;
}

/* Odd-even transposition sort for any n.  r adjusts the even-phase pair
 * count: r = -1 for even n, 0 for odd n.  Returns 0. */
int oddeven_anylenght(numeric *a, int n){
    const int r = n % 2 - 1;
    for (int i = 0; i < n; i++) {
        if (i % 2 == 1) {
            for (int j = 0; j < n / 2; j++)
                compare_exchange(&a[2*j], &a[2*j + 1]);
        } else {
            for (int j = 0; j < n / 2 + r; j++)
                compare_exchange(&a[2*j + 1], &a[2*j + 2]);
        }
    }
    return 0;
}

/* Both phases per round, with early termination once a full round makes
 * no exchange.  Returns the number of rounds executed (n if it never
 * converged early).  `&a[2*j]+1` was rewritten as the equivalent but
 * clearer `&a[2*j + 1]`. */
int oddeven_stop(numeric *a, int n){
    const int r = n % 2 - 1;
    for (int i = 0; i < n; i++) {
        int changed = 0;
        for (int j = 0; j < n / 2; j++)
            changed |= compare_exchange(&a[2*j], &a[2*j + 1]);
        for (int j = 0; j < n / 2 + r; j++)
            changed |= compare_exchange(&a[2*j + 1], &a[2*j + 2]);
        if (!changed)
            return i + 1;
    }
    return n;
}

/* OpenMP variant of oddeven_stop.
 * Fixes: the original `break` inside an `omp for` worksharing loop is
 * non-conforming OpenMP, and the change flags were read between
 * worksharing constructs without synchronization.  Restructured as a
 * sequential outer loop with one parallel-for (plus an or-reduction) per
 * phase; pairs within a phase are independent, so each phase is safely
 * parallel.  Now also returns the round count like oddeven_stop
 * (previously the return value was always 0 and carried no information). */
int oddeven_omp(numeric *a, int n){
    const int r = n % 2 - 1;
    for (int i = 0; i < n; i++) {
        int changed = 0;
        #pragma omp parallel for reduction(|:changed)
        for (int j = 0; j < n / 2; j++)
            changed |= compare_exchange(&a[2*j], &a[2*j + 1]);
        #pragma omp parallel for reduction(|:changed)
        for (int j = 0; j < n / 2 + r; j++)
            changed |= compare_exchange(&a[2*j + 1], &a[2*j + 2]);
        if (!changed)
            return i + 1;
    }
    return n;
}

/* Print the array as "[a, b, ..., z]" followed by a newline.
 * Fixes: n == 1 printed a[0] twice ("[x, x]"), and n <= 0 read a[0] and
 * a[n-1] out of bounds.  Output for n >= 2 is byte-identical to before.
 * Returns 0. */
int printvec(numeric *a, int n){
    if (n <= 0)
        return 0;
    printf("[");
    for (int i = 0; i < n; i++)
        printf(i ? ", %d" : "%d", a[i]);
    printf("]\n");
    return 0;
}

#endif
GB_binop__eq_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__eq_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__eq_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__eq_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_uint8) // A*D function (colscale): GB (_AxD__eq_uint8) // D*A function (rowscale): GB (_DxB__eq_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__eq_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__eq_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_uint8) // C=scalar+B GB (_bind1st__eq_uint8) // C=scalar+B' GB (_bind1st_tran__eq_uint8) // C=A+scalar GB (_bind2nd__eq_uint8) // C=A'+scalar GB (_bind2nd_tran__eq_uint8) // C type: bool // A type: uint8_t // A pattern? 0 // B type: uint8_t // B pattern? 
0 // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_UINT8 || GxB_NO_EQ_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__eq_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__eq_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; 
#else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__eq_uint8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint8_t alpha_scalar ; uint8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ; beta_scalar = (*((uint8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__eq_uint8) ( GrB_Matrix C, const int C_sparsity, const int 
ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
// NOTE(review): this file is auto-generated (Generated2/ folder) -- do not
// hand-edit; change the Generator/ sources instead.  Comments only added here.

// Tail of GB (_AemultB_02__eq_uint8): the opening of this function and the
// #if GB_BINOP_FLIP branch it closes lie before this chunk.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// The numeric work is done by the included template, specialized via the
// GB_* macros defined earlier in this file (op is EQ on uint8).
GrB_Info GB (_AemultB_04__eq_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__eq_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = (x == Bx [p]) for every entry present in B (per the bitmap Bb, if
// any); output type is bool since the operator is a comparison.
GrB_Info GB (_bind1st__eq_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B, or NULL if B is not bitmap
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = (Ax [p] == y): mirror image of bind1st, with the scalar bound to
// the second operand.
GrB_Info GB (_bind2nd__eq_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A, or NULL if A is not bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x == aij) ;                      \
}

GrB_Info GB (_bind1st_tran__eq_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (same type here, but kept for generator symmetry)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij == y) ;                      \
}

GrB_Info GB (_bind2nd_tran__eq_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
mkldnn_common.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * Copyright (c) 2019 by Contributors
 * \file mkldnn_common.h
 * \brief Common header file for MKLDNN backend subgraph
 * \author Ciyong Chen
 */

#ifndef MXNET_OPERATOR_SUBGRAPH_MKLDNN_MKLDNN_COMMON_H_
#define MXNET_OPERATOR_SUBGRAPH_MKLDNN_MKLDNN_COMMON_H_
#if MXNET_USE_ONEDNN == 1
#include <vector>

namespace mxnet {
namespace op {

/*!
 * \brief Compute int8 quantization scales for a weight tensor.
 *
 * Scans the weight once (serially) to find per-output-channel min/max, then:
 *  - channelwise: returns one scale per channel (size == channel count), each
 *    clamped so that bias * scale * data_scale cannot overflow int32;
 *  - otherwise: returns a 3-element vector {global_scale, total_min, total_max}
 *    (callers rely on this layout).
 *
 * \param weight  weight NDArray; channel dimension is assumed to be axis 0 of
 *                its shape (offset = product of remaining dims) -- TODO confirm
 *                against callers for non-conv layouts.
 * \param bias    optional bias NDArray (may be nullptr).
 * \param data_scale  quantization scale of the input data, used only for the
 *                bias-overflow clamp.
 * \param weight_channelwise_scale  selects per-channel vs. global scaling.
 */
template <typename DType>
static std::vector<float> GetWeightScales(const NDArray &weight, const NDArray *bias,
                                          const float data_scale,
                                          bool weight_channelwise_scale) {
  auto nthreads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  std::vector<float> weight_scales;
  const DType *weight_ptr = weight.data().dptr<DType>();
  const DType *bias_ptr = bias ? bias->data().dptr<DType>() : nullptr;
  const auto wshape = weight.shape();
  size_t channel = wshape[0];

  // elements per output channel
  size_t offset = wshape.ProdShape(1, wshape.ndim());
  // per-channel running min/max, seeded so any real value replaces them
  std::vector<DType> weight_c_min(channel, MaxValue<DType>());
  std::vector<DType> weight_c_max(channel, MinValue<DType>());
  for (int c = 0; c < static_cast<int>(channel); ++c) {
    const DType *p1 = weight_ptr + c * offset;
    for (size_t k = 0; k < offset; ++k) {
      if (weight_c_min[c] > p1[k]) weight_c_min[c] = p1[k];
      if (weight_c_max[c] < p1[k]) weight_c_max[c] = p1[k];
    }
  }

  if (weight_channelwise_scale) {
    weight_scales.resize(channel);
#pragma omp parallel for num_threads(nthreads)
    for (int c = 0; c < static_cast<int>(channel); ++c) {
      float scale = GetQuantizeScale(mshadow::kInt8, weight_c_min[c], weight_c_max[c]);
      // bias_ptr[c] != 0 also guards the division below
      if (bias_ptr && bias_ptr[c]) {
        // avoid overflow on bias
        // TODO(zhennan): mkldnn has bug to handle INT_MAX in bias, so set the maximum value of
        // bias to INT_MAX / 2.
        float scale_max =
            static_cast<float>(bias_ptr[c] > 0 ? MaxValue<int32_t>() : MinValue<int32_t>()) / 2 /
            bias_ptr[c] / data_scale;
        scale = Min(scale, scale_max);
      }
      weight_scales[c] = scale;
    }
  } else {
    // reduce the per-channel extrema to a single global range
    DType total_min = weight_c_min[0];
    DType total_max = weight_c_max[0];
    for (size_t c = 0; c < channel; ++c) {
      if (total_min > weight_c_min[c]) total_min = weight_c_min[c];
      if (total_max < weight_c_max[c]) total_max = weight_c_max[c];
    }
    weight_scales.resize(3);
    weight_scales[0] = GetQuantizeScale(mshadow::kInt8, total_min, total_max);
    weight_scales[1] = total_min;
    weight_scales[2] = total_max;
  }
  return weight_scales;
}

/*!
 * \brief Reorder (and quantize) weight -- and optionally bias -- into the
 *        MKLDNN/oneDNN layouts given by weight_md / bias_md, replacing *weight
 *        and *bias in place on success.
 *
 * Registers reorder primitives on the global MKLDNNStream; when \p submit is
 * true the stream is submitted before the outputs are swapped in.
 * NOTE(review): when submit == false, *weight/*bias are replaced before the
 * reorders have executed -- callers must submit the stream themselves;
 * presumably that is the intended contract, verify at call sites.
 *
 * \param weight_scales  output of GetWeightScales(); size 1 => mask 0 (common
 *                       scale), otherwise per-channel mask 1.
 * \param data_scale     if 0, the bias path is skipped entirely.
 */
static void ConvertWeightBias2MKLDNN(NDArray *weight, NDArray *bias, bool has_bias,
                                     const mkldnn::memory::desc &weight_md,
                                     const mkldnn::memory::desc *bias_md, const int num_group,
                                     float data_scale, const std::vector<float> &weight_scales,
                                     const bool submit = true) {
  MKLDNNStream *stream = MKLDNNStream::Get();
  const auto new_weight = NDArray(weight_md);
  const auto conv_weights_memory = new_weight.GetMKLDNNData();
  mkldnn::primitive_attr weight_attr;
  if (weight_scales.size()) {
    // mask 0: one scale for the whole tensor; mask 1: per-channel scales
    const int weight_mask = (weight_scales.size()) == 1 ? 0 : 1;
    weight_attr.set_output_scales(weight_mask, weight_scales);
  }
  auto default_weights_memory = GetWeights(*weight, num_group);
  if (default_weights_memory == nullptr) default_weights_memory = weight->GetMKLDNNData();
  const auto weight_reorder_pd = mkldnn::reorder::primitive_desc(
      *default_weights_memory, *conv_weights_memory, weight_attr);
  MKLDNNStream::Get()->RegisterPrimArgs(
      mkldnn::reorder(weight_reorder_pd),
      {{MKLDNN_ARG_FROM, *default_weights_memory}, {MKLDNN_ARG_TO, *conv_weights_memory}});

  NDArray new_bias;
  if (has_bias && data_scale) {
    // bias is quantized with weight_scale * data_scale, channel for channel
    std::vector<float> bias_scales(weight_scales.size());
    for (size_t c = 0; c < weight_scales.size(); ++c) {
      bias_scales[c] = weight_scales[c] * data_scale;
    }
    new_bias = NDArray(*bias_md);
    const auto conv_bias_memory = new_bias.GetMKLDNNData();
    const int bias_mask = (bias_scales.size()) == 1 ? 0 : 1;
    mkldnn::primitive_attr bias_attr;
    bias_attr.set_output_scales(bias_mask, bias_scales);
    auto bias_weights_memory = bias->GetMKLDNNData();
    const auto bias_reorder_pd = mkldnn::reorder::primitive_desc(
        *bias_weights_memory, *conv_bias_memory, bias_attr);
    MKLDNNStream::Get()->RegisterPrimArgs(
        mkldnn::reorder(bias_reorder_pd),
        {{MKLDNN_ARG_FROM, *bias_weights_memory}, {MKLDNN_ARG_TO, *conv_bias_memory}});
  }
  if (submit) stream->Submit();
  *weight = new_weight;
  if (has_bias && data_scale) *bias = new_bias;
}

}  // namespace op
}  // namespace mxnet

#endif  // if MXNET_USE_ONEDNN == 1
#endif  // MXNET_OPERATOR_SUBGRAPH_MKLDNN_MKLDNN_COMMON_H_
GB_unaryop__identity_fp32_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): comments only added here; regenerate from Generator/* for any
// functional change.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_fp32_uint32
// op(A') function:  GB_tran__identity_fp32_uint32

// C type:   float
// A type:   uint32_t
// cast:     float cij = (float) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: the cast does all the work)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    float z = (float) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = (float) Ax [p], elementwise over all anz entries, parallelized
// with a static OpenMP schedule.
GrB_Info GB_unop__identity_fp32_uint32
(
    float *Cx,          // Cx and Ax may be aliased
    uint32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose work is done by the included template, specialized by the
// GB_* macros above; this is phase 2 of the two-phase transpose.
GrB_Info GB_tran__identity_fp32_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
displacement_lagrangemultiplier_contact_criteria.h
// KRATOS    ___|  |                   |                   |
//         \___ \  __|  __| |   |  __| __| |   |  __| _` | |
//               | |   |    |   | (    |   |   | |   (   | |
//         _____/ \__|_|   \__,_|\___|\__|\__,_|_|  \__,_|_| MECHANICS
//
//  License:             BSD License
//                       license: StructuralMechanicsApplication/license.txt
//
//  Main authors:    Vicente Mataix Ferrandiz
//

#if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H

/* System includes */

/* External includes */

/* Project includes */
#include "utilities/table_stream_utility.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "utilities/color_utilities.h"
#include "utilities/constraint_utilities.h"

namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{

///@name Kratos Globals
///@{

///@}
///@name Type Definitions
///@{

///@}
///@name  Enum's
///@{

///@}
///@name  Functions
///@{

///@name Kratos Classes
///@{

/**
 * @class DisplacementLagrangeMultiplierContactCriteria
 * @ingroup ContactStructuralMechanicsApplication
 * @brief Convergence criteria for contact problems
 * @details This class implements a convergence control based on nodal displacement and
 * lagrange multiplier values. The error is evaluated separately for each of them, and
 * relative and absolute tolerances for both must be specified.
 * @author Vicente Mataix Ferrandiz
 */
template< class TSparseSpace,
          class TDenseSpace >
class DisplacementLagrangeMultiplierContactCriteria
    : public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of DisplacementLagrangeMultiplierContactCriteria
    KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierContactCriteria );

    /// Local Flags
    /// NOTE(review): the NOT_* variants defined at the bottom of this file
    /// assume KRATOS_DEFINE_LOCAL_FLAG also declares NOT_<name> members --
    /// confirm against kratos/includes/define.h.
    KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT );
    KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );
    KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED );

    /// The base class definition (and it subclasses)
    typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;

    typedef typename BaseType::TDataType                    TDataType;

    typedef typename BaseType::DofsArrayType            DofsArrayType;

    typedef typename BaseType::TSystemMatrixType    TSystemMatrixType;

    typedef typename BaseType::TSystemVectorType    TSystemVectorType;

    /// The sparse space used
    typedef TSparseSpace                              SparseSpaceType;

    /// The r_table stream definition TODO: Replace by logger
    typedef TableStreamUtility::Pointer       TablePrinterPointerType;

    /// The index type definition
    typedef std::size_t                                     IndexType;

    /// The key type definition
    typedef std::size_t                                       KeyType;

    /// The epsilon tolerance definition (norms below this are treated as zero)
    static constexpr double Tolerance = std::numeric_limits<double>::epsilon();

    ///@}
    ///@name Life Cycle
    ///@{

    /// Constructor.
    /**
     * @param DispRatioTolerance Relative tolerance for displacement error
     * @param DispAbsTolerance Absolute tolerance for displacement error
     * @param LMRatioTolerance Relative tolerance for lagrange multiplier error
     * @param LMAbsTolerance Absolute tolerance for lagrange multiplier error
     * @param EnsureContact To check if the contact is lost
     * @param PrintingOutput If the output is going to be printed in a txt file
     */
    explicit DisplacementLagrangeMultiplierContactCriteria(
        const TDataType DispRatioTolerance,
        const TDataType DispAbsTolerance,
        const TDataType LMRatioTolerance,
        const TDataType LMAbsTolerance,
        const bool EnsureContact = false,
        const bool PrintingOutput = false
        )
        : BaseType()
    {
        // Set local flags
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT, EnsureContact);
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT, PrintingOutput);
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED, false);

        // The displacement solution
        mDispRatioTolerance = DispRatioTolerance;
        mDispAbsTolerance = DispAbsTolerance;

        // The contact solution
        mLMRatioTolerance = LMRatioTolerance;
        mLMAbsTolerance = LMAbsTolerance;
    }

    /**
     * @brief Default constructor (parameters)
     * @param ThisParameters The configuration parameters
     */
    explicit DisplacementLagrangeMultiplierContactCriteria( Parameters ThisParameters = Parameters(R"({})"))
        : BaseType()
    {
        // The default parameters
        Parameters default_parameters = Parameters(R"(
        {
            "ensure_contact"                           : false,
            "print_convergence_criterion"              : false,
            "displacement_relative_tolerance"          : 1.0e-4,
            "displacement_absolute_tolerance"          : 1.0e-9,
            "contact_displacement_relative_tolerance"  : 1.0e-4,
            "contact_displacement_absolute_tolerance"  : 1.0e-9
        })" );

        ThisParameters.ValidateAndAssignDefaults(default_parameters);

        // The displacement solution
        mDispRatioTolerance = ThisParameters["displacement_relative_tolerance"].GetDouble();
        mDispAbsTolerance = ThisParameters["displacement_absolute_tolerance"].GetDouble();

        // The contact solution
        mLMRatioTolerance =  ThisParameters["contact_displacement_relative_tolerance"].GetDouble();
        mLMAbsTolerance =  ThisParameters["contact_displacement_absolute_tolerance"].GetDouble();

        // Set local flags
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool());
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
        mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED, false);
    }

    // Copy constructor.
    DisplacementLagrangeMultiplierContactCriteria( DisplacementLagrangeMultiplierContactCriteria const& rOther )
      :BaseType(rOther)
      ,mOptions(rOther.mOptions)
      ,mDispRatioTolerance(rOther.mDispRatioTolerance)
      ,mDispAbsTolerance(rOther.mDispAbsTolerance)
      ,mLMRatioTolerance(rOther.mLMRatioTolerance)
      ,mLMAbsTolerance(rOther.mLMAbsTolerance)
    {
    }

    /// Destructor.
    ~DisplacementLagrangeMultiplierContactCriteria() override = default;

    ///@}
    ///@name Operators
    ///@{

    /**
     * @brief Compute relative and absolute error.
     * @param rModelPart Reference to the ModelPart containing the contact problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual)
     * @return true if convergence is achieved, false otherwise
     */
    bool PostCriteria(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        if (SparseSpaceType::Size(rDx) != 0) { //if we are solving for something
            // Initialize
            TDataType disp_solution_norm = 0.0, lm_solution_norm = 0.0, disp_increase_norm = 0.0, lm_increase_norm = 0.0;
            IndexType disp_dof_num(0),lm_dof_num(0);

            // First iterator
            const auto it_dof_begin = rDofSet.begin();

            // Auxiliar values
            // NOTE(review): dof_id/dof_value/dof_incr are per-iteration
            // temporaries; listing them in the reduction(+) clause below only
            // gives each thread a zero-initialized private copy. Behavior is
            // unchanged (they are reassigned every iteration and not read
            // after the loop), but declaring them inside the loop body would
            // express the intent better.
            std::size_t dof_id = 0;
            TDataType dof_value = 0.0, dof_incr = 0.0;

            // Loop over Dofs -- accumulates squared norms of solution values
            // and increments, split between Lagrange-multiplier DoFs and all
            // remaining (displacement) DoFs; only active DoFs are counted.
            #pragma omp parallel for reduction(+:disp_solution_norm,lm_solution_norm,disp_increase_norm,lm_increase_norm,disp_dof_num,lm_dof_num,dof_id,dof_value,dof_incr)
            for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
                auto it_dof = it_dof_begin + i;

                dof_id = it_dof->EquationId();

                if (mActiveDofs[dof_id]) {
                    dof_value = it_dof->GetSolutionStepValue(0);
                    dof_incr = rDx[dof_id];

                    const auto curr_var = it_dof->GetVariable();
                    if ((curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) {
                        lm_solution_norm += dof_value * dof_value;
                        lm_increase_norm += dof_incr * dof_incr;
                        lm_dof_num++;
                    } else {
                        disp_solution_norm += dof_value * dof_value;
                        disp_increase_norm += dof_incr * dof_incr;
                        disp_dof_num++;
                    }
                }
            }

            // Guard against zero denominators in the ratios below
            if(disp_increase_norm < Tolerance) disp_increase_norm = 1.0;
            if(lm_increase_norm < Tolerance) lm_increase_norm = 1.0;
            if(disp_solution_norm < Tolerance) disp_solution_norm = 1.0;

            KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT) && lm_solution_norm < Tolerance) << "WARNING::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;

            const TDataType disp_ratio = std::sqrt(disp_increase_norm/disp_solution_norm);
            const TDataType lm_ratio = lm_solution_norm > Tolerance ? std::sqrt(lm_increase_norm/lm_solution_norm) : 0.0;

            // NOTE(review): if no active DoFs of a kind exist, the matching
            // *_dof_num is 0 and this divides by zero (inf/NaN) -- presumably
            // never happens in a real contact problem; verify.
            const TDataType disp_abs = std::sqrt(disp_increase_norm)/static_cast<TDataType>(disp_dof_num);
            const TDataType lm_abs = std::sqrt(lm_increase_norm)/static_cast<TDataType>(lm_dof_num);

            // The process info of the model part
            ProcessInfo& r_process_info = rModelPart.GetProcessInfo();

            // We print the results  // TODO: Replace for the new log
            // NOTE(review): "DoF ONVERGENCE CHECK" below looks like a typo for
            // "CONVERGENCE"; left untouched here since it is a runtime string.
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                if (r_process_info.Has(TABLE_UTILITY)) {
                    std::cout.precision(4);
                    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                    auto& r_table = p_table->GetTable();
                    r_table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance << lm_ratio << mLMRatioTolerance << lm_abs << mLMAbsTolerance;
                } else {
                    std::cout.precision(4);
                    if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT)) {
                        KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("DoF ONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl;
                        KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
                        KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT(" LAGRANGE MUL:\tRATIO = ") << lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl;
                    } else {
                        KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "DoF ONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl;
                        KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDISPLACEMENT: RATIO = " << disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
                        KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << " LAGRANGE MUL:\tRATIO = " << lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl;
                    }
                }
            }

            // We check if converged
            const bool disp_converged = (disp_ratio <= mDispRatioTolerance || disp_abs <= mDispAbsTolerance);
            // If contact need not be ensured, a vanishing LM norm counts as converged
            const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::ENSURE_CONTACT) && lm_solution_norm < Tolerance) ? true : (lm_ratio <= mLMRatioTolerance || lm_abs <= mLMAbsTolerance);

            if (disp_converged && lm_converged) {
                if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                    if (r_process_info.Has(TABLE_UTILITY)) {
                        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                        auto& r_table = p_table->GetTable();
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT))
                            r_table << BOLDFONT(FGRN("       Achieved"));
                        else
                            r_table << "Achieved";
                    } else {
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT))
                            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
                        else
                            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDoF convergence is achieved" << std::endl;
                    }
                }
                return true;
            } else {
                if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                    if (r_process_info.Has(TABLE_UTILITY)) {
                        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                        auto& r_table = p_table->GetTable();
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT))
                            r_table << BOLDFONT(FRED("   Not achieved"));
                        else
                            r_table << "Not achieved";
                    } else {
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::PRINTING_OUTPUT))
                            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl;
                        else
                            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDoF convergence is not achieved" << std::endl;
                    }
                }
                return false;
            }
        } else // In this case all the displacements are imposed!
            return true;
    }

    /**
     * @brief This function initialize the convergence criteria
     * @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
     */
    void Initialize( ModelPart& rModelPart ) override
    {
        BaseType::mConvergenceCriteriaIsInitialized = true;

        // Register the output-table columns once
        ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
        if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED)) {
            TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
            auto& r_table = p_table->GetTable();
            r_table.AddColumn("DP RATIO", 10);
            r_table.AddColumn("EXP. RAT", 10);
            r_table.AddColumn("ABS", 10);
            r_table.AddColumn("EXP. ABS", 10);
            r_table.AddColumn("LM RATIO", 10);
            r_table.AddColumn("EXP. RAT", 10);
            r_table.AddColumn("ABS", 10);
            r_table.AddColumn("EXP. ABS", 10);
            r_table.AddColumn("CONVERGENCE", 15);
            mOptions.Set(DisplacementLagrangeMultiplierContactCriteria::TABLE_IS_INITIALIZED, true);
        }
    }

    /**
     * @brief This function initializes the solution step
     * @param rModelPart Reference to the ModelPart containing the contact problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual)
     */
    void InitializeSolutionStep(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        // Filling mActiveDofs when MPC exist
        ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet);
    }

    ///@}
    ///@name Operations
    ///@{

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Friends
    ///@{

protected:

    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected  Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}
private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    Flags mOptions;                  /// Local flags

    TDataType mDispRatioTolerance;   /// The ratio threshold for the norm of the displacement
    TDataType mDispAbsTolerance;     /// The absolute value threshold for the norm of the displacement

    TDataType mLMRatioTolerance;     /// The ratio threshold for the norm of the LM
    TDataType mLMAbsTolerance;       /// The absolute value threshold for the norm of the LM

    std::vector<bool> mActiveDofs;   /// This vector contains the dofs that are active

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private  Access
    ///@{

    ///@}
    ///@}
    ///@name Serialization
    ///@{

    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Unaccessible methods
    ///@{

    ///@}
}; // Kratos DisplacementLagrangeMultiplierContactCriteria

///@name Local flags creation
///@{

/// Local Flags (bit positions 0..2; the NOT_ variants are the cleared states)
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::NOT_ENSURE_CONTACT(Kratos::Flags::Create(0, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::NOT_PRINTING_OUTPUT(Kratos::Flags::Create(1, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierContactCriteria<TSparseSpace, TDenseSpace>::NOT_TABLE_IS_INITIALIZED(Kratos::Flags::Create(2, false));
}

#endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H */
GB_binop__max_uint16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): comments only added here; regenerate for functional changes.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__max_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_08__max_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__max_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_04__max_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__max_uint16)
// A*D function (colscale):         GB (_AxD__max_uint16)
// D*A function (rowscale):         GB (_DxB__max_uint16)
// C+=B function (dense accum):     GB (_Cdense_accumB__max_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__max_uint16)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__max_uint16)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__max_uint16)
// C=scalar+B                       GB (_bind1st__max_uint16)
// C=scalar+B'                      GB (_bind1st_tran__max_uint16)
// C=A+scalar                       GB (_bind2nd__max_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__max_uint16)

// C type:     uint16_t
// A type:     uint16_t
// A pattern?  0
// B type:     uint16_t
// B pattern?  0

// BinaryOp:   cij = GB_IMAX (aij, bij)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    uint16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    uint16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_IMAX (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MAX || GxB_NO_UINT16 || GxB_NO_MAX_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__max_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__max_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__max_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__max_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above always returns); harmless
    // generator artifact, kept as-is in this auto-generated file.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__max_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__max_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__max_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only used by eWiseUnion (fill values for entries present
    // in just one of A or B)
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__max_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__max_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (MAX is commutative, so this branch is the one compiled here.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__max_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__max_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// NOTE(review): this function is truncated at the end of this chunk; the rest
// of its parallel loop (and the remaining *_tran functions of this generated
// file) lie beyond the visible region.
GrB_Info GB (_bind1st__max_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads)
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IMAX (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__max_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IMAX (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMAX (x, aij) ; \ } GrB_Info GB (_bind1st_tran__max_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMAX (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__max_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
openmp-ex00.c
/* Hello threads: adapted from Edmond Chow's OpenMP notes */
#include <stdio.h>

/*
 * Print one greeting from the initial thread, then let every thread in a
 * parallel team reply once.  Built without OpenMP support the pragma is
 * ignored and the reply prints exactly once.
 */
int main(void)
{
    printf("You're all individuals!\n");

    /* fork a team of threads; each executes the structured block below */
#pragma omp parallel
    {
        printf("Yes, we're all individuals!\n");
    }
    /* implicit barrier: the team joins the initial thread here */

    return 0;
}
lsh_index.h
/*********************************************************************** * Software License Agreement (BSD License) * * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. * * THE BSD LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *************************************************************************/

/***********************************************************************
 * Author: Vincent Rabaud
 *************************************************************************/

#ifndef FLANN_LSH_INDEX_H_
#define FLANN_LSH_INDEX_H_

#include <sstream>
#include <algorithm>
#include <cassert>
#include <cstring>
#include <map>
#include <vector>

#include "flann/general.h"
#include "flann/algorithms/nn_index.h"
#include "flann/util/matrix.h"
#include "flann/util/result_set.h"
#include "flann/util/heap.h"
#include "flann/util/lsh_table.h"
#include "flann/util/allocator.h"
#include <PrintProgress.hpp>
#include "flann/util/random.h"
#include "flann/util/saving.h"
#include <flann/algorithms/dist.h>

// NOTE(review): knnSearch below calls clock() but no <ctime>/<time.h> include
// is visible here -- presumably pulled in transitively; confirm.

namespace flann
{

/** Parameter bag for the LSH index (stored as key/value pairs in the
 *  IndexParams map the rest of FLANN reads). */
struct LshIndexParams : public IndexParams
{
    LshIndexParams(unsigned int table_number = 12, unsigned int key_size = 20, unsigned int multi_probe_level = 2)
    {
        (* this)["algorithm"] = FLANN_INDEX_LSH;
        // The number of hash tables to use
        (*this)["table_number"] = table_number;
        // The length of the key in the hash tables
        (*this)["key_size"] = key_size;
        // Number of levels to use in multi-probe (0 for standard LSH)
        (*this)["multi_probe_level"] = multi_probe_level;
    }
};

/**
 * Locality-sensitive hashing index
 *
 * Contains the tables and other information for indexing a set of points
 * for nearest-neighbor matching.
 */
template<typename Distance>
class LshIndex : public NNIndex<Distance>
{
public:
    typedef typename Distance::ElementType ElementType;
    typedef typename Distance::ResultType DistanceType;

    typedef NNIndex<Distance> BaseClass;

    /** Constructor
     * @param params parameters passed to the LSH algorithm
     * @param d the distance used
     */
    LshIndex(const IndexParams& params = LshIndexParams(), Distance d = Distance())
        : BaseClass(params, d)
    {
        table_number_ = get_param<unsigned int>(index_params_,"table_number",12);
        key_size_ = get_param<unsigned int>(index_params_,"key_size",20);
        multi_probe_level_ = get_param<unsigned int>(index_params_,"multi_probe_level",2);

        // precompute the bit-flip masks used by multi-probe queries
        fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_);
    }

    /** Constructor
     * @param input_data dataset with the input features
     * @param params parameters passed to the LSH algorithm
     * @param d the distance used
     */
    LshIndex(const Matrix<ElementType>& input_data, const IndexParams& params = LshIndexParams(), Distance d = Distance())
        : BaseClass(params, d)
    {
        table_number_ = get_param<unsigned int>(index_params_,"table_number",12);
        key_size_ = get_param<unsigned int>(index_params_,"key_size",20);
        multi_probe_level_ = get_param<unsigned int>(index_params_,"multi_probe_level",2);

        fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_);

        setDataset(input_data);
    }

    LshIndex(const LshIndex& other) : BaseClass(other),
    		tables_(other.tables_),
    		table_number_(other.table_number_),
    		key_size_(other.key_size_),
    		multi_probe_level_(other.multi_probe_level_),
    		xor_masks_(other.xor_masks_)
    {
    }

    // copy-and-swap assignment (argument taken by value)
    LshIndex& operator=(LshIndex other)
    {
    	this->swap(other);
    	return *this;
    }

    virtual ~LshIndex()
    {
    	freeIndex();
    }

    BaseClass* clone() const
    {
    	return new LshIndex(*this);
    }

    using BaseClass::buildIndex;

    /** Append points; rebuilds from scratch once the dataset has grown past
     *  rebuild_threshold x the size at the last build, otherwise just hashes
     *  the new points into every existing table. */
    void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2)
    {
        assert(points.cols==veclen_);
        size_t old_size = size_;

        extendDataset(points);

        if (rebuild_threshold>1 && size_at_build_*rebuild_threshold<size_) {
            buildIndex();
        }
        else {
            for (unsigned int i = 0; i < table_number_; ++i) {
                lsh::LshTable<ElementType>& table = tables_[i];
                // NOTE(review): the inner size_t i shadows the outer table
                // index i.  Works as written, but confusing -- worth renaming.
                for (size_t i=old_size;i<size_;++i) {
                    table.add(i, points_[i]);
                }
            }
        }
    }

    flann_algorithm_t getType() const
    {
        return FLANN_INDEX_LSH;
    }

    /** (De)serializes the index state; on load, mirrors the fields back into
     *  index_params_ so introspection stays consistent. */
    template<typename Archive>
    void serialize(Archive& ar)
    {
    	ar.setObject(this);

    	ar & *static_cast<NNIndex<Distance>*>(this);

    	ar & table_number_;
    	ar & key_size_;
    	ar & multi_probe_level_;

    	ar & xor_masks_;
    	ar & tables_;

    	if (Archive::is_loading::value) {
            index_params_["algorithm"] = getType();
            index_params_["table_number"] = table_number_;
            index_params_["key_size"] = key_size_;
            index_params_["multi_probe_level"] = multi_probe_level_;
    	}
    }

    void saveIndex(FILE* stream)
    {
    	serialization::SaveArchive sa(stream);
    	sa & *this;
    }

    void loadIndex(FILE* stream)
    {
    	serialization::LoadArchive la(stream);
    	la & *this;
    }

    /**
     * Computes the index memory usage
     * Returns: memory used by the index
     */
    int usedMemory() const
    {
        return size_ * sizeof(int);
    }

    /**
     * \brief Perform k-nearest neighbor search
     * \param[in] queries The query points for which to find the nearest neighbors
     * \param[out] indices The indices of the nearest neighbors found
     * \param[out] dists Distances to the nearest neighbors found
     * \param[in] knn Number of nearest neighbors to return
     * \param[in] params Search parameters
     */
    int knnSearch(const Matrix<ElementType>& queries,
    				Matrix<size_t>& indices,
    				Matrix<DistanceType>& dists,
    				size_t knn,
    				const SearchParams& params) const
    {
        assert(queries.cols == veclen_);
        assert(indices.rows >= queries.rows);
        assert(dists.rows >= queries.rows);
        assert(indices.cols >= knn);
        assert(dists.cols >= knn);

        int count = 0;
        if (params.use_heap==FLANN_True) {
            // one private result set per thread; queries split statically
#pragma omp parallel num_threads(params.cores)
            {
                KNNUniqueResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    resultSet.copy(indices[i], dists[i], n, params.sorted);
                    indices_to_ids(indices[i], indices[i], n);
                    count += n;
                }
            }
        }
        else {
#pragma omp parallel num_threads(params.cores)
            {
                KNNResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                	/*
                	if (i % 100 == 0 || i == queries.rows - 1) {
						PrintProgress::printProgress(1.0 * (i + 1) / queries.rows);
					}
					*/
                    resultSet.clear();
                    clock_t startTime = clock();
                    findNeighbors(resultSet, queries[i], params);
                    //printf("%d/%d, findNeighbors, %d ms\n", i+1, queries.rows, 1000L*(clock()-startTime)/CLOCKS_PER_SEC);
                    size_t n = std::min(resultSet.size(), knn);
                    resultSet.copy(indices[i], dists[i], n, params.sorted);
                    indices_to_ids(indices[i], indices[i], n);
                    count += n;
                }
                // NOTE(review): still inside the parallel region, so every
                // thread prints one newline -- confirm this is intended.
                printf("\n");
            }
        }

        return count;
    }

    /**
     * \brief Perform k-nearest neighbor search
     * \param[in] queries The query points for which to find the nearest neighbors
     * \param[out] indices The indices of the nearest neighbors found
     * \param[out] dists Distances to the nearest neighbors found
     * \param[in] knn Number of nearest neighbors to return
     * \param[in] params Search parameters
     */
    int knnSearch(const Matrix<ElementType>& queries,
                  std::vector< std::vector<size_t> >& indices,
                  std::vector<std::vector<DistanceType> >& dists,
                  size_t knn,
                  const SearchParams& params) const
    {
        assert(queries.cols == veclen_);
        // grow the per-query output vectors on demand
		if (indices.size() < queries.rows ) indices.resize(queries.rows);
		if (dists.size() < queries.rows ) dists.resize(queries.rows);

		int count = 0;
		if (params.use_heap==FLANN_True) {
#pragma omp parallel num_threads(params.cores)
			{
				KNNUniqueResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
				for (int i = 0; i < (int)queries.rows; i++) {
					resultSet.clear();
					// NOTE(review): 4-arg call, but this class only declares a
					// 3-arg findNeighbors -- presumably resolves to an
					// NNIndex overload; verify against the base class.
					findNeighbors(resultSet, queries[i], params, knn);
					size_t n = std::min(resultSet.size(), knn);
					indices[i].resize(n);
					dists[i].resize(n);
					if (n > 0) {
						resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
						indices_to_ids(&indices[i][0], &indices[i][0], n);
					}
					count += n;
				}
			}
		}
		else {
#pragma omp parallel num_threads(params.cores)
			{
				KNNResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
				for (int i = 0; i < (int)queries.rows; i++) {
					resultSet.clear();
					findNeighbors(resultSet, queries[i], params);
					size_t n = std::min(resultSet.size(), knn);
					indices[i].resize(n);
					dists[i].resize(n);
					if (n > 0) {
						resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
						indices_to_ids(&indices[i][0], &indices[i][0], n);
					}
					count += n;
				}
			}
		}

		return count;
    }

    /**
     * Find set of nearest neighbors to vec. Their indices are stored inside
     * the result object.
     *
     * Params:
     *     result = the result object in which the indices of the nearest-neighbors are stored
     *     vec = the vector for which to search the nearest neighbors
     *     maxCheck = the maximum number of restarts (in a best-bin-first manner)
     *     knn = the count of neighbors need to find
     */
    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& /*searchParams*/) const
    {
        getNeighbors(vec, result);
    }

protected:

    /**
     * Builds the index
     */
    void buildIndexImpl()
    {
        tables_.resize(table_number_);
        std::vector<std::pair<size_t,ElementType*> > features;
        features.reserve(points_.size());
        for (size_t i=0;i<points_.size();++i) {
        	features.push_back(std::make_pair(i, points_[i]));
        }
        for (unsigned int i = 0; i < table_number_; ++i) {
            lsh::LshTable<ElementType>& table = tables_[i];
            table = lsh::LshTable<ElementType>(veclen_, key_size_);

            // Add the features to the table
            table.add(features);
        }
    }

    void freeIndex()
    {
    	/* nothing to do here */
    }

private:
    /** Defines the comparator on score and index */
    typedef std::pair<float, unsigned int> ScoreIndexPair;
    struct SortScoreIndexPairOnSecond
    {
        bool operator()(const ScoreIndexPair& left, const ScoreIndexPair& right) const
        {
            return left.second < right.second;
        }
    };

    /** Fills the different xor masks to use when getting the neighbors in multi-probe LSH
     * @param key the key we build neighbors from
     * @param lowest_index the lowest index of the bit set
     * @param level the multi-probe level we are at
     * @param xor_masks all the xor mask
     */
    void fill_xor_mask(lsh::BucketKey key, int lowest_index, unsigned int level,
                       std::vector<lsh::BucketKey>& xor_masks)
    {
        // recursively enumerate all masks with up to `level` bits set below
        // lowest_index (includes `key` itself at every level)
        xor_masks.push_back(key);
        if (level == 0) return;
        for (int index = lowest_index - 1; index >= 0; --index) {
            // Create a new key
            lsh::BucketKey new_key = key | (lsh::BucketKey(1) << index);
            fill_xor_mask(new_key, index, level - 1, xor_masks);
        }
    }

    /** Performs the approximate nearest-neighbor search.
     * This is a slower version than the above as it uses the ResultSet
     * @param vec the feature to analyze
     */
    void getNeighbors(const ElementType* vec, ResultSet<DistanceType>& result) const
    {
        typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
        typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
        for (; table != table_end; ++table) {
            size_t key = table->getKey(vec);
            std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
            std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
            for (; xor_mask != xor_mask_end; ++xor_mask) {
                // probe the bucket for this key perturbed by the mask
                size_t sub_key = key ^ (*xor_mask);
                const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);
                if (bucket == 0) continue;

                // Go over each descriptor index
                std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
                std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
                DistanceType hamming_distance;

                // Process the rest of the candidates
                for (; training_index < last_training_index; ++training_index) {
                	if (removed_ && removed_points_.test(*training_index)) continue;
                    // Compute the Hamming distance
                    hamming_distance = distance_(vec, points_[*training_index], veclen_);
                    result.addPoint(hamming_distance, *training_index);
                }
            }
        }
    }

    void swap(LshIndex& other)
    {
    	BaseClass::swap(other);
    	std::swap(tables_, other.tables_);
    	std::swap(size_at_build_, other.size_at_build_);
    	std::swap(table_number_, other.table_number_);
    	std::swap(key_size_, other.key_size_);
    	std::swap(multi_probe_level_, other.multi_probe_level_);
    	std::swap(xor_masks_, other.xor_masks_);
    }

    /** The different hash tables */
    std::vector<lsh::LshTable<ElementType> > tables_;

    /** table number */
    unsigned int table_number_;
    /** key size */
    unsigned int key_size_;
    /** How far should we look for neighbors in multi-probe LSH */
    unsigned int multi_probe_level_;

    /** The XOR masks to apply to a key to get the neighboring buckets */
    std::vector<lsh::BucketKey> xor_masks_;

    USING_BASECLASS_SYMBOLS
};

// NOTE(review): full specialization defined in a header without `inline` --
// if this header is included in more than one translation unit this is an
// ODR violation; consider marking it inline.  Also note it does no
// multi-probe and skips the removed_points_ check, unlike the primary
// template above -- confirm both are intentional.
template<>
void LshIndex<L2_Simple<float>>::getNeighbors(const float* vec, ResultSet<float>& result) const
{
	typename std::vector<lsh::LshTable<float> >::const_iterator table = tables_.begin();
	typename std::vector<lsh::LshTable<float> >::const_iterator table_end = tables_.end();
	for (; table != table_end; ++table) {
		size_t key = table->getKey(vec);
		//printf("key:%d\n", key);
		const lsh::Bucket* bucket = table->getBucketFromKey(key);
		if (bucket == 0) continue;

		// Go over each descriptor index
		std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
		std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();

		// Process the rest of the candidates
		for (; training_index < last_training_index; ++training_index) {
			// Compute the Euclidean distance
			float euclidean_distance = distance_(vec, points_[*training_index], veclen_);
			//printf("euclidean_distance:%f\n", euclidean_distance);
			result.addPoint(euclidean_distance, *training_index);
		}
	}
}

}

#endif //FLANN_LSH_INDEX_H_
enforcer.c
/* NOTE(review): this file reads as a compiler/tool test ("enforcer") that
 * exercises one instance of each OpenMP construct and each C statement kind.
 * It is presumably only parsed/instrumented, never meant to be executed:
 * x is updated unsynchronized inside parallel regions (a data race if run
 * with threads), the `#pragma omp for` below is orphaned (no enclosing
 * parallel region at this point), and the do/while loop never terminates
 * (x == x is always true).  Confirm its purpose before "fixing" anything. */

/* trivial helper; returns a constant */
int foo () { return 3; }

int main () {
  int x = 10;
  /* parallel region with a single-statement structured block */
  #pragma omp parallel
  x++;
  /* orphaned worksharing loop */
  #pragma omp for
  for (x = 0; x < 10; x++) x = x;
  /* sections / section */
  #pragma omp sections
  {
    #pragma omp section
    x++;
    #pragma omp section
    x+=2;
  }
  /* single -> task -> combined parallel-for nesting */
  #pragma omp single
  #pragma omp task
  #pragma omp parallel for
  for (x = 0; x < 12; x++) x = x + 0;
  /* combined parallel sections */
  #pragma omp parallel sections
  {
    #pragma omp section
    x++;
    #pragma omp section
    x+=2;
  }
  #pragma omp master
  x++;
  #pragma omp critical
  x++;
  #pragma omp ordered
  x++;
  /* one of each plain C statement kind follows */
  if (x == x) x++;
  switch (x) case 1: x;
  while (x) x++;
  do x++; while (x == x);   /* condition is always true */
  for (x = 10; x < 10; x++) x++;
  if (x == x) x++; else x--;
}
stencil.h
#include "multicore.h"

// Linearize multi-dimensional indices into the flat per-core memory segment.
// These assume the segment sizes stored in core->coreArrayNeighborhoodSizes_*
// for the center ([1][1] / [1][1][1]) entry of the core neighborhood.
#define index1D(i) (i)
#define index2D(i,j) (((j)*(core->coreArrayNeighborhoodSizes_2D[1][1][0]))+(i))

// I think this should be in terms of the size for X and Y, not X, Y, and Z!
// #define index3D(i,j,k) (((k)*core->coreArrayNeighborhoodSizes_3D[1][1][1][2]*core->coreArrayNeighborhoodSizes_3D[1][1][1][1])+((j)*core->coreArrayNeighborhoodSizes_3D[1][1][1][0])+(i))
#define index3D(i,j,k) (((k)*(core->coreArrayNeighborhoodSizes_3D[1][1][1][0])*(core->coreArrayNeighborhoodSizes_3D[1][1][1][1]))+((j)*(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]))+(i))

// And we need another macro for the general case where the memory segment is a different size than coreArrayNeighborhoodSizes_2D[1][1][0] in the X (and Y axis)
// since the lenght of the data in each axis can be different along the same axis of the core array.  These macros take the length of the array in the requires
// axis to properly referne the element on the associated "other core".
#define otherCore_index2D(i,j,sizeX) (((j)*sizeX)+(i))
#define otherCore_index3D(i,j,k,sizeX,sizeY) (((k)*sizeX*sizeY)+((j)*sizeX)+(i))

// 2D 4-point Jacobi relaxation over per-core array sections: interior points
// are updated from old_array; internal (core-to-core) boundaries are handled
// by each core's relax_on_boundary.  One OpenMP thread per core.
template <typename T>
void relax2D( MulticoreArray<T> & array, MulticoreArray<T> & old_array )
   {
  // This is a working example of the relaxation associated with the a stencil on the array abstraction
  // mapped to the separate multi-dimensional memorys allocated per core and onto a multi-dimenional
  // array of cores (core array).

     int numberOfCores = array.get_numberOfCores();

  // Macro to support linearization of multi-dimensional 2D array index computation
#define local_index2D(i,j) (((j)*sizeX)+(i))

  // Use OpenMP to support the threading...
#pragma omp parallel for
     for (int core = 0; core < numberOfCores; core++)
        {
       // This lifts out loop invariant portions of the code.
          T* arraySection     = array.get_arraySectionPointers()[core];
          T* old_arraySection = old_array.get_arraySectionPointers()[core];

       // Lift out loop invariant local array size values.
          int sizeX = array.get_coreArray()[core]->coreArrayNeighborhoodSizes_2D[1][1][0];
          int sizeY = array.get_coreArray()[core]->coreArrayNeighborhoodSizes_2D[1][1][1];

       // Interior sweep only (i,j in [1, size-2]); section edges are done below.
          for (int j = 1; j < sizeY-1; j++)
             {
               for (int i = 1; i < sizeX-1; i++)
                  {
                 // This is the dominant computation for each array section per core.  The compiler
                 // will use the user's code to derive the code that will be put here.
                    arraySection[local_index2D(i,j)] =
                       (old_arraySection[local_index2D(i-1,j)] + old_arraySection[local_index2D(i+1,j)] +
                        old_arraySection[local_index2D(i,j-1)] + old_arraySection[local_index2D(i,j+1)]) / 4.0;
                  }
             }

       // We could alternatively generate the call for relaxation for the internal
       // boundaries in the same loop (reduces syncronization).
          array.get_coreArray()[core]->relax_on_boundary(core,array,old_array);
        }

  // Relax on the edges of the array sections on each core (use the alternative approach).
  // relax2D_on_boundary(array,old_array);

  // undefine the local 2D index support macro
#undef local_index2D
   }

// Applies only the internal-boundary part of the 2D relaxation, one OpenMP
// thread per core (relies on shared memory to read neighbor-core sections).
template <typename T>
void relax2D_on_boundary( MulticoreArray<T> & array, MulticoreArray<T> & old_array )
   {
  // This function supports the relaxation operator on the internal boundaries
  // of the different arrays allocated on a per core basis.  We take advantage
  // of shared memory to support the stencil operations.

     int numberOfCores = array.get_numberOfCores();

#pragma omp parallel for
     for (int core = 0; core < numberOfCores; core++)
        {
          array.get_coreArray()[core]->relax_on_boundary(core,array,old_array);
        }
   }

// 3D 6-point relaxation using the mid-level per-core accessor interface
// (array.getCore(...)(i,j,k)); loop bounds shrink by one at physical domain
// boundaries (boundaryCore_3D) so the stencil never reads outside the domain.
template <typename T>
void relax3D_midlevel( MulticoreArray<T> & array, MulticoreArray<T> & old_array )
   {
  // This is a working example of the relaxation associated with the a stencil on the array abstraction
  // mapped to the separate multi-dimensional memorys allocated per core and onto a multi-dimenional
  // array of cores (core array).

  // Note: As an alternative to the specialized side handling for internal boundary updates
  // consider a loop over all of the array while skipping the interior regions for better
  // performance.  This could use the general technique demonstrated above for general
  // internal core edge updates without any significant loss in parformance (maybe).
  // This might permit more general internal application of the stencil operator to the
  // edges of array sections on each core.  Such code might be more easily generated then
  // the more complex form of edge code in the much larger functions (below).

     int numberOfCores_X = array.get_coreArraySize(0);
     int numberOfCores_Y = array.get_coreArraySize(1);
     int numberOfCores_Z = array.get_coreArraySize(2);

  // Use OpenMP to support the threading...
  // NOTE(review): only the outer core_X loop is parallelized (the inner
  // worksharing pragma is commented out) -- presumably intentional.
#pragma omp parallel for
     for (int core_X = 0; core_X < numberOfCores_X; core_X++)
        {
       //#pragma omp for
          for (int core_Y = 0; core_Y < numberOfCores_Y; core_Y++)
             {
               for (int core_Z = 0; core_Z < numberOfCores_Z; core_Z++)
                  {
                 // This lifts out loop invariant portions of the code.
                    Core<T> & coreMemory = array.getCore(core_X,core_Y,core_Z);

                 // Lift out loop invariant local array size values.
                    int sizeX = array.getCore(core_X,core_Y,core_Z).coreArrayNeighborhoodSizes_3D[1][1][1][0];
                    int sizeY = array.getCore(core_X,core_Y,core_Z).coreArrayNeighborhoodSizes_3D[1][1][1][1];
                    int sizeZ = array.getCore(core_X,core_Y,core_Z).coreArrayNeighborhoodSizes_3D[1][1][1][2];
#if 0
                    printf ("\nsizeX = %d sizeY = %d sizeZ = %d \n",sizeX,sizeY,sizeZ);
#endif
                 // Shrink the sweep by one at physical (outer) domain boundaries.
                    int base_X  = (coreMemory.boundaryCore_3D[0][0] == true) ? 1 : 0;
                    int bound_X = (coreMemory.boundaryCore_3D[0][1] == true) ? sizeX - 2: sizeX - 1;
                    int base_Y  = (coreMemory.boundaryCore_3D[1][0] == true) ? 1 : 0;
                    int bound_Y = (coreMemory.boundaryCore_3D[1][1] == true) ? sizeY - 2: sizeY - 1;
                    int base_Z  = (coreMemory.boundaryCore_3D[2][0] == true) ? 1 : 0;
                    int bound_Z = (coreMemory.boundaryCore_3D[2][1] == true) ? sizeZ - 2: sizeZ - 1;
#if 0
                    printf ("core_X = %d core_Y = %d core_Z = %d base_X = %d bound_X = %d base_Y = %d bound_Y = %d base_Z = %d bound_Z = %d\n",core_X,core_Y,core_Z,base_X,bound_X,base_Y,bound_Y,base_Z, bound_Z);
#endif
                    for (int k = base_Z; k <= bound_Z; k++)
                       {
                         for (int j = base_Y; j <= bound_Y; j++)
                            {
                              for (int i = base_X; i <= bound_X; i++)
                                 {
#if 0
                                   printf ("\ncore_X = %d core_Y = %d i = %d j = %d \n",core_X,core_Y,i,j);
                                   printf ("    array.getCore(core_X,core_Y,core_Z)(i,j,k) = %f \n",array.getCore(core_X,core_Y,core_Z)(i,j,k));
                                   printf ("old_array.getCore(core_X,core_Y,core_Z)(i-1,j,k) = %f \n",old_array.getCore(core_X,core_Y,core_Z)(i-1,j,k));
                                   printf ("old_array.getCore(core_X,core_Y,core_Z)(i+1,j,k) = %f \n",old_array.getCore(core_X,core_Y,core_Z)(i+1,j,k));
                                   printf ("old_array.getCore(core_X,core_Y,core_Z)(i,j-1,k) = %f \n",old_array.getCore(core_X,core_Y,core_Z)(i,j-1,k));
                                   printf ("old_array.getCore(core_X,core_Y,core_Z)(i,j+1,k) = %f \n",old_array.getCore(core_X,core_Y,core_Z)(i,j+1,k));
                                   printf ("old_array.getCore(core_X,core_Y,core_Z)(i,j,k-1) = %f \n",old_array.getCore(core_X,core_Y,core_Z)(i,j-1,k-1));
                                   printf ("old_array.getCore(core_X,core_Y,core_Z)(i,j,k+1) = %f \n",old_array.getCore(core_X,core_Y,core_Z)(i,j+1,k+1));
#endif
                                // 6-point average of the face neighbors from old_array.
                                   array.getCore(core_X,core_Y,core_Z)(i,j,k) =
                                      ( old_array.getCore(core_X,core_Y,core_Z)(i-1,j,k) + old_array.getCore(core_X,core_Y,core_Z)(i+1,j,k) +
                                        old_array.getCore(core_X,core_Y,core_Z)(i,j-1,k) + old_array.getCore(core_X,core_Y,core_Z)(i,j+1,k) +
                                        old_array.getCore(core_X,core_Y,core_Z)(i,j,k-1) + old_array.getCore(core_X,core_Y,core_Z)(i,j,k+1)) / 6.0;
                                 }
                            }
                       }
                  }
             }
        }
   }

// 2D 4-point relaxation using the mid-level per-core accessor interface;
// same boundary-shrinking scheme as the 3D version above (boundaryCore_2D).
template <typename T>
void relax2D_midlevel( MulticoreArray<T> & array, MulticoreArray<T> & old_array )
   {
  // This is a working example of the relaxation associated with the a stencil on the array abstraction
  // mapped to the separate multi-dimensional memorys allocated per core and onto a multi-dimenional
  // array of cores (core array).

  // Note: As an alternative to the specialized side handling for internal boundary updates
  // consider a loop over all of the array while skipping the interior regions for better
  // performance.  This could use the general technique demonstrated above for general
  // internal core edge updates without any significant loss in parformance (maybe).
  // This might permit more general internal application of the stencil operator to the
  // edges of array sections on each core.  Such code might be more easily generated then
  // the more complex form of edge code in the much larger functions (below).

     int numberOfCores_X = array.get_coreArraySize(0);
     int numberOfCores_Y = array.get_coreArraySize(1);

  // Use OpenMP to support the threading...
#pragma omp parallel for
     for (int core_X = 0; core_X < numberOfCores_X; core_X++)
        {
       //#pragma omp for
          for (int core_Y = 0; core_Y < numberOfCores_Y; core_Y++)
             {
            // This lifts out loop invariant portions of the code.
               Core<T> & coreMemory = array.getCore(core_X,core_Y,0);

            // Lift out loop invariant local array size values.
               int sizeX = array.getCore(core_X,core_Y,0).coreArrayNeighborhoodSizes_2D[1][1][0];
               int sizeY = array.getCore(core_X,core_Y,0).coreArrayNeighborhoodSizes_2D[1][1][1];
#if 0
               printf ("\nsizeX = %d sizeY = %d \n",sizeX,sizeY);
#endif
            // Shrink the sweep by one at physical (outer) domain boundaries.
               int base_X  = (coreMemory.boundaryCore_2D[0][0] == true) ? 1 : 0;
               int bound_X = (coreMemory.boundaryCore_2D[0][1] == true) ? sizeX - 2: sizeX - 1;
               int base_Y  = (coreMemory.boundaryCore_2D[1][0] == true) ? 1 : 0;
               int bound_Y = (coreMemory.boundaryCore_2D[1][1] == true) ? sizeY - 2: sizeY - 1;
#if 0
               printf ("core_X = %d core_Y = %d base_X = %d bound_X = %d base_Y = %d bound_Y = %d \n",core_X,core_Y,base_X,bound_X,base_Y,bound_Y);
#endif
               for (int j = base_Y; j <= bound_Y; j++)
                  {
                    for (int i = base_X; i <= bound_X; i++)
                       {
#if 0
                         printf ("\ncore_X = %d core_Y = %d i = %d j = %d \n",core_X,core_Y,i,j);
                         printf ("array.getCore(core_X,core_Y,0)(i,j,0) = %f \n",array.getCore(core_X,core_Y,0)(i,j,0));
                         printf ("old_array.getCore(core_X,core_Y,0)(i-1,j,0) = %f \n",old_array.getCore(core_X,core_Y,0)(i-1,j,0));
                         printf ("old_array.getCore(core_X,core_Y,0)(i+1,j,0) = %f \n",old_array.getCore(core_X,core_Y,0)(i+1,j,0));
                         printf ("old_array.getCore(core_X,core_Y,0)(i,j-1,0) = %f \n",old_array.getCore(core_X,core_Y,0)(i,j-1,0));
                         printf ("old_array.getCore(core_X,core_Y,0)(i,j+1,0) = %f \n",old_array.getCore(core_X,core_Y,0)(i,j+1,0));
#endif
                      // 4-point average of the edge neighbors from old_array.
                         array.getCore(core_X,core_Y,0)(i,j,0) =
                            ( old_array.getCore(core_X,core_Y,0)(i-1,j,0) + old_array.getCore(core_X,core_Y,0)(i+1,j,0) +
                              old_array.getCore(core_X,core_Y,0)(i,j-1,0) + old_array.getCore(core_X,core_Y,0)(i,j+1,0) ) / 4.0;
                       }
                  }
             }
        }
   }

// 3D relaxation through the high-level whole-array operator() interface --
// simplest (and slowest) form, intended for debugging/reference only.
template <typename T>
void relax3D_highlevel( MulticoreArray<T> & array, MulticoreArray<T> & old_array )
   {
  // This is a working example of a 3D stencil demonstrating a high level interface
  // suitable only as debugging support.

#pragma omp parallel for
     for (int k = 1; k < array.get_arraySize(2)-1; k++)
        {
          for (int j = 1; j < array.get_arraySize(1)-1; j++)
             {
               for (int i = 1; i < array.get_arraySize(0)-1; i++)
                  {
                    array(i,j,k) =
                       ( old_array(i-1,j,k) + old_array(i+1,j,k) +
                         old_array(i,j-1,k) + old_array(i,j+1,k) +
                         old_array(i,j,k-1) + old_array(i,j,k+1)) / 6.0;
                  }
             }
        }
   }

// 2D counterpart of the high-level debugging stencil (body continues below).
template <typename T>
void relax2D_highlevel( MulticoreArray<T> & array, MulticoreArray<T> & old_array )
   {
  // This is a working example of a 3D stencil demonstrating a high level interface
  // suitable only as debugging support.
//#pragma omp parallel for
// for (int k = 1; k < array.get_arraySize(2)-1; k++)
// {
#pragma omp parallel for
   for (int j = 1; j < array.get_arraySize(1)-1; j++)
   {
      for (int i = 1; i < array.get_arraySize(0)-1; i++)
      {
      // Average of the four axis neighbors in the k == 0 plane.
         array(i,j,0) = ( old_array(i-1,j,0) + old_array(i+1,j,0) + old_array(i,j-1,0) + old_array(i,j+1,0)) / 4.0;
      }
   }
// }
}

// **********************************************************************
// Jacobi relaxation of radius "dist" on the INTERIOR of the array section owned by
// core "coreID".  The code dispatches on the global array shape (3D, 2D, or 1D
// cases) and only updates points at least "dist" in from the section edges (plus
// any attached halo width on physical-boundary sides); the section edges themselves
// are handled separately (see relax_on_boundary below).  Reads old_array, writes array.
template <typename T>
void relax( int coreID, MulticoreArray<T> & array, MulticoreArray<T> & old_array, int dist)
{
// assert(multicoreArray != NULL);
// Global (whole-array) sizes, used only to classify the problem as 3D/2D/1D.
   const int arraySizeX = array.get_arraySize(0);
   const int arraySizeY = array.get_arraySize(1);
   const int arraySizeZ = array.get_arraySize(2);
   int p = coreID;
   Core<T>* core = array.coreArray[coreID];
// This lifts out loop invariant portions of the code.
// Raw pointers to this core's section data in the new and old arrays.
   T* arraySection = array.get_arraySectionPointers()[p];
   T* old_arraySection = old_array.get_arraySectionPointers()[p];
#if 0
   array.display("before relaxation on interior");
#endif
// NOTE(review): these asserts compare "array" against itself, so they are tautologies
// and can never fire; presumably they were meant to compare array against old_array — verify.
   assert(array.get_coreArraySize(0) == array.get_coreArraySize(0));
   assert(array.get_coreArraySize(1) == array.get_coreArraySize(1));
   assert(array.get_coreArraySize(2) == array.get_coreArraySize(2));
#if 0
   printf ("\nIterate over all cores: p = %d arraySection = %p old_arraySection = %p \n",p,arraySection,old_arraySection);
#endif
   assert(arraySection != NULL);
   assert(old_arraySection != NULL);
#if 0
   printf ("array.get_tableBasedDistribution() = %s \n",array.get_tableBasedDistribution() ? "true" : "false");
#endif
#if 1
   if (arraySizeZ > 2)
   {
      if (arraySizeY > 2 && arraySizeX > 2)
      {
      // This is the case of 3D relaxation
#if 0
         printf ("This is the case of 3D relaxation \n");
      // Iterate on the interior of the section (non-shared memory operation, local to the closest local memory declared for each core).
         printf ("This needs to use sectionSize[0-2] to get the local size instead of the global size! \n");
#endif
      // This is required to avoid valgrind reported errors on some blocks where the local (sectionSize[dim]) is zero.
      // This is likely because of overflow from size_t type variables.
         assert(core->coreArrayNeighborhoodSizes_3D[1][1][1][0] >= 0);
         assert(core->coreArrayNeighborhoodSizes_3D[1][1][1][1] >= 0);
         assert(core->coreArrayNeighborhoodSizes_3D[1][1][1][2] >= 0);
      // Loop bounds over the local section: start "dist" in from each side, widened by the
      // halo width on sides that lie on the physical array boundary when a halo is attached.
      // ([1][1][1] indexes this core's own entry of the 3x3x3 neighborhood size table.)
         int LBX = (array.hasAttachedHalo() && core->boundaryCore_3D[0][0]) ? (dist+array.get_haloWidth(0)) : dist;
         int LBY = (array.hasAttachedHalo() && core->boundaryCore_3D[1][0]) ? (dist+array.get_haloWidth(1)) : dist;
         int LBZ = (array.hasAttachedHalo() && core->boundaryCore_3D[2][0]) ? (dist+array.get_haloWidth(2)) : dist;
         int UBX = (array.hasAttachedHalo() && core->boundaryCore_3D[0][1]) ? (core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist-array.get_haloWidth(0)) : (core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist);
         int UBY = (array.hasAttachedHalo() && core->boundaryCore_3D[1][1]) ? (core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist-array.get_haloWidth(1)) : (core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist);
         int UBZ = (array.hasAttachedHalo() && core->boundaryCore_3D[2][1]) ? (core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist-array.get_haloWidth(2)) : (core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist);
      // Only sections with a non-empty interior (size > 2*dist in every axis) are processed.
         if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > (2*dist) && core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > (2*dist) && core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > (2*dist))
         {
            for (int k = LBZ; k < UBZ; k++)
            {
               for (int j = LBY; j < UBY; j++)
               {
                  for (int i = LBX; i < UBX; i++)
                  {
                  // This is the dominant computation for each array section per core. The compiler will use the
                  // user's code to derive the code that will be put here.
#if 0
                     printf ("p= %d Indexing 3D array (i,j,k) = (%d,%d,%d) \n",p,i,j,k);
#endif
#if 0
                     arraySection[index3D(i,j,k)] = (old_arraySection[index3D(i-1,j,k)] + old_arraySection[index3D(i+1,j,k)] + old_arraySection[index3D(i,j-1,k)] + old_arraySection[index3D(i,j+1,k)] + old_arraySection[index3D(i,j,k-1)] + old_arraySection[index3D(i,j,k+1)]) / 6.0;
#else
                  // Sum the six face neighbors at every offset d = 1..dist (6*dist points
                  // in total) and normalize.
                     T tmp(0.0);
                     for(int d=1; d <=dist; d++)
                     {
                        tmp += (old_arraySection[index3D(i-d,j,k)] + old_arraySection[index3D(i+d,j,k)] + old_arraySection[index3D(i,j-d,k)] + old_arraySection[index3D(i,j+d,k)] + old_arraySection[index3D(i,j,k-d)] + old_arraySection[index3D(i,j,k+d)]);
                     }
                     arraySection[index3D(i,j,k)] = tmp / (6.0*dist);
#endif
                  }
               }
            }
         }
      }
      else
      {
#if 0
         printf ("3D array too small (still no interior) \n");
#endif
      }
   }
   else
   {
      if (arraySizeZ == 2)
      {
#if 0
         printf ("3D array (with size 2 in Z axis) too small (still no interior) \n");
#endif
      }
      else
      {
         if (arraySizeY > 2)
         {
            if (arraySizeX > 2)
            {
            // This is the case of 2D relaxation
#if 0
               printf ("This is the case of 2D relaxation (interior) p = %d \n",p);
               printf ("core->coreArrayNeighborhoodSizes_2D[1][1][0] = %d core->coreArrayNeighborhoodSizes_2D[1][1][1] = %d \n",core->coreArrayNeighborhoodSizes_2D[1][1][0],core->coreArrayNeighborhoodSizes_2D[1][1][1]);
#endif
            // Same bound computation as the 3D case above, restricted to X and Y.
               int LBX = (array.hasAttachedHalo() && core->boundaryCore_2D[0][0]) ? (dist+array.get_haloWidth(0)) : dist;
               int LBY = (array.hasAttachedHalo() && core->boundaryCore_2D[1][0]) ? (dist+array.get_haloWidth(1)) : dist;
               int UBX = (array.hasAttachedHalo() && core->boundaryCore_2D[0][1]) ? (core->coreArrayNeighborhoodSizes_2D[1][1][0]-dist-array.get_haloWidth(0)) : (core->coreArrayNeighborhoodSizes_2D[1][1][0]-dist);
               int UBY = (array.hasAttachedHalo() && core->boundaryCore_2D[1][1]) ? (core->coreArrayNeighborhoodSizes_2D[1][1][1]-dist-array.get_haloWidth(1)) : (core->coreArrayNeighborhoodSizes_2D[1][1][1]-dist);
            // The core array may be higher dimensional than the array and if so then the local size along
            // the Z axis may be zero. If so, then we don't want to process the local array section.
            // if (sectionSize[2] == 1)
            // if (sectionSize[0] > 2 && sectionSize[1] > 2 && sectionSize[2] == 1)
            // if (core->coreArrayNeighborhoodSizes_2D[1][1][0] > 2 && core->coreArrayNeighborhoodSizes_2D[1][1][1] > 2)
            // NOTE(review): the [2] index below reads a third element of a "_2D" sizes entry
            // (presumably the Z extent) — confirm this is intentional and in range.
               if (core->coreArrayNeighborhoodSizes_2D[1][1][0] > (dist*2) && core->coreArrayNeighborhoodSizes_2D[1][1][1] > (dist*2) && core->coreArrayNeighborhoodSizes_2D[1][1][2] == 1)
               {
                  for (int j = LBY; j < UBY; j++)
                  {
                     for (int i = LBX; i < UBX; i++)
                     {
                     // This is the dominant computation for each array section per core. The compiler will use the
                     // user's code to derive the code that will be put here.
#if 1
                     // NOTE(review): unlike the 3D case above, the loop over d uses fixed
                     // offsets (i-1, i+1, j-1, j+1) instead of i-d/i+d/j-d/j+d, so for
                     // dist > 1 the same four neighbors are summed dist times — likely a bug.
                        T tmp(0.0);
                        for(int d=1; d <=dist; d++)
                        {
                           tmp += (old_arraySection[index2D(i-1,j)] + old_arraySection[index2D(i+1,j)] + old_arraySection[index2D(i,j-1)] + old_arraySection[index2D(i,j+1)]);
                        }
                        arraySection[index2D(i,j)] = tmp / (4.0 * dist);
#endif
                     }
                  }
               }
            }
            else
            {
#if 0
               printf ("2D array too small (still no interior) \n");
#endif
            }
         }
         else
         {
            if (arraySizeY == 2)
            {
#if 0
               printf ("2D array (with size 2 in Y axis) too small (still no interior) \n");
#endif
            }
            else
            {
               if (arraySizeX > 2)
               {
               // This is the case of 1D relaxation
#if 0
                  printf ("This is the case of 1D relaxation sectionSize[0] = %d \n",sectionSize[0]);
#endif
               // The core array may be higher dimensional than the array and if so then the local size along either
               // the Y or Z axis may be zero. If so, then we don't want to process the local array section.
               // if (sectionSize[1] == 1 && sectionSize[2] == 1)
               // if (sectionSize[0] > 2 && sectionSize[1] == 1 && sectionSize[2] == 1)
               // if (sectionSize[0] > 0 && ((sectionSize[1] == 1 && sectionSize[2] == 1) || array.get_tableBasedDistribution() == false))
                  if (core->coreArrayNeighborhoodSizes_1D[1][0] > 0 && (core->coreArrayNeighborhoodSizes_1D[1][1] == 1 && core->coreArrayNeighborhoodSizes_1D[1][2] == 1))
                  {
                  // NOTE(review): bounds here use a fixed margin of 1 rather than "dist",
                  // unlike the 2D/3D cases — confirm this is intended for dist > 1.
                     for (int i = 1; i < core->coreArrayNeighborhoodSizes_1D[1][0]-1; i++)
                     {
                     // This is the dominant computation for each array section per core. The compiler will use the
                     // user's code to derive the code that will be put here.
#if 0
                        printf ("i = %d old_arraySection[index1D(i-1)=%d] = %f \n",i,index1D(i-1),arraySection[index1D(i-1)]);
                        printf ("i = %d old_arraySection[index1D(i+1)=%d] = %f \n",i,index1D(i+1),arraySection[index1D(i+1)]);
#endif
#if 1
                     // NOTE(review): fixed offsets i-1/i+1 (not i-d/i+d), and the /2.0 inside
                     // the loop combined with the /(2.0*dist) below divides by 4.0*dist in
                     // total — inconsistent with the 2D/3D normalizations.  Likely a bug.
                        T tmp(0.0);
                        for(int d=1; d <=dist; d++)
                        {
                           tmp += (old_arraySection[index1D(i-1)] + old_arraySection[index1D(i+1)]) / 2.0;
                        }
                        arraySection[index1D(i)] = tmp / (2.0*dist);
#endif
#if 0
                        printf ("arraySection[index1D(i=%d)=%d] = %f \n",i,index1D(i),arraySection[index1D(i)]);
#endif
                     }
                  }
                  else
                  {
#if 0
                     printf ("The local size for this arraySection is zero in either the Y or Z axis sectionSize[1] = %d sectionSize[2] = %d \n",sectionSize[1],sectionSize[2]);
#endif
                  }
               }
               else
               {
               // This array does not have an interior upon which to relax.
#if 0
                  printf ("1D array too small (still no interior) \n");
#endif
               }
            }
         }
      }
   }
#endif
#if 0
   array.display("after relaxation on interior: array");
// old_array.display("after relaxation on interior: old_array");
#endif
}

// Companion to relax(): updates the points of core "coreID"'s section edges that
// relax() skipped, reading neighbor sections through the per-core section pointer
// table when the stencil reaches across an internal (core-to-core) boundary.
template <typename T>
void relax_on_boundary( int coreID, MulticoreArray<T> & array, MulticoreArray<T> & old_array, int dist )
{
// assert(multicoreArray != NULL);
// Global (whole-array) sizes, used only to classify the problem as 3D/2D/1D.
   const int arraySizeX = array.get_arraySize(0);
   const int arraySizeY = array.get_arraySize(1);
   const int arraySizeZ = array.get_arraySize(2);
   int p = coreID;
   Core<T>* core = array.coreArray[coreID];
// This lifts out loop invariant portions of the code.
// Full pointer tables are needed here (unlike relax) to reach neighbor sections.
   T** arraySectionPointers = array.get_arraySectionPointers();
   T** old_arraySectionPointers = old_array.get_arraySectionPointers();
   assert(arraySectionPointers != NULL);
   assert(old_arraySectionPointers != NULL);
   T* arraySection = array.get_arraySectionPointers()[p];
   T* old_arraySection = old_array.get_arraySectionPointers()[p];
#if 0
   printf ("\nIterate over all cores: p = %d arraySection = %p old_arraySection = %p \n",p,arraySection,old_arraySection);
#endif
   assert(arraySection != NULL);
   assert(old_arraySection != NULL);
#if 1
// **************************************************************
// Fixup internal boundaries of the memory allocated to each core.
// **************************************************************
#if 0
   printf ("Fixup boundaries: p = %d Array size (%d,%d,%d) sectionSize(%d,%d,%d) coreArray(%d,%d,%d) \n",p,arraySizeX,arraySizeY,arraySizeZ,sectionSize[0],sectionSize[1],sectionSize[2],array.get_coreArraySize(0),array.get_coreArraySize(1),array.get_coreArraySize(2));
#endif
   if (arraySizeZ > (2*dist))
   {
      if (arraySizeY > (2*dist) && arraySizeX > (2*dist))
      {
      // This is the case of 3D relaxation
#if 0
         printf ("This is the case of 3D relaxation \n");
      // Iterate on the interior of the section (non-shared memory operation, local to the closest local memory declared for each core).
printf ("This needs to use sectionSize[0-2] to get the local size instead of the global size! \n"); #endif if ((core->coreArrayNeighborhoodSizes_3D[1][1][1][0] >= 1 || core->coreArrayNeighborhoodSizes_3D[1][1][1][1] >= 1) && core->coreArrayNeighborhoodSizes_3D[1][1][1][2] >= 1) { // *************************************** // Now process the edges along the X axis // *************************************** if ((core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) && (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1)) { if (core->boundaryCore_3D[1][0] == true) { #if 0 printf ("--- Apply the 3D array abstraction's UPPER boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ ((d>idx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d - idx),k,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(i,(idx-d),k)] ) + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,(idx+d),k)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,idx,k)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,idx,k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,idx,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,idx,k+d)]); } arraySection[index3D(i,idx,k)] = tmp / (6.0*dist); } } } } } if (core->boundaryCore_3D[1][1] == true) { #if 0 printf ("--- Apply the 3D array abstraction's BOTTOM boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += (/* array[Z][Y-1][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1+d),k)] + /* array[Z][Y+1][X] */ ((d>idx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(i,(d-idx-1),k,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1-d),k)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k+d)]); } arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k)] = tmp / (6.0*dist); } } } } } } else { /**TODO: adding special case for X size or Z size is only 1**/ } // *************************************** // Now process the edges along the Y axis // *************************************** if ((core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) && (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1)) { if (core->boundaryCore_3D[0][0] == true) { #if 0 printf ("--- Apply the 3D array abstraction's LEFT boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idx,j-d,k)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(idx,j+d,k)] + /* array[Z][Y][X-1] */ ((d>idx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idx),j,k,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D(idx-d,j,k)])+ /* array[Z][Y][X+1] */ old_arraySection[index3D(idx+d,j,k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idx,j,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(idx,j,k+d)]); } arraySection[index3D(idx,j,k)] = tmp / (6.0*dist); } } } } } if (core->boundaryCore_3D[0][1] == true) { #if 0 printf ("--- Apply the 3D array abstraction's RIGHT boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j-d,k)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j+d,k)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1+d),j,k)] + /* array[Z][Y][X+1] */ ((d>idx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idx-1),j,k,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1-d),j,k)]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j,k+d)]); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j,k)] = tmp / (6.0*dist); } } } } } } else { } // *************************************** // Now process the edges along the Z axis // *************************************** if ((core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) && (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1)) { if (core->boundaryCore_3D[2][0] == true) { #if 0 printf ("--- Apply the 3D array abstraction's LEFT boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(i,j-d,idx)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,j+d,idx)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,j,idx)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,j,idx)] + /* array[Z-1][Y][X] */ ((d>idx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(i,j,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idx),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(i,j,idx-d)]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,j,idx+d)]); } arraySection[index3D(i,j,idx)] = tmp / (6.0*dist); } } } } } if (core->boundaryCore_3D[2][1] == true) { #if 0 printf ("--- Apply the 3D array abstraction's RIGHT boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(i,j-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,j+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1+d))] + /* array[Z+1][Y][X] */ ((d>idx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(i,j,(d-idx-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(i,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1-d))])); } arraySection[index3D(i,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] = tmp / (6.0*dist); } } } } } } else { } // ******************** // End of plane updates // ******************** // ******************** // Edge updates along X axis // ******************** if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if ((core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),idxz,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(i,idxy-d,idxz)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,idxy+d,idxz)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,idxy,idxz)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,idxy,idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(i,idxy,core->coreArrayNeighborhoodSizes_3D[0][1][1][1]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(i,idxy,idxz-d)]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,idxy,idxz+d)]); } arraySection[index3D(i,idxy,idxz)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),core->coreArrayNeighborhoodSizes_3D[1][0][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(i,idxy-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,idxy+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(i,idxy,(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(i,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(i,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ 
old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),idxz)] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(i,(d-idxy-1),idxz,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),idxz)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[0][1][1][1]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz+d))]); } arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* 
array[Z][Y-1][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(i,(d-idxy-1),core->coreArrayNeighborhoodSizes_3D[1][2][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } } else { } } else { } } else { } } } else { } // ******************** // Edge updates along Y axis // ******************** if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idxx=0; idxx <dist; idxx++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,j-d,idxz)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,j+d,idxz)] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),j,idxz,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),j,idxz)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),j,idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(idxx,j,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(idxx,j,(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,j,(idxz+d))]); } arraySection[index3D(idxx,j,idxz)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idxx=0; idxx <dist; idxx++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,j-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,j+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ ((d>idxx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),j,core->coreArrayNeighborhoodSizes_3D[1][1][0][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(idxx,j,(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(idxx,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(idxx,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idxx=0; idxx <dist; idxx++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j-d,idxz)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j+d,idxz)] + /* array[Z][Y][X-1] */ 
old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),j,idxz)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),j,idxz,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),j,idxz)]) + /* array[Z-1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]-(idxx+1),j,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,(idxz+d))]); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,idxz)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idxx=0; idxx <dist; idxx++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ 
old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),j,core->coreArrayNeighborhoodSizes_3D[1][1][2][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]-(idxx+1),j,(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } } else { } } else { } } else { } } } else { } // ******************** // Edge updates along Z axis // ******************** if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 
0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),k,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(idxx,idxy-d,k)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,idxy+d,k)] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),idxy,k,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),idxy,k)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),idxy,k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,idxy,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,idxy,k+d)]); } arraySection[index3D(idxx,idxy,k)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ 
old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),k)] + /* array[Z][Y+1][X] */ ((d > idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(idxx,(d-idxy-1),k,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),k)]) + /* array[Z][Y][X-1] */ ((d > idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),core->coreArrayNeighborhoodSizes_3D[1][1][0][1]-(idxy+1),k,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k+d)]); } arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* 
array[Z][Y-1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),k,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy-d),k)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy+d),k)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),idxy,k)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),idxy,k,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),idxy,k)]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,k+d)]) ; } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,k)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) { T tmp(0.0); for(int d=1; d <=dist; d++) 
{ tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),k)] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]-(idxx+1),(d-idxy-1),k,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),k)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),core->coreArrayNeighborhoodSizes_3D[1][1][2][1]-(idxy+1),k,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k+d)]); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } } else { } // ******************** // End of edge updates // ******************** // ******************** // corners updates // ******************** if ((core->boundaryCore_3D[0][0] == true) || 
(core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),idxz,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(idxx,(idxy-d),idxz)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,(idxy+d),idxz)] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),idxy,idxz,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),idxy,idxz)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),idxy,idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(idxx,idxy,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(idxx,idxy,(idxz-d))] ) + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,idxy,(idxz+d))] ) ; } arraySection[index3D(idxx,idxy,idxz)] = tmp / (6.0*dist) ; } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),core->coreArrayNeighborhoodSizes_3D[1][0][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(idxx,(idxy-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,(idxy+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ ((d>idxx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][0][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(idxx,idxy,(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(idxx,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(idxx,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),idxz)] + /* 
array[Z][Y+1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(idxx,(d-idxy-1),idxz,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),idxz)]) + /* array[Z][Y][X-1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),core->coreArrayNeighborhoodSizes_3D[1][1][0][1]-(idxy+1),idxz,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D(idxx-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D(idxx+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(idxx,core->coreArrayNeighborhoodSizes_3D[0][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz+d))]); } arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && 
(core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxz+1))] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(idxx,(d-idxy-1),core->coreArrayNeighborhoodSizes_3D[1][2][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxz+1))]) + /* array[Z][Y][X-1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),core->coreArrayNeighborhoodSizes_3D[1][1][0][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][0][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(idxx,core->coreArrayNeighborhoodSizes_3D[2][1][1][1]-(idxy+1),(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),idxz,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy-d),idxz)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy+d),idxz)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),idxy,idxz)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),idxy,idxz,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),idxy,idxz)]) + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]-(idxx+1),idxy,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,(idxz+d))] ); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,idxz)] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),core->coreArrayNeighborhoodSizes_3D[1][0][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][2][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]-(idxx+1),idxy,(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),idxz)] + /* array[Z][Y+1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]-(idxx+1),(d-idxy-1),idxz,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),idxz)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),core->coreArrayNeighborhoodSizes_3D[1][1][2][1]-(idxy+1),idxz,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)]) + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[0][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz+d))] ); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]-(idxx+1),(d-idxy-1),core->coreArrayNeighborhoodSizes_3D[1][2][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),core->coreArrayNeighborhoodSizes_3D[1][1][2][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][2][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[2][1][1][1]-(idxy+1),(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } else { } } else { } } else { } } } // ******************** // End of corner updates // ******************** } else { #if 0 printf ("This array segment can't be processed for edge handling because it is too small in at least one axis: p = %d size = (%d,%d,%d) \n",p,core->coreArrayNeighborhoodSizes_2D[1][1][0],core->coreArrayNeighborhoodSizes_2D[1][1][1],core->coreArrayNeighborhoodSizes_2D[1][1][2]); #endif // assert(false); } #if 0 // This is required to avoid valgrind reported errors on some blocks where the local (sectionSize[dim]) is zero. // This is likely because of over flow from size_t type veraibles. if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 2 && core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 2 && core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 2) { for (int k = 1; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-1; k++) { for (int j = 1; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-1; j++) { for (int i = 1; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-1; i++) { // This is the dominant computation for each array section per core. The compiler will use the // user's code to derive the code that will be put here. 
#if 0 printf ("p= %d Indexing 3D array (i,j,k) = (%d,%d,%d) \n",p,i,j,k); #endif #if 0 arraySection[index3D(i,j,k)] = (old_arraySection[index3D(i-1,j-1,k-1)] + old_arraySection[index3D(i+1,j-1,k-1)] + old_arraySection[index3D(i-1,j+1,k-1)] + old_arraySection[index3D(i+1,j+1,k-1)] + old_arraySection[index3D(i-1,j-1,k+1)] + old_arraySection[index3D(i+1,j-1,k+1)] + old_arraySection[index3D(i-1,j+1,k+1)] + old_arraySection[index3D(i+1,j+1,k+1)]) / 8.0; #endif } } } } #endif } else { #if 0 printf ("3D array too small (still no interior) \n"); #endif } } else { if (arraySizeZ == 2) { #if 0 printf ("3D array (with size 2 in Z axis) too small (still no interior) \n"); #endif } else { if (arraySizeY > 2) { if (arraySizeX > 2) { // This is the case of 2D relaxation (along edges) #if 0 printf ("This is the case of 2D relaxation \n"); printf ("This needs to use sectionSize[0-1] to get the local size instead of the global size! \n"); #endif #if 1 // The core array may higher dimensional then the array and if so then the local size along // the Z axis may be zero. If so, then we don't want to process the local array section. // if ((core->coreArrayNeighborhoodSizes_2D[1][1][0] >= 2 || core->coreArrayNeighborhoodSizes_2D[1][1][1] >= 2) && core->coreArrayNeighborhoodSizes_2D[1][1][2] == 1) if ((core->coreArrayNeighborhoodSizes_2D[1][1][0] >= 1 || core->coreArrayNeighborhoodSizes_2D[1][1][1] >= 1) && core->coreArrayNeighborhoodSizes_2D[1][1][2] == 1) { // Handle the internal boundary equations along edges of the 2D arrays. // *************************************** // Now process the edges along the X axis. // *************************************** // if (sectionSize[1] > 1) if (core->coreArrayNeighborhoodSizes_2D[1][1][1] > 1) { #if 0 printf ("-- leftEdgeSection[1] = %s rightEdgeSection[1] = %s \n",leftEdgeSection[1] ? "true" : "false",rightEdgeSection[1] ? 
"true" : "false"); #endif // if (leftEdgeSection[1] == true) if (core->boundaryCore_2D[1][0] == true) { #if 0 printf ("--- Apply the 2D array abstraction's UPPER boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. #if 0 printf ("apply 2D equation at left edge of memory segment core->coreArrayNeighborhoodSizes_2D[0][1][1] = %d \n",core->coreArrayNeighborhoodSizes_2D[0][1][1]); #endif // if (previous_sectionSize[1] > 0) if (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0) { // Upper edge // ***** | ****** | ***** // ---------------------- // ***** | *XXXX* | ***** // ***** | ****** | ***** // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // arraySection[0] = (old_arraySectionPointers[previous_coreIndexInLinearArray][previous_sectionSize[0]-1] + old_arraySection[1]) / 2.0; // for (int i = 1; i < sectionSize[0]-1; i++) for (int i = 1; i < core->coreArrayNeighborhoodSizes_2D[1][1][0]-1; i++) { // arraySection[index2D(i,0)] = (old_arraySectionPointers[previous_coreIndexInLinearArray][index2D(i-1,previous_sectionSize[1]-1)] + old_arraySection[index2D(i-1,1)] + // old_arraySectionPointers[previous_coreIndexInLinearArray][index2D(i+1,previous_sectionSize[1]-1)] + old_arraySection[index2D(i+1,1)]) / 4.0; arraySection[index2D(i,0)] = ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(i,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + /* array[Y+1][X] */ old_arraySection[index2D(i,1)] + /* array[Y][X-1] */ old_arraySection[index2D(i-1,0)] + /* array[Y][X+1] */ old_arraySection[index2D(i+1,0)]) / 4.0; } } } // if (rightEdgeSection[1] == true) if (core->boundaryCore_2D[1][1] == true) { #if 0 printf ("--- Apply the array abstraction's LOWER boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. 
// center_stencil_cell_rightEdge = (left_stencil_cell_rightEdge + right_stencil_cell_rightEdge) / 2.0; #if 0 printf ("apply 2D equation at right edge of memory segment core->coreArrayNeighborhoodSizes_2D[2][1][1] = %d \n",core->coreArrayNeighborhoodSizes_2D[2][1][1]); #endif // if (next_sectionSize[1] > 0) if (core->coreArrayNeighborhoodSizes_2D[2][1][1] > 0) { // Lower edge // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // ***** | ****** | ***** // ***** | *XXXX* | ***** // ---------------------- // ***** | ****** | ***** // for (int i = 1; i < sectionSize[0]-1; i++) for (int i = 1; i < core->coreArrayNeighborhoodSizes_2D[1][1][0]-1; i++) { arraySection[index2D(i,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = ( /* array[Y-1][X] */ old_arraySection[index2D(i,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(i,0,core->coreArrayNeighborhoodSizes_2D[2][1][0])] + /* array[Y][X-1] */ old_arraySection[index2D(i-1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] + /* array[Y][X+1] */ old_arraySection[index2D(i+1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]) / 4.0; } } } } else { // Trivial case of only one equation (define this to be left edge; use the associated references). // if (sectionSize[1] == 1) if (core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1) { #if 0 printf ("--- Trivial case of only one 2D equation (define this to be UPPER edge) \n"); printf ("--- core->boundaryCore_2D[1][0] = %s core->boundaryCore_2D[1][1] = %s \n",core->boundaryCore_2D[1][0] ? "true" : "false",core->boundaryCore_2D[1][1] ? "true" : "false"); #endif // if (leftEdgeSection[1] == false && rightEdgeSection[1] == false) if (core->boundaryCore_2D[1][0] == false && core->boundaryCore_2D[1][1] == false) { // This is where user specific code is places within the compiler transformation. 
// if (previous_sectionSize[1] > 0 && next_sectionSize[1] > 0) if (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0 && core->coreArrayNeighborhoodSizes_2D[2][1][1] > 0) { // Upper and Lower edges are the same // ***** | ****** | ***** // ---------------------- // ***** | *XXXX* | ***** // ---------------------- // ***** | ****** | ***** #if 0 printf ("--- Processing trivial case of only one equation 2D (edge in X axis) \n"); #endif // for (int i = 1; i < sectionSize[0]-1; i++) for (int i = 1; i < core->coreArrayNeighborhoodSizes_2D[1][1][0]-1; i++) { arraySection[index2D(i,0)] = ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(i,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(i,0,core->coreArrayNeighborhoodSizes_2D[2][1][0])] + /* array[Y][X-1] */ old_arraySection[index2D(i-1,0)] + /* array[Y][X+1] */ old_arraySection[index2D(i+1,0)]) / 4.0; } } } } else { // assert(sectionSize[1] == 0); assert(core->coreArrayNeighborhoodSizes_2D[1][1][1] == 0); #if 0 printf ("--- core->coreArrayNeighborhoodSizes_2D[1][1][1] == 0: This is the trival case \n"); #endif } } #if 1 // *************************************** // Now process the edges along the Y axis. // *************************************** #if 0 printf ("---+++ Process the edges of the memory section on core index = %d sectionSize[0] = %d previous_sectionSize[0] = %d next_sectionSize[0] = %d \n",p,sectionSize[0],previous_sectionSize[0],next_sectionSize[0]); #endif // if (sectionSize[0] > 1) if (core->coreArrayNeighborhoodSizes_2D[1][1][0] > 1) { #if 0 printf ("---+++ leftEdgeSection[0] = %s rightEdgeSection[0] = %s \n",leftEdgeSection[0] ? "true" : "false",rightEdgeSection[0] ? 
"true" : "false"); #endif // if (leftEdgeSection[0] == true) if (core->boundaryCore_2D[0][0] == true) { #if 0 printf ("---+++ Apply the array abstraction's LEFT boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. // center_stencil_cell_leftEdge = (left_stencil_cell_leftEdge + right_stencil_cell_leftEdge) / 2.0; #if 0 printf ("apply equation at left edge of memory segment core->coreArrayNeighborhoodSizes_2D[1][0][0] = %d \n",core->coreArrayNeighborhoodSizes_2D[1][0][0]); #endif // if (previous_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0) { // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // ***** | X***** | ***** // ***** | X***** | ***** // ***** | X***** | ***** // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // for (int j = 1; j < sectionSize[1]-1; j++) for (int j = 1; j < core->coreArrayNeighborhoodSizes_2D[1][1][1]-1; j++) { #if 1 arraySection[index2D(0,j)] = ( /* array[Y-1][X] */ old_arraySection[index2D(0,j-1)] + /* array[Y+1][X] */ old_arraySection[index2D(0,j+1)] + // /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,j)] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,j,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + /* array[Y][X+1] */ old_arraySection[index2D(1,j)]) / 4.0; #endif } } } // if (rightEdgeSection[0] == true) if (core->boundaryCore_2D[0][1] == true) { #if 0 printf ("---+++ Apply the array abstraction's RIGHT boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. 
// center_stencil_cell_rightEdge = (left_stencil_cell_rightEdge + right_stencil_cell_rightEdge) / 2.0; #if 0 printf ("apply equation at right edge of memory segment core->coreArrayNeighborhoodSizes_2D[1][2][0] = %d \n",core->coreArrayNeighborhoodSizes_2D[1][2][0]); #endif // if (next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_2D[1][2][0] > 0) { // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // ***** | *****X | ***** // ***** | *****X | ***** // ***** | *****X | ***** // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // for (int j = 1; j < sectionSize[1]-1; j++) for (int j = 1; j < core->coreArrayNeighborhoodSizes_2D[1][1][1]-1; j++) { // arraySection[index2D(sectionSize[0]-1,j)] = (old_arraySection[index2D(sectionSize[0]-2,j-1)] + old_arraySectionPointers[next_coreIndexInLinearArray][index2D(0,j-1)] + // old_arraySection[index2D(sectionSize[0]-2,j+1)] + old_arraySectionPointers[next_coreIndexInLinearArray][index2D(0,j+1)]) / 4.0; #if 0 printf ("array[Y][X]: old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j)] = %f \n",old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j)]); printf ("array[Y-1][X]: old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j-1)] = %f \n",old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j-1)]); printf ("array[Y+1][X]: old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j+1)] = %f \n",old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j+1)]); printf ("array[Y][X-1]: old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,j)] = %f \n",old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,j)]); printf ("p = %d core->coreArrayNeighborhoodLinearized_2D[1][2] = %d \n",p,core->coreArrayNeighborhoodLinearized_2D[1][2]); printf ("p = %d core->coreArrayNeighborhoodSizes_2D[1][1][0] = %d 
\n",p,core->coreArrayNeighborhoodSizes_2D[1][1][0]); printf ("p = %d core->coreArrayNeighborhoodSizes_2D[1][2][0] = %d \n",p,core->coreArrayNeighborhoodSizes_2D[1][2][0]); // printf ("array[Y][X+1]: old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,j)] = %f \n",old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,j)]); printf ("array[Y][X+1]: old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,j,core->coreArrayNeighborhoodSizes_2D[1][2][0])] = %f \n", old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,j,core->coreArrayNeighborhoodSizes_2D[1][2][0])]); #endif #if 1 // This fails for some random problem... arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j)] = ( /* array[Y-1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j-1)] + /* array[Y+1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j+1)] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,j)] + // /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,j)]) / 4.0; /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,j,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } } } } else { // Trivial case of only one equation (define this to be left edge; use the associated references). // if (sectionSize[0] == 1) if (core->coreArrayNeighborhoodSizes_2D[1][1][0] == 1) { #if 0 printf ("---+++ Trivial case of only one equation (define this to be left edge; use the associated references) \n"); printf ("---+++ leftEdgeSection[0] = %s rightEdgeSection[0] = %s \n",leftEdgeSection[0] ? "true" : "false",rightEdgeSection[0] ? 
"true" : "false"); #endif // if (leftEdgeSection[0] == false && rightEdgeSection[0] == false) if (core->boundaryCore_2D[0][0] == false && core->boundaryCore_2D[0][1] == false) { // This is where user specific code is places within the compiler transformation. // if (previous_sectionSize[0] > 0 && next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0 && core->coreArrayNeighborhoodSizes_2D[1][2][0] > 0) { // ***** | * | ***** // ---------------------- // ***** | * | ***** // ***** | X | ***** // ***** | X | ***** // ***** | X | ***** // ***** | * | ***** // ---------------------- // ***** | * | ***** #if 0 printf ("---+++ Processing trivial case of only one equation \n"); #endif // for (int j = 1; j < sectionSize[1]-1; j++) for (int j = 1; j < core->coreArrayNeighborhoodSizes_2D[1][1][1]-1; j++) { // arraySection[index2D(0,j)] = (old_arraySectionPointers[previous_coreIndexInLinearArray][index2D(previous_sectionSize[0]-1,j-1)] + old_arraySectionPointers[next_coreIndexInLinearArray][index2D(0,j-1)] + // old_arraySectionPointers[previous_coreIndexInLinearArray][index2D(previous_sectionSize[0]-1,j+1)] + old_arraySectionPointers[next_coreIndexInLinearArray][index2D(0,j+1)]) / 4.0; #if 1 arraySection[index2D(0,j)] = ( /* array[Y-1][X] */ old_arraySection[index2D(0,j-1)] + /* array[Y+1][X] */ old_arraySection[index2D(0,j+1)] + // /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,j)] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,j,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + // /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,j)]) / 4.0; /* array[Y][X+1] */ 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,j,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } } } } else { // assert(sectionSize[0] == 0); assert(core->coreArrayNeighborhoodSizes_2D[1][1][0] == 0); #if 0 printf ("---+++ core->coreArrayNeighborhoodSizes_2D[1][0][0] == 0: This is the trival case \n"); #endif } } // ******************** // End of Y Axis update // ******************** #endif #if 1 // ******************************************** // Now process the corners of the X and Y axis. // ******************************************** #if 0 printf ("---+++ Process the edges of the memory section on core p = %d core->coreArrayNeighborhoodSizes_2D[1][1][0] = %d core->coreArrayNeighborhoodSizes_2D[1][0][0] = %d core->coreArrayNeighborhoodSizes_2D[1][2][0] = %d \n", p,core->coreArrayNeighborhoodSizes_2D[1][1][0],core->coreArrayNeighborhoodSizes_2D[1][0][0],core->coreArrayNeighborhoodSizes_2D[1][2][0]); printf ("Sizes of current processor: core->coreArrayNeighborhoodSizes_2D[1][1] = (%d,%d,%d) \n",core->coreArrayNeighborhoodSizes_2D[1][1][0],core->coreArrayNeighborhoodSizes_2D[1][1][1],core->coreArrayNeighborhoodSizes_2D[1][1][2]); #endif // First X Axis logic if (core->coreArrayNeighborhoodSizes_2D[1][1][0] > 1) { // Left sice corners if (core->boundaryCore_2D[0][0] == true) { // processor boundary condition enforced here (X axis) } else { if (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0) { // Next Y Axis logic if (core->coreArrayNeighborhoodSizes_2D[1][1][1] > 1) { // Upper corner if (core->boundaryCore_2D[1][0] == true) { // processor boundary condition enforced here (Y axis) } else { assert (core->coreArrayNeighborhoodSizes_2D[0][1][0] > 0); assert (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0); // Upper left corner // ***** | ****** | ***** // ---------------------- // ***** | X***** | ***** // ***** | ****** | ***** // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** 
#if 1 arraySection[index2D(0,0)] = // ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][index2D(0,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1)] + ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(0,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + /* array[Y+1][X] */ old_arraySection[index2D(1,0)] + // /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,0)] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,0,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + /* array[Y][X+1] */ old_arraySection[index2D(0,1)]) / 4.0; #endif } // Lower corner if (core->boundaryCore_2D[1][1] == true) { // processor boundary condition enforced here (Y axis) } else { assert (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0); assert (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0); // Lower left corner // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // ***** | ****** | ***** // ***** | X***** | ***** // ---------------------- // ***** | ****** | ***** #if 0 printf ("--- array[Y][X]: arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = %f \n",arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]); printf ("old_array[Y][X]: old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = %f \n",old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]); printf ("old_array[Y-1][X]: old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] = %f \n",old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)]); printf ("old_array[Y+1][X]: 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][index2D(0,0)] = %f \n",old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][index2D(0,0)]); printf ("old_array[Y][X-1]: old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,core->coreArrayNeighborhoodSizes_2D[1][0][1]-1)] = %f \n", old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,core->coreArrayNeighborhoodSizes_2D[1][0][1]-1)]); printf ("array[Y][X+1]: old_arraySection[index2D(1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = %f \n",old_arraySection[index2D(1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]); #endif #if 1 arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = ( /* array[Y-1][X] */ old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][index2D(0,0)] + // /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,core->coreArrayNeighborhoodSizes_2D[1][0][1]-1)] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,core->coreArrayNeighborhoodSizes_2D[1][0][1]-1,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + /* array[Y][X+1] */ old_arraySection[index2D(1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]) / 4.0; #endif #if 0 printf ("--- array[Y][X]: arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = %f \n",arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]); #endif } } else { // printf ("core->coreArrayNeighborhoodSizes_2D[1][1][1] = %d \n",core->coreArrayNeighborhoodSizes_2D[1][1][1]); if (core->coreArrayNeighborhoodSizes_2D[1][1][1] == 
1) { // Case of upper and lower left corners are the same point // ***** | ****** | ***** // ---------------------- // ***** | X***** | ***** // ---------------------- // ***** | ****** | ***** #if 1 arraySection[index2D(0,0)] = // ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][index2D(0,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1)] + ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(0,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + // /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,0)] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,0,core->coreArrayNeighborhoodSizes_2D[1][2][0])] + // /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,0)] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,0,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + /* array[Y][X+1] */ old_arraySection[index2D(1,0)]) / 4.0; #endif } } } else { printf ("We don't support the size on the adjacent being zero! \n"); assert(false); } } // Right side corners if (core->boundaryCore_2D[0][1] == true) { // Can we test if this is realy a boundary? } else { // if (next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_2D[1][2][0] > 0) { // printf ("Right boundary corner not implemented! 
\n"); // Next Y Axis logic if (core->coreArrayNeighborhoodSizes_2D[1][1][1] > 1) { // Upper corner if (core->boundaryCore_2D[1][0] == true) { // processor boundary condition enforced here (Y axis) } else { assert (core->coreArrayNeighborhoodSizes_2D[0][1][0] > 0); assert (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0); // Upper right corner // ***** | ****** | ***** // ---------------------- // ***** | *****X | ***** // ***** | ****** | ***** // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** #if 1 arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,0)] = // ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][index2D(core->coreArrayNeighborhoodSizes_2D[0][1][0]-1,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1)] + ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[0][1][0]-1,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + /* array[Y+1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,1)] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,0)] + // /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,0)]) / 4.0; /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,0,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } // Lower corner if (core->boundaryCore_2D[1][1] == true) { // processor boundary condition enforced here (Y axis) } else { assert (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0); assert (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0); // Lower right corner // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // ***** | ****** | ***** // ***** | *****X | ***** // ---------------------- // ***** | ****** | 
***** #if 1 arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = ( /* array[Y-1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] + // /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][index2D(core->coreArrayNeighborhoodSizes_2D[2][1][0]-1,0)] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[2][1][0]-1,0,core->coreArrayNeighborhoodSizes_2D[2][1][0])] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] + // /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,core->coreArrayNeighborhoodSizes_2D[2][1][1]-1)]) / 4.0; /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,core->coreArrayNeighborhoodSizes_2D[2][1][1]-1,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } } else { // printf ("core->coreArrayNeighborhoodSizes_2D[1][1][1] = %d \n",core->coreArrayNeighborhoodSizes_2D[1][1][1]); if (core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1) { // Case of upper and lower right corners are the same point // ***** | ****** | ***** // ---------------------- // ***** | *****X | ***** // ---------------------- // ***** | ****** | ***** #if 1 arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,0)] = // ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][index2D(core->coreArrayNeighborhoodSizes_2D[0][1][0]-1,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1)] + ( /* array[Y-1][X] */ 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[0][1][0]-1,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + // /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][index2D(core->coreArrayNeighborhoodSizes_2D[2][1][0]-1,0)] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[2][1][0]-1,0,core->coreArrayNeighborhoodSizes_2D[2][1][0])] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,0)] + // /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,0)]) / 4.0; /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,0,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } } } else { printf ("We don't support the size on the adjacent being zero! \n"); assert(false); } } } else { // Trivial case of only one equation (define this to be left edge; use the associated references). 
#if 0 printf ("Case of core->coreArrayNeighborhoodSizes_2D[1][1][0] == %d \n",core->coreArrayNeighborhoodSizes_2D[1][1][0]); printf ("Case of core->coreArrayNeighborhoodSizes_2D[1][1][1] == %d \n",core->coreArrayNeighborhoodSizes_2D[1][1][1]); #endif // assert(core->coreArrayNeighborhoodSizes_2D[1][1][0] == 1); // assert(core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1); // if (sectionSize[0] == 1) // if (core->coreArrayNeighborhoodSizes_2D[1][1][0] == 1) if (core->coreArrayNeighborhoodSizes_2D[1][1][0] == 1 && core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1) { // printf ("Case of core->coreArrayNeighborhoodSizes_2D[1][1][0] == 1 && core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1\n"); // if (leftEdgeSection[0] == false && rightEdgeSection[0] == false) // if (core->boundaryCore_2D[0][0] == false && core->boundaryCore_2D[0][1] == false) if (core->boundaryCore_2D[0][0] == false && core->boundaryCore_2D[0][1] == false && core->boundaryCore_2D[1][0] == false && core->boundaryCore_2D[1][1] == false) { // if (previous_sectionSize[0] > 0 && next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0 && core->coreArrayNeighborhoodSizes_2D[1][2][0] > 0) { // printf ("Case of single point boundary not implemented! 
\n"); // ***** | * | ***** // ----------------- // ***** | X | ***** // ----------------- // ***** | * | ***** #if 1 arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,0)] = ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(0,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(0,0,core->coreArrayNeighborhoodSizes_2D[2][1][0])] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,0,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,0,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } #if 0 printf ("Exiting as a test! \n"); assert(false); #endif } } else { // assert(sectionSize[0] == 0); if (core->coreArrayNeighborhoodSizes_2D[1][1][0] != 0) { #if 0 printf ("Warning: p = %d core->coreArrayNeighborhoodSizes_2D[1][1][0] = %d \n",p,core->coreArrayNeighborhoodSizes_2D[1][1][0]); #endif } // assert(core->coreArrayNeighborhoodSizes_2D[1][1][0] == 0); assert(core->coreArrayNeighborhoodSizes_2D[1][1][0] <= 1); #if 0 printf ("---+++ core->coreArrayNeighborhoodSizes_2D[1][0][0] == 0: This is the trival case \n"); #endif } } // ************************************************** // End of processing the corners of the X and Y axis. 
// ************************************************** #endif } else { #if 0 printf ("This array segment can't be processed for edge handling because it is too small in at least one axis: p = %d size = (%d,%d,%d) \n",p,core->coreArrayNeighborhoodSizes_2D[1][1][0],core->coreArrayNeighborhoodSizes_2D[1][1][1],core->coreArrayNeighborhoodSizes_2D[1][1][2]); #endif // assert(false); } #endif } else { #if 0 printf ("2D array too small (still no interior) \n"); #endif } } else { if (arraySizeY == 2) { #if 0 printf ("2D array (with size 2 in Y axis) too small (still no interior) \n"); #endif } else { if (arraySizeX > 2) { // This is the case of 1D relaxation #if 0 printf ("--- This is the case of 1D relaxation sectionSize[0] = %d \n",sectionSize[0]); #endif // The core array may higher dimensional then the array and if so then the local size along either // the Y or Z axis may be zero. If so, then we don't want to process the local array section. // if (sectionSize[1] == 1 && sectionSize[2] == 1) // if (sectionSize[0] > 0 && ((sectionSize[1] == 1 && sectionSize[2] == 1) || array.get_tableBasedDistribution() == false)) // if (sectionSize[0] > 0 && (sectionSize[1] == 1 && sectionSize[2] == 1) ) if (core->coreArrayNeighborhoodSizes_1D[1][0] > 0 && (core->coreArrayNeighborhoodSizes_1D[1][1] == 1 && core->coreArrayNeighborhoodSizes_1D[1][2] == 1) ) { #if 0 printf ("--- Process the edges of the memory section on core index = %d sectionSize[0] = %d previous_sectionSize[0] = %d next_sectionSize[0] = %d \n",p,sectionSize[0],previous_sectionSize[0],next_sectionSize[0]); #endif // if (sectionSize[0] > 1) if (core->coreArrayNeighborhoodSizes_1D[1][0] > 1) { #if 0 printf ("-- leftEdgeSection[0] = %s rightEdgeSection[0] = %s \n",leftEdgeSection[0] ? "true" : "false",rightEdgeSection[0] ? 
"true" : "false"); #endif // if (leftEdgeSection[0] == true) if (core->boundaryCore_1D[0] == true) { #if 0 printf ("--- Apply the array abstraction's LEFT boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. // center_stencil_cell_leftEdge = (left_stencil_cell_leftEdge + right_stencil_cell_leftEdge) / 2.0; #if 0 printf ("apply equation at left edge of memory segment previous_sectionSize[0] = %d \n",previous_sectionSize[0]); #endif // if (previous_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_1D[0][0] > 0) { // arraySection[0] = (old_arraySectionPointers[previous_coreIndexInLinearArray][core->coreArrayNeighborhoodSizes_1D[0][0]-1] + old_arraySection[1]) / 2.0; arraySection[0] = (old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_1D[0]][core->coreArrayNeighborhoodSizes_1D[0][0]-1] + old_arraySection[1]) / 2.0; } } // if (rightEdgeSection[0] == true) if (core->boundaryCore_1D[1] == true) { #if 0 printf ("--- Apply the array abstraction's RIGHT boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. // center_stencil_cell_rightEdge = (left_stencil_cell_rightEdge + right_stencil_cell_rightEdge) / 2.0; #if 0 printf ("apply equation at right edge of memory segment next_sectionSize[0] = %d \n",next_sectionSize[0]); #endif // if (next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_1D[2][0] > 0) { // arraySection[sectionSize[0]-1] = (old_arraySection[sectionSize[0]-2] + old_arraySectionPointers[next_coreIndexInLinearArray][0]) / 2.0; arraySection[core->coreArrayNeighborhoodSizes_1D[1][0]-1] = (old_arraySection[core->coreArrayNeighborhoodSizes_1D[1][0]-2] + old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_1D[2]][0]) / 2.0; } } } else { // Trivial case of only one equation (define this to be left edge; use the associated references). 
// if (sectionSize[0] == 1) if (core->coreArrayNeighborhoodSizes_1D[1][0] == 1) { #if 0 printf ("--- Trivial case of only one equation (define this to be left edge; use the associated references) \n"); printf ("--- leftEdgeSection[0] = %s rightEdgeSection[0] = %s \n",leftEdgeSection[0] ? "true" : "false",rightEdgeSection[0] ? "true" : "false"); #endif // if (leftEdgeSection[0] == false && rightEdgeSection[0] == false) if (core->boundaryCore_1D[0] == false && core->boundaryCore_1D[1] == false) { // This is where user specific code is places within the compiler transformation. // if (previous_sectionSize[0] > 0 && next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_1D[0][0] > 0 && core->coreArrayNeighborhoodSizes_1D[2][0] > 0) { #if 0 printf ("--- Processing trivial case of only one equation \n"); #endif // arraySection[0] = (old_arraySectionPointers[previous_coreIndexInLinearArray][previous_sectionSize[0]-1] + old_arraySectionPointers[next_coreIndexInLinearArray][0]) / 2.0; arraySection[0] = (old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_1D[0]][core->coreArrayNeighborhoodSizes_1D[0][0]-1] + old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_1D[2]][0]) / 2.0; } } } else { // assert(sectionSize[0] == 0); assert(core->coreArrayNeighborhoodSizes_1D[1][0] == 0); #if 0 printf ("--- sectionSize[0] == 0: This is the trival case \n"); #endif } } } else { #if 0 printf ("--- The local size for this arraySection is zero in either the Y or Z axis sectionSize[1] = %d sectionSize[2] = %d \n",sectionSize[1],sectionSize[2]); #endif } } else { // This is array does not have an interior upon which to relax. 
#if 0 printf ("--- 1D array too small (still no interior) \n"); #endif } } } } } #endif #if 0 array.display("after relaxation on memory section edges: array"); old_array.display("after relaxation on memory section edges: old_array"); #endif } // ********************************************************************* // Original boundary relaxation without Halo regions in local memory template <typename T> void relax_on_boundary_simplified( int coreID, MulticoreArray<T> & array, MulticoreArray<T> & old_array, int dist ) { const int arraySizeX = array.get_arraySize(0); const int arraySizeY = array.get_arraySize(1); const int arraySizeZ = array.get_arraySize(2); int p = coreID; Core<T>* core = array.coreArray[coreID]; // This lifts out loop invariant portions of the code. T** arraySectionPointers = array.get_arraySectionPointers(); T** old_arraySectionPointers = old_array.get_arraySectionPointers(); assert(arraySectionPointers != NULL); assert(old_arraySectionPointers != NULL); T* arraySection = array.get_arraySectionPointers()[p]; T* old_arraySection = old_array.get_arraySectionPointers()[p]; #if 0 printf ("\nIterate over all cores: p = %d arraySection = %p old_arraySection = %p \n",p,arraySection,old_arraySection); #endif assert(arraySection != NULL); assert(old_arraySection != NULL); // ************************************************************** // Fixup internal bounaries of the memory allocated to each core. 
// ************************************************************** #if 0 printf ("Fixup boundaries: p = %d Array size (%d,%d,%d) sectionSize(%d,%d,%d) coreArray(%d,%d,%d) \n",p,arraySizeX,arraySizeY,arraySizeZ,sectionSize[0],sectionSize[1],sectionSize[2],array.get_coreArraySize(0),array.get_coreArraySize(1),array.get_coreArraySize(2)); #endif if (arraySizeZ > (2*dist)) { if (arraySizeY > (2*dist) && arraySizeX > (2*dist)) { // This is the case of 3D relaxation #if 0 printf ("This is the case of 3D relaxation \n"); // Iterate on the interior of the section (non-shared memory operation, local to the closest local memory declared for each core). printf ("This needs to use sectionSize[0-2] to get the local size instead of the global size! \n"); #endif if ((core->coreArrayNeighborhoodSizes_3D[1][1][1][0] >= 1 || core->coreArrayNeighborhoodSizes_3D[1][1][1][1] >= 1) && core->coreArrayNeighborhoodSizes_3D[1][1][1][2] >= 1) { int base_X = (core->boundaryCore_3D[0][0] == true) ? dist : 0; int bound_X = (core->boundaryCore_3D[0][1] == true) ? core->coreArrayNeighborhoodSizes_3D[1][1][1][0] - (dist): core->coreArrayNeighborhoodSizes_3D[1][1][1][0]; int base_Y = (core->boundaryCore_3D[1][0] == true) ? dist : 0; int bound_Y = (core->boundaryCore_3D[1][1] == true) ? core->coreArrayNeighborhoodSizes_3D[1][1][1][1] - (dist): core->coreArrayNeighborhoodSizes_3D[1][1][1][1]; int base_Z = (core->boundaryCore_3D[2][0] == true) ? dist : 0; int bound_Z = (core->boundaryCore_3D[2][1] == true) ? 
core->coreArrayNeighborhoodSizes_3D[1][1][1][2] - (dist): core->coreArrayNeighborhoodSizes_3D[1][1][1][2]; for (int k = base_Z; k < bound_Z; k++) { for (int j = base_Y; j < bound_Y; j++) { for (int i = base_X; i < bound_X; i++) { if((i >= dist) && (i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist) && (j >= dist) && (j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist) && (k >= dist) && (k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist)) continue; T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y][X-d] */ ((i-d < 0) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]+(i-d),j,k,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D(i-d,j,k)]) + /* array[Z][Y][X+d] */ ((i+d >= core->coreArrayNeighborhoodSizes_3D[1][1][1][0]) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((i+d-core->coreArrayNeighborhoodSizes_3D[1][1][2][0]),j,k,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(i+d,j,k)]) + /* array[Z][Y-d][X] */ ((j-d < 0) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]+(j-d),k,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(i,j-d,k)]) + /* array[Z][Y+d][X] */ ((j+d >= core->coreArrayNeighborhoodSizes_3D[1][1][1][1]) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(i,(j+d-core->coreArrayNeighborhoodSizes_3D[1][2][1][2]),k,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(i,j+d,k)]) + /* array[Z-d][Y][X] */ ((k-d < 0) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(i,j,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]+(k-d),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(i,j,k-d)]) + /* array[Z+d][Y][X] */ ((k+d >= core->coreArrayNeighborhoodSizes_3D[1][1][1][2]) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(i,j,(k+d-core->coreArrayNeighborhoodSizes_3D[2][1][1][2]),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(i,j,k+d)]) ); } arraySection[index3D(i,j,k)] = tmp / (6.0*dist); } } } } } } } // ********************************************************************* // New boundary relaxation with DetachedHalo regions in local memory // This is applied only when user construct MulticoreArray with postive halo region size. template <typename T> void relax_on_detachedhalo_boundary( int coreID, MulticoreArray<T> & array, MulticoreArray<T> & old_array, int dist ) { const int arraySizeX = array.get_arraySize(0); const int arraySizeY = array.get_arraySize(1); const int arraySizeZ = array.get_arraySize(2); int p = coreID; Core<T>* core = array.coreArray[coreID]; // This lifts out loop invariant portions of the code. 
T** arraySectionPointers = array.get_arraySectionPointers(); T** old_arraySectionPointers = old_array.get_arraySectionPointers(); assert(arraySectionPointers != NULL); assert(old_arraySectionPointers != NULL); T* arraySection = array.get_arraySectionPointers()[p]; T* old_arraySection = old_array.get_arraySectionPointers()[p]; #if 0 printf ("\nIterate over all cores: p = %d arraySection = %p old_arraySection = %p \n",p,arraySection,old_arraySection); #endif assert(arraySection != NULL); assert(old_arraySection != NULL); #if 1 // ************************************************************** // Fixup internal bounaries of the memory allocated to each core. // ************************************************************** #if 0 printf ("Fixup boundaries: p = %d Array size (%d,%d,%d) sectionSize(%d,%d,%d) coreArray(%d,%d,%d) \n",p,arraySizeX,arraySizeY,arraySizeZ,sectionSize[0],sectionSize[1],sectionSize[2],array.get_coreArraySize(0),array.get_coreArraySize(1),array.get_coreArraySize(2)); #endif if (arraySizeZ > (2*dist)) { if (arraySizeY > (2*dist) && arraySizeX > (2*dist)) { // This is the case of 3D relaxation #if 0 printf ("This is the case of 3D relaxation \n"); // Iterate on the interior of the section (non-shared memory operation, local to the closest local memory declared for each core). printf ("This needs to use sectionSize[0-2] to get the local size instead of the global size! 
\n"); #endif if ((core->coreArrayNeighborhoodSizes_3D[1][1][1][0] >= 1 || core->coreArrayNeighborhoodSizes_3D[1][1][1][1] >= 1) && core->coreArrayNeighborhoodSizes_3D[1][1][1][2] >= 1) { T** old_haloXBottom = old_array.get_haloSectionPointers(0,0); T** old_haloXTop = old_array.get_haloSectionPointers(0,1); T** old_haloYBottom = old_array.get_haloSectionPointers(1,0); T** old_haloYTop = old_array.get_haloSectionPointers(1,1); T** old_haloZBottom = old_array.get_haloSectionPointers(2,0); T** old_haloZTop = old_array.get_haloSectionPointers(2,1); // *************************************** // Now process the edges along the X axis // *************************************** if ((core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) && (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1)) { if (core->boundaryCore_3D[1][0] == true) { #if 0 printf ("--- Apply the 3D array abstraction's UPPER boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(1)[coreID]) tmp += ( /* array[Z][Y-1][X] */ ((d>idx) ? old_haloYBottom[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(old_array.get_haloWidth(1)-(d-idx)),k,(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(i,(idx-d),k)] ) + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,(idx+d),k)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,idx,k)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,idx,k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,idx,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,idx,k+d)]); else tmp += ( /* array[Z][Y-1][X] */ ((d>idx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d - idx),k,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(i,(idx-d),k)] ) + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,(idx+d),k)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,idx,k)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,idx,k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,idx,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,idx,k+d)]); } arraySection[index3D(i,idx,k)] = tmp / (6.0*dist); } } } } } if (core->boundaryCore_3D[1][1] == true) { #if 0 printf ("--- Apply the 3D array abstraction's BOTTOM boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(1)[coreID]) tmp += (/* array[Z][Y-1][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1+d),k)] + /* array[Z][Y+1][X] */ ((d>idx) ? 
old_haloYTop[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(d-idx-1),k,(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1-d),k)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k+d)]); else tmp += (/* array[Z][Y-1][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1+d),k)] + /* array[Z][Y+1][X] */ ((d>idx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(i,(d-idx-1),k,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1-d),k)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k+d)]); } arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k)] = tmp / (6.0*dist); } } } } } } else { /**TODO: adding special case for X size or Z size is only 1**/ } // *************************************** // Now process the edges along the Y axis // *************************************** if 
((core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) && (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1)) { if (core->boundaryCore_3D[0][0] == true) { #if 0 printf ("--- Apply the 3D array abstraction's LEFT boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idx,j-d,k)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(idx,j+d,k)] + /* array[Z][Y][X-1] */ ((d>idx) ? old_haloXBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)-(d-idx)),j,k,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D(idx-d,j,k)])+ /* array[Z][Y][X+1] */ old_arraySection[index3D(idx+d,j,k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idx,j,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(idx,j,k+d)]); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idx,j-d,k)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(idx,j+d,k)] + /* array[Z][Y][X-1] */ ((d>idx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idx),j,k,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D(idx-d,j,k)])+ /* array[Z][Y][X+1] */ old_arraySection[index3D(idx+d,j,k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idx,j,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(idx,j,k+d)]); } arraySection[index3D(idx,j,k)] = tmp / (6.0*dist); } } } } } if (core->boundaryCore_3D[0][1] == true) { #if 0 printf ("--- Apply the 3D array abstraction's RIGHT boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j-d,k)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j+d,k)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1+d),j,k)] + /* array[Z][Y][X+1] */ ((d>idx) ? 
old_haloXTop[coreID][otherCore_index3D((d-idx-1),j,k,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1-d),j,k)]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j,k+d)]); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j-d,k)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j+d,k)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1+d),j,k)] + /* array[Z][Y][X+1] */ ((d>idx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idx-1),j,k,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1-d),j,k)]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j,k+d)]); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j,k)] = tmp / (6.0*dist); } } } } } } else { } // *************************************** // Now process the edges along the Z axis // *************************************** if ((core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) && (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1)) { if (core->boundaryCore_3D[2][0] == true) { #if 0 printf ("--- Apply the 3D array abstraction's LEFT boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0) { for (int j = dist; j < 
core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(i,j-d,idx)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,j+d,idx)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,j,idx)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,j,idx)] + /* array[Z-1][Y][X] */ ((d>idx) ? old_haloZBottom[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(j+old_array.get_haloWidth(1)),(old_array.get_haloWidth(2)-(d-idx)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(i,j,idx-d)]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,j,idx+d)]); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(i,j-d,idx)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,j+d,idx)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,j,idx)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,j,idx)] + /* array[Z-1][Y][X] */ ((d>idx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(i,j,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idx),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(i,j,idx-d)]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,j,idx+d)]); } arraySection[index3D(i,j,idx)] = tmp / (6.0*dist); } } } } } if (core->boundaryCore_3D[2][1] == true) { #if 0 printf ("--- Apply the 3D array abstraction's RIGHT boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(i,j-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,j+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1+d))] + /* array[Z+1][Y][X] */ ((d>idx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(i,j,(d-idx-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(i,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1-d))])); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(i,j-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,j+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1+d))] + /* array[Z+1][Y][X] */ ((d>idx) ? old_haloZTop[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(j+old_array.get_haloWidth(1)),(d-idx-1),(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(i,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1-d))])); } arraySection[index3D(i,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] = tmp / (6.0*dist); } } } } } } else { } // ******************** // End of plane updates // ******************** // ******************** // Edge updates along X axis // ******************** if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if ((core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if 
(core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_haloYBottom[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(old_array.get_haloWidth(1)-(d-idxy)),idxz,(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(i,idxy-d,idxz)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,idxy+d,idxz)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,idxy,idxz)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,idxy,idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? old_haloZBottom[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),old_array.get_haloWidth(1)+idxy,(old_array.get_haloWidth(2)-(d-idxz)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(i,idxy,idxz-d)]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,idxy,idxz+d)]); else tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),idxz,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(i,idxy-d,idxz)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,idxy+d,idxz)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,idxy,idxz)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,idxy,idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(i,idxy,core->coreArrayNeighborhoodSizes_3D[0][1][1][1]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(i,idxy,idxz-d)]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,idxy,idxz+d)]); } arraySection[index3D(i,idxy,idxz)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? 
old_haloYBottom[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(old_array.get_haloWidth(1)-(d-idxy)),(core->coreArrayNeighborhoodSizes_3D[1][0][1][2]-(idxz+1)),(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+2*old_array.get_haloWidth(0)),(old_array.get_haloWidth(1)))] : old_arraySection[index3D(i,idxy-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,idxy+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_haloZTop[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(old_array.get_haloWidth(1)+idxy),(d-idxz-1),(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(i,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); else tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),core->coreArrayNeighborhoodSizes_3D[1][0][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(i,idxy-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,idxy+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(i,idxy,(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(i,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(i,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { 
if(old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),idxz)] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_haloYTop[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(d-idxy-1),idxz,(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),idxz)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? old_haloZBottom[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[1][1][1][1]+old_array.get_haloWidth(1)-(idxy+1)),(old_array.get_haloWidth(2)-(d-idxz)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz+d))]); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),idxz)] + /* array[Z][Y+1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(i,(d-idxy-1),idxz,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),idxz)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[0][1][1][1]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz+d))]); } arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ 
old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_haloYTop[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(d-idxy-1),(core->coreArrayNeighborhoodSizes_3D[1][2][1][2]-(idxz+1)),(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_haloZTop[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[1][1][1][1]+old_array.get_haloWidth(1)-(idxy+1)),(d-idxz-1),(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(i,(d-idxy-1),core->coreArrayNeighborhoodSizes_3D[1][2][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } } else { } } else { } } else { } } } else { } // ******************** // Edge updates along Y axis // ******************** if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if 
(core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idxx=0; idxx <dist; idxx++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,j-d,idxz)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,j+d,idxz)] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_haloXBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)-(d-idxx)),j,idxz,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),j,idxz)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),j,idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? old_haloZBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)+idxx),(j+old_array.get_haloWidth(1)),(old_array.get_haloWidth(2)-(d-idxz)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(idxx,j,(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,j,(idxz+d))]); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,j-d,idxz)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,j+d,idxz)] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),j,idxz,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),j,idxz)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),j,idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(idxx,j,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(idxx,j,(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,j,(idxz+d))]); } arraySection[index3D(idxx,j,idxz)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idxx=0; idxx <dist; idxx++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,j-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,j+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ ((d>idxx) ? 
old_haloXBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)-(d-idxx)),j,(core->coreArrayNeighborhoodSizes_3D[1][1][0][2]-(idxz+1)),old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_haloZTop[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,(j+old_array.get_haloWidth(1)),(d-idxz-1),(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(idxx,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,j-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,j+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),j,core->coreArrayNeighborhoodSizes_3D[1][1][0][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(idxx,j,(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(idxx,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(idxx,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idxx=0; idxx <dist; idxx++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j-d,idxz)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j+d,idxz)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),j,idxz)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_haloXTop[coreID][otherCore_index3D((d-idxx-1),j,idxz,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),j,idxz)]) + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_haloZBottom[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+old_array.get_haloWidth(0)-(idxx+1)),(j+old_array.get_haloWidth(1)),(old_array.get_haloWidth(2)-(d-idxz)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,(idxz+d))]); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j-d,idxz)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j+d,idxz)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),j,idxz)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),j,idxz,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),j,idxz)]) + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]-(idxx+1),j,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,(idxz+d))]); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,idxz)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idxx=0; idxx <dist; idxx++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ ((d>idxx) ? 
old_haloXTop[coreID][otherCore_index3D((d-idxx-1),j,(core->coreArrayNeighborhoodSizes_3D[1][1][2][2]-(idxz+1)),old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_haloZTop[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+old_array.get_haloWidth(0)-(idxx+1)),(j+old_array.get_haloWidth(1)),(d-idxz-1),(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); else tmp += ( /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ ((d>idxx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),j,core->coreArrayNeighborhoodSizes_3D[1][1][2][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]-(idxx+1),j,(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } } else { } } else { } } else { } } } else { } // ******************** // Edge updates along Z axis // ******************** if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) { T tmp(0.0); for(int d=1; d <=dist; d++) { 
if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID]) tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_haloYBottom[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,(old_array.get_haloWidth(1)-(d-idxy)),k,(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(idxx,idxy-d,k)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,idxy+d,k)] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_haloXBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)-(d-idxx)),idxy,k,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),idxy,k)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),idxy,k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,idxy,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,idxy,k+d)]); else tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),k,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(idxx,idxy-d,k)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,idxy+d,k)] + /* array[Z][Y][X-1] */ ((d>idxx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),idxy,k,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),idxy,k)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),idxy,k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,idxy,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,idxy,k+d)]); } arraySection[index3D(idxx,idxy,k)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),k)] + /* array[Z][Y+1][X] */ ((d > idxy) ? old_haloYTop[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,(d-idxy-1),k,(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),k)]) + /* array[Z][Y][X-1] */ ((d > idxx) ? 
old_haloXBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)-(d-idxx)),core->coreArrayNeighborhoodSizes_3D[1][1][0][1]-(idxy+1),k,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k+d)]); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),k)] + /* array[Z][Y+1][X] */ ((d > idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(idxx,(d-idxy-1),k,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),k)]) + /* array[Z][Y][X-1] */ ((d > idxx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),core->coreArrayNeighborhoodSizes_3D[1][1][0][1]-(idxy+1),k,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k+d)]); } arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID]) tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? 
old_haloYBottom[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+old_array.get_haloWidth(0)-(idxx+1)),(old_array.get_haloWidth(1)-(d-idxy)),k,(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy-d),k)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy+d),k)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),idxy,k)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_haloXTop[coreID][otherCore_index3D((d-idxx-1),idxy,k,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),idxy,k)]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,k+d)]) ; else tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),k,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy-d),k)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy+d),k)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),idxy,k)] + /* array[Z][Y][X+1] */ ((d>idxx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),idxy,k,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),idxy,k)]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,k+d)]) ; } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,k)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),k)] + /* array[Z][Y+1][X] */ ((d>idxy) ? 
old_haloYTop[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+old_array.get_haloWidth(0)-(idxx+1)),(d-idxy-1),k,(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),k)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_haloXTop[coreID][otherCore_index3D((d-idxx-1),(core->coreArrayNeighborhoodSizes_3D[1][1][2][1]-(idxy+1)),k,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k+d)]); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),k)] + /* array[Z][Y+1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]-(idxx+1),(d-idxy-1),k,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),k)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),core->coreArrayNeighborhoodSizes_3D[1][1][2][1]-(idxy+1),k,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k+d)]); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } } else { } // ******************** // End of edge updates // ******************** // ******************** // corners updates // ******************** if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && 
(core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_haloYBottom[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,(old_array.get_haloWidth(1)-(d-idxy)),idxz,(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(idxx,(idxy-d),idxz)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,(idxy+d),idxz)] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_haloXBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)-(d-idxx)),idxy,idxz,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),idxy,idxz)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),idxy,idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? old_haloZBottom[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,old_array.get_haloWidth(1)+idxy,(old_array.get_haloWidth(2)-(d-idxz)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+2*old_array.get_haloWidth(0)))] : old_arraySection[index3D(idxx,idxy,(idxz-d))] ) + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,idxy,(idxz+d))] ) ; else tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),idxz,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(idxx,(idxy-d),idxz)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,(idxy+d),idxz)] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),idxy,idxz,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),idxy,idxz)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),idxy,idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(idxx,idxy,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(idxx,idxy,(idxz-d))] ) + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,idxy,(idxz+d))] ) ; } arraySection[index3D(idxx,idxy,idxz)] = tmp / (6.0*dist) ; } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int 
d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_haloYBottom[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,(old_array.get_haloWidth(1)-(d-idxy)),(core->coreArrayNeighborhoodSizes_3D[1][0][1][2]-(idxz+1)),(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(idxx,(idxy-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,(idxy+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_haloXBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)-(d-idxx)),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][0][2]-(idxz+1),old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_haloZTop[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,old_array.get_haloWidth(1)+idxy,(d-idxz-1),(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(idxx,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); else tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),core->coreArrayNeighborhoodSizes_3D[1][0][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(idxx,(idxy-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,(idxy+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][0][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(idxx,idxy,(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(idxx,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(idxx,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),idxz)] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_haloYTop[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,(d-idxy-1),idxz,(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),idxz)]) + /* array[Z][Y][X-1] */ ((d>idxx) ? 
old_haloXBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)-(d-idxx)),(core->coreArrayNeighborhoodSizes_3D[1][1][0][1]-(idxy+1)),idxz,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D(idxx-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D(idxx+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? old_haloZBottom[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+old_array.get_haloWidth(1)-(idxy+1)),old_array.get_haloWidth(2)-(d-idxz),(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz+d))]); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),idxz)] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(idxx,(d-idxy-1),idxz,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),idxz)]) + /* array[Z][Y][X-1] */ ((d>idxx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),core->coreArrayNeighborhoodSizes_3D[1][1][0][1]-(idxy+1),idxz,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D(idxx-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D(idxx+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(idxx,core->coreArrayNeighborhoodSizes_3D[0][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz+d))]); } arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { 
if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxz+1))] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_haloYTop[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,(d-idxy-1),(core->coreArrayNeighborhoodSizes_3D[1][2][1][2]-(idxz+1)),(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxz+1))]) + /* array[Z][Y][X-1] */ ((d>idxx) ? old_haloXBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)-(d-idxx)),(core->coreArrayNeighborhoodSizes_3D[1][1][0][1]-(idxy+1)),(core->coreArrayNeighborhoodSizes_3D[1][1][0][2]-(idxz+1)),old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? 
old_haloZTop[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+old_array.get_haloWidth(1)-(idxy+1)),(d-idxz-1),(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxz+1))] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(idxx,(d-idxy-1),core->coreArrayNeighborhoodSizes_3D[1][2][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxz+1))]) + /* array[Z][Y][X-1] */ ((d>idxx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),core->coreArrayNeighborhoodSizes_3D[1][1][0][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][0][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(idxx,core->coreArrayNeighborhoodSizes_3D[2][1][1][1]-(idxy+1),(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if 
(core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_haloYBottom[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+old_array.get_haloWidth(0)-(idxx+1)),(old_array.get_haloWidth(1)-(d-idxy)),idxz,(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy-d),idxz)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy+d),idxz)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),idxy,idxz)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_haloXTop[coreID][otherCore_index3D((d-idxx-1),idxy,idxz,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),idxy,idxz)]) + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_haloZBottom[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+old_array.get_haloWidth(0)-(idxx+1)),old_array.get_haloWidth(1)+idxy,(old_array.get_haloWidth(2)-(d-idxz)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,(idxz+d))] ); else tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),idxz,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy-d),idxz)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy+d),idxz)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),idxy,idxz)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),idxy,idxz,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),idxy,idxz)]) + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]-(idxx+1),idxy,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,(idxz+d))] ); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,idxz)] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? 
old_haloYBottom[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+old_array.get_haloWidth(0)-(idxx+1)),(old_array.get_haloWidth(1)-(d-idxy)),(core->coreArrayNeighborhoodSizes_3D[1][0][1][2]-(idxz+1)),(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_haloXTop[coreID][otherCore_index3D((d-idxx-1),idxy,(core->coreArrayNeighborhoodSizes_3D[1][1][2][2]-(idxz+1)),old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_haloZTop[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+old_array.get_haloWidth(0)-(idxx+1)),old_array.get_haloWidth(1)+idxy,(d-idxz-1),(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); else tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),core->coreArrayNeighborhoodSizes_3D[1][0][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][2][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]-(idxx+1),idxy,(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),idxz)] + /* array[Z][Y+1][X] */ ((d>idxy) ? 
old_haloYTop[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+old_array.get_haloWidth(0)-(idxx+1)),(d-idxy-1),idxz,(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),idxz)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_haloXTop[coreID][otherCore_index3D((d-idxx-1),(core->coreArrayNeighborhoodSizes_3D[1][1][2][1]-(idxy+1)),idxz,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)]) + /* array[Z-1][Y][X] */ ((d>idxz) ? old_haloZBottom[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+old_array.get_haloWidth(0)-(idxx+1)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+old_array.get_haloWidth(1)-(idxy+1)),(old_array.get_haloWidth(2)-(d-idxz)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz+d))] ); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),idxz)] + /* array[Z][Y+1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]-(idxx+1),(d-idxy-1),idxz,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),idxz)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),core->coreArrayNeighborhoodSizes_3D[1][1][2][1]-(idxy+1),idxz,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)]) + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[0][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz+d))] ); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ 
((d>idxy) ? old_haloYTop[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+old_array.get_haloWidth(0)-(idxx+1)),(d-idxy-1),(core->coreArrayNeighborhoodSizes_3D[1][2][1][2]-(idxz+1)),(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_haloXTop[coreID][otherCore_index3D((d-idxx-1),(core->coreArrayNeighborhoodSizes_3D[1][1][2][1]-(idxy+1)),(core->coreArrayNeighborhoodSizes_3D[1][1][2][2]-(idxz+1)),old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? 
old_haloZTop[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+old_array.get_haloWidth(0)-(idxx+1)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+old_array.get_haloWidth(1)-(idxy+1)),(d-idxz-1),(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+2*old_array.get_haloWidth(0)))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]-(idxx+1),(d-idxy-1),core->coreArrayNeighborhoodSizes_3D[1][2][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ ((d>idxx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),core->coreArrayNeighborhoodSizes_3D[1][1][2][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][2][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[2][1][1][1]-(idxy+1),(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } else { } } else { } } else { } } } // ******************** // End of corner updates // ******************** } else { #if 0 printf ("This array segment can't be processed for edge handling because it is too small in at least one axis: p = %d size = (%d,%d,%d) \n",p,core->coreArrayNeighborhoodSizes_2D[1][1][0],core->coreArrayNeighborhoodSizes_2D[1][1][1],core->coreArrayNeighborhoodSizes_2D[1][1][2]); #endif // assert(false); } #if 0 // This is required to avoid valgrind 
reported errors on some blocks where the local (sectionSize[dim]) is zero. // This is likely because of over flow from size_t type veraibles. if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 2 && core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 2 && core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 2) { for (int k = 1; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-1; k++) { for (int j = 1; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-1; j++) { for (int i = 1; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-1; i++) { // This is the dominant computation for each array section per core. The compiler will use the // user's code to derive the code that will be put here. #if 0 printf ("p= %d Indexing 3D array (i,j,k) = (%d,%d,%d) \n",p,i,j,k); #endif #if 0 arraySection[index3D(i,j,k)] = (old_arraySection[index3D(i-1,j-1,k-1)] + old_arraySection[index3D(i+1,j-1,k-1)] + old_arraySection[index3D(i-1,j+1,k-1)] + old_arraySection[index3D(i+1,j+1,k-1)] + old_arraySection[index3D(i-1,j-1,k+1)] + old_arraySection[index3D(i+1,j-1,k+1)] + old_arraySection[index3D(i-1,j+1,k+1)] + old_arraySection[index3D(i+1,j+1,k+1)]) / 8.0; #endif } } } } #endif } else { #if 0 printf ("3D array too small (still no interior) \n"); #endif } } else { if (arraySizeZ == 2) { #if 0 printf ("3D array (with size 2 in Z axis) too small (still no interior) \n"); #endif } else { if (arraySizeY > 2) { if (arraySizeX > 2) { // This is the case of 2D relaxation (along edges) T** old_haloXBottom = old_array.get_haloSectionPointers(0,0); T** old_haloXTop = old_array.get_haloSectionPointers(0,1); T** old_haloYBottom = old_array.get_haloSectionPointers(1,0); T** old_haloYTop = old_array.get_haloSectionPointers(1,1); #if 0 printf ("This is the case of 2D relaxation \n"); printf ("This needs to use sectionSize[0-1] to get the local size instead of the global size! 
\n"); #endif #if 1 // The core array may higher dimensional then the array and if so then the local size along // the Z axis may be zero. If so, then we don't want to process the local array section. // if ((core->coreArrayNeighborhoodSizes_2D[1][1][0] >= 2 || core->coreArrayNeighborhoodSizes_2D[1][1][1] >= 2) && core->coreArrayNeighborhoodSizes_2D[1][1][2] == 1) if ((core->coreArrayNeighborhoodSizes_2D[1][1][0] >= 1 || core->coreArrayNeighborhoodSizes_2D[1][1][1] >= 1) && core->coreArrayNeighborhoodSizes_2D[1][1][2] == 1) { // Handle the internal boundary equations along edges of the 2D arrays. // *************************************** // Now process the edges along the X axis. // *************************************** // if (sectionSize[1] > 1) if (core->coreArrayNeighborhoodSizes_2D[1][1][1] > 1) { #if 0 printf ("-- leftEdgeSection[1] = %s rightEdgeSection[1] = %s \n",leftEdgeSection[1] ? "true" : "false",rightEdgeSection[1] ? "true" : "false"); #endif // if (leftEdgeSection[1] == true) if (core->boundaryCore_2D[1][0] == true) { #if 0 printf ("--- Apply the 2D array abstraction's UPPER boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. 
#if 0 printf ("apply 2D equation at left edge of memory segment core->coreArrayNeighborhoodSizes_2D[0][1][1] = %d \n",core->coreArrayNeighborhoodSizes_2D[0][1][1]); #endif // if (previous_sectionSize[1] > 0) if (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0) { // Upper edge // ***** | ****** | ***** // ---------------------- // ***** | *XXXX* | ***** // ***** | ****** | ***** // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // arraySection[0] = (old_arraySectionPointers[previous_coreIndexInLinearArray][previous_sectionSize[0]-1] + old_arraySection[1]) / 2.0; // for (int i = 1; i < sectionSize[0]-1; i++) for (int i = 1; i < core->coreArrayNeighborhoodSizes_2D[1][1][0]-1; i++) { // arraySection[index2D(i,0)] = (old_arraySectionPointers[previous_coreIndexInLinearArray][index2D(i-1,previous_sectionSize[1]-1)] + old_arraySection[index2D(i-1,1)] + // old_arraySectionPointers[previous_coreIndexInLinearArray][index2D(i+1,previous_sectionSize[1]-1)] + old_arraySection[index2D(i+1,1)]) / 4.0; if(old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[0][1]]) arraySection[index2D(i,0)] = ( /* array[Y-1][X] */ old_haloYBottom[coreID][otherCore_index2D((i+old_array.get_haloWidth(0)),(old_array.get_haloWidth(1)-1),(core->coreArrayNeighborhoodSizes_2D[0][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y+1][X] */ old_arraySection[index2D(i,1)] + /* array[Y][X-1] */ old_arraySection[index2D(i-1,0)] + /* array[Y][X+1] */ old_arraySection[index2D(i+1,0)]) / 4.0; else arraySection[index2D(i,0)] = ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(i,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + /* array[Y+1][X] */ old_arraySection[index2D(i,1)] + /* array[Y][X-1] */ old_arraySection[index2D(i-1,0)] + /* array[Y][X+1] */ old_arraySection[index2D(i+1,0)]) / 4.0; } } } // if (rightEdgeSection[1] == true) if 
(core->boundaryCore_2D[1][1] == true) { #if 0 printf ("--- Apply the array abstraction's LOWER boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. // center_stencil_cell_rightEdge = (left_stencil_cell_rightEdge + right_stencil_cell_rightEdge) / 2.0; #if 0 printf ("apply 2D equation at right edge of memory segment core->coreArrayNeighborhoodSizes_2D[2][1][1] = %d \n",core->coreArrayNeighborhoodSizes_2D[2][1][1]); #endif // if (next_sectionSize[1] > 0) if (core->coreArrayNeighborhoodSizes_2D[2][1][1] > 0) { // Lower edge // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // ***** | ****** | ***** // ***** | *XXXX* | ***** // ---------------------- // ***** | ****** | ***** // for (int i = 1; i < sectionSize[0]-1; i++) for (int i = 1; i < core->coreArrayNeighborhoodSizes_2D[1][1][0]-1; i++) { if(old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[2][1]]) arraySection[index2D(i,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = ( /* array[Y-1][X] */ old_arraySection[index2D(i,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] + /* array[Y+1][X] */ old_haloYTop[coreID][otherCore_index2D(i+old_array.get_haloWidth(0),0,(core->coreArrayNeighborhoodSizes_2D[2][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y][X-1] */ old_arraySection[index2D(i-1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] + /* array[Y][X+1] */ old_arraySection[index2D(i+1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]) / 4.0; else arraySection[index2D(i,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = ( /* array[Y-1][X] */ old_arraySection[index2D(i,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(i,0,core->coreArrayNeighborhoodSizes_2D[2][1][0])] + /* array[Y][X-1] */ old_arraySection[index2D(i-1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] + /* array[Y][X+1] */ 
old_arraySection[index2D(i+1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]) / 4.0; } } } } else { // Trivial case of only one equation (define this to be left edge; use the associated references). // if (sectionSize[1] == 1) if (core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1) { #if 0 printf ("--- Trivial case of only one 2D equation (define this to be UPPER edge) \n"); printf ("--- core->boundaryCore_2D[1][0] = %s core->boundaryCore_2D[1][1] = %s \n",core->boundaryCore_2D[1][0] ? "true" : "false",core->boundaryCore_2D[1][1] ? "true" : "false"); #endif // if (leftEdgeSection[1] == false && rightEdgeSection[1] == false) if (core->boundaryCore_2D[1][0] == false && core->boundaryCore_2D[1][1] == false) { // This is where user specific code is places within the compiler transformation. // if (previous_sectionSize[1] > 0 && next_sectionSize[1] > 0) if (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0 && core->coreArrayNeighborhoodSizes_2D[2][1][1] > 0) { // Upper and Lower edges are the same // ***** | ****** | ***** // ---------------------- // ***** | *XXXX* | ***** // ---------------------- // ***** | ****** | ***** #if 0 printf ("--- Processing trivial case of only one equation 2D (edge in X axis) \n"); #endif // for (int i = 1; i < sectionSize[0]-1; i++) for (int i = 1; i < core->coreArrayNeighborhoodSizes_2D[1][1][0]-1; i++) { if(old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[0][1]] && old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[2][1]]) arraySection[index2D(i,0)] = ( /* array[Y-1][X] */ old_haloYBottom[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(i+old_array.get_haloWidth(0),(old_array.get_haloWidth(1)-1),(core->coreArrayNeighborhoodSizes_2D[0][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y+1][X] */ 
old_haloYTop[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(i+old_array.get_haloWidth(0),0,(core->coreArrayNeighborhoodSizes_2D[2][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y][X-1] */ old_arraySection[index2D(i-1,0)] + /* array[Y][X+1] */ old_arraySection[index2D(i+1,0)]) / 4.0; else arraySection[index2D(i,0)] = ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(i,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(i,0,core->coreArrayNeighborhoodSizes_2D[2][1][0])] + /* array[Y][X-1] */ old_arraySection[index2D(i-1,0)] + /* array[Y][X+1] */ old_arraySection[index2D(i+1,0)]) / 4.0; } } } } else { // assert(sectionSize[1] == 0); assert(core->coreArrayNeighborhoodSizes_2D[1][1][1] == 0); #if 0 printf ("--- core->coreArrayNeighborhoodSizes_2D[1][1][1] == 0: This is the trival case \n"); #endif } } #if 1 // *************************************** // Now process the edges along the Y axis. // *************************************** #if 0 printf ("---+++ Process the edges of the memory section on core index = %d sectionSize[0] = %d previous_sectionSize[0] = %d next_sectionSize[0] = %d \n",p,sectionSize[0],previous_sectionSize[0],next_sectionSize[0]); #endif // if (sectionSize[0] > 1) if (core->coreArrayNeighborhoodSizes_2D[1][1][0] > 1) { #if 0 printf ("---+++ leftEdgeSection[0] = %s rightEdgeSection[0] = %s \n",leftEdgeSection[0] ? "true" : "false",rightEdgeSection[0] ? "true" : "false"); #endif // if (leftEdgeSection[0] == true) if (core->boundaryCore_2D[0][0] == true) { #if 0 printf ("---+++ Apply the array abstraction's LEFT boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. 
// center_stencil_cell_leftEdge = (left_stencil_cell_leftEdge + right_stencil_cell_leftEdge) / 2.0; #if 0 printf ("apply equation at left edge of memory segment core->coreArrayNeighborhoodSizes_2D[1][0][0] = %d \n",core->coreArrayNeighborhoodSizes_2D[1][0][0]); #endif // if (previous_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0) { // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // ***** | X***** | ***** // ***** | X***** | ***** // ***** | X***** | ***** // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // for (int j = 1; j < sectionSize[1]-1; j++) for (int j = 1; j < core->coreArrayNeighborhoodSizes_2D[1][1][1]-1; j++) { #if 1 if(old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][0]]) arraySection[index2D(0,j)] = ( /* array[Y-1][X] */ old_arraySection[index2D(0,j-1)] + /* array[Y+1][X] */ old_arraySection[index2D(0,j+1)] + /* array[Y][X-1] */ old_haloXBottom[coreID][otherCore_index2D((old_array.get_haloWidth(0)-1),j,(old_array.get_haloWidth(0)))] + /* array[Y][X+1] */ old_arraySection[index2D(1,j)]) / 4.0; else arraySection[index2D(0,j)] = ( /* array[Y-1][X] */ old_arraySection[index2D(0,j-1)] + /* array[Y+1][X] */ old_arraySection[index2D(0,j+1)] + // /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,j)] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,j,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + /* array[Y][X+1] */ old_arraySection[index2D(1,j)]) / 4.0; #endif } } } // if (rightEdgeSection[0] == true) if (core->boundaryCore_2D[0][1] == true) { #if 0 printf ("---+++ Apply the array abstraction's RIGHT boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. 
// center_stencil_cell_rightEdge = (left_stencil_cell_rightEdge + right_stencil_cell_rightEdge) / 2.0; #if 0 printf ("apply equation at right edge of memory segment core->coreArrayNeighborhoodSizes_2D[1][2][0] = %d \n",core->coreArrayNeighborhoodSizes_2D[1][2][0]); #endif // if (next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_2D[1][2][0] > 0) { // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // ***** | *****X | ***** // ***** | *****X | ***** // ***** | *****X | ***** // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // for (int j = 1; j < sectionSize[1]-1; j++) for (int j = 1; j < core->coreArrayNeighborhoodSizes_2D[1][1][1]-1; j++) { // arraySection[index2D(sectionSize[0]-1,j)] = (old_arraySection[index2D(sectionSize[0]-2,j-1)] + old_arraySectionPointers[next_coreIndexInLinearArray][index2D(0,j-1)] + // old_arraySection[index2D(sectionSize[0]-2,j+1)] + old_arraySectionPointers[next_coreIndexInLinearArray][index2D(0,j+1)]) / 4.0; #if 0 printf ("array[Y][X]: old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j)] = %f \n",old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j)]); printf ("array[Y-1][X]: old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j-1)] = %f \n",old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j-1)]); printf ("array[Y+1][X]: old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j+1)] = %f \n",old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j+1)]); printf ("array[Y][X-1]: old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,j)] = %f \n",old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,j)]); printf ("p = %d core->coreArrayNeighborhoodLinearized_2D[1][2] = %d \n",p,core->coreArrayNeighborhoodLinearized_2D[1][2]); printf ("p = %d core->coreArrayNeighborhoodSizes_2D[1][1][0] = %d 
\n",p,core->coreArrayNeighborhoodSizes_2D[1][1][0]); printf ("p = %d core->coreArrayNeighborhoodSizes_2D[1][2][0] = %d \n",p,core->coreArrayNeighborhoodSizes_2D[1][2][0]); // printf ("array[Y][X+1]: old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,j)] = %f \n",old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,j)]); printf ("array[Y][X+1]: old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,j,core->coreArrayNeighborhoodSizes_2D[1][2][0])] = %f \n", old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,j,core->coreArrayNeighborhoodSizes_2D[1][2][0])]); #endif #if 1 // This fails for some random problem... if(old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][2]]) arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j)] = ( /* array[Y-1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j-1)] + /* array[Y+1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j+1)] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,j)] + /* array[Y][X+1] */ old_haloXTop[coreID][otherCore_index2D(0,j,(old_array.get_haloWidth(0)))]) / 4.0; else arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j)] = ( /* array[Y-1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j-1)] + /* array[Y+1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j+1)] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,j)] + // /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,j)]) / 4.0; /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,j,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } } } 
} else { // Trivial case of only one equation (define this to be left edge; use the associated references). // if (sectionSize[0] == 1) if (core->coreArrayNeighborhoodSizes_2D[1][1][0] == 1) { #if 0 printf ("---+++ Trivial case of only one equation (define this to be left edge; use the associated references) \n"); printf ("---+++ leftEdgeSection[0] = %s rightEdgeSection[0] = %s \n",leftEdgeSection[0] ? "true" : "false",rightEdgeSection[0] ? "true" : "false"); #endif // if (leftEdgeSection[0] == false && rightEdgeSection[0] == false) if (core->boundaryCore_2D[0][0] == false && core->boundaryCore_2D[0][1] == false) { // This is where user specific code is places within the compiler transformation. // if (previous_sectionSize[0] > 0 && next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0 && core->coreArrayNeighborhoodSizes_2D[1][2][0] > 0) { // ***** | * | ***** // ---------------------- // ***** | * | ***** // ***** | X | ***** // ***** | X | ***** // ***** | X | ***** // ***** | * | ***** // ---------------------- // ***** | * | ***** #if 0 printf ("---+++ Processing trivial case of only one equation \n"); #endif // for (int j = 1; j < sectionSize[1]-1; j++) for (int j = 1; j < core->coreArrayNeighborhoodSizes_2D[1][1][1]-1; j++) { // arraySection[index2D(0,j)] = (old_arraySectionPointers[previous_coreIndexInLinearArray][index2D(previous_sectionSize[0]-1,j-1)] + old_arraySectionPointers[next_coreIndexInLinearArray][index2D(0,j-1)] + // old_arraySectionPointers[previous_coreIndexInLinearArray][index2D(previous_sectionSize[0]-1,j+1)] + old_arraySectionPointers[next_coreIndexInLinearArray][index2D(0,j+1)]) / 4.0; #if 1 if(old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][0]] && old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][2]]) arraySection[index2D(0,j)] = ( /* array[Y-1][X] */ old_arraySection[index2D(0,j-1)] + /* array[Y+1][X] */ old_arraySection[index2D(0,j+1)] + /* array[Y][X-1] */ 
old_haloXBottom[coreID][otherCore_index2D((old_array.get_haloWidth(0)-1),j,(old_array.get_haloWidth(0)))] + /* array[Y][X+1] */ old_haloXTop[coreID][otherCore_index2D(0,j,(old_array.get_haloWidth(0)))]) / 4.0; else arraySection[index2D(0,j)] = ( /* array[Y-1][X] */ old_arraySection[index2D(0,j-1)] + /* array[Y+1][X] */ old_arraySection[index2D(0,j+1)] + // /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,j)] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,j,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + // /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,j)]) / 4.0; /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,j,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } } } } else { // assert(sectionSize[0] == 0); assert(core->coreArrayNeighborhoodSizes_2D[1][1][0] == 0); #if 0 printf ("---+++ core->coreArrayNeighborhoodSizes_2D[1][0][0] == 0: This is the trival case \n"); #endif } } // ******************** // End of Y Axis update // ******************** #endif #if 1 // ******************************************** // Now process the corners of the X and Y axis. 
// ******************************************** #if 0 printf ("---+++ Process the edges of the memory section on core p = %d core->coreArrayNeighborhoodSizes_2D[1][1][0] = %d core->coreArrayNeighborhoodSizes_2D[1][0][0] = %d core->coreArrayNeighborhoodSizes_2D[1][2][0] = %d \n", p,core->coreArrayNeighborhoodSizes_2D[1][1][0],core->coreArrayNeighborhoodSizes_2D[1][0][0],core->coreArrayNeighborhoodSizes_2D[1][2][0]); printf ("Sizes of current processor: core->coreArrayNeighborhoodSizes_2D[1][1] = (%d,%d,%d) \n",core->coreArrayNeighborhoodSizes_2D[1][1][0],core->coreArrayNeighborhoodSizes_2D[1][1][1],core->coreArrayNeighborhoodSizes_2D[1][1][2]); #endif // First X Axis logic if (core->coreArrayNeighborhoodSizes_2D[1][1][0] > 1) { // Left sice corners if (core->boundaryCore_2D[0][0] == true) { // processor boundary condition enforced here (X axis) } else { if (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0) { // Next Y Axis logic if (core->coreArrayNeighborhoodSizes_2D[1][1][1] > 1) { // Upper corner if (core->boundaryCore_2D[1][0] == true) { // processor boundary condition enforced here (Y axis) } else { assert (core->coreArrayNeighborhoodSizes_2D[0][1][0] > 0); assert (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0); // Upper left corner // ***** | ****** | ***** // ---------------------- // ***** | X***** | ***** // ***** | ****** | ***** // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** #if 1 if(old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[0][1]] && old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][0]]) arraySection[index2D(0,0)] = ( /* array[Y-1][X] */ old_haloYBottom[coreID][otherCore_index2D(0+old_array.get_haloWidth(0),(old_array.get_haloWidth(1)-1),(core->coreArrayNeighborhoodSizes_2D[0][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y+1][X] */ old_arraySection[index2D(1,0)] + /* array[Y][X-1] */ 
old_haloXBottom[coreID][otherCore_index2D((old_array.get_haloWidth(0)-1),0,(old_array.get_haloWidth(0)))] + /* array[Y][X+1] */ old_arraySection[index2D(0,1)]) / 4.0; else arraySection[index2D(0,0)] = // ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][index2D(0,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1)] + ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(0,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + /* array[Y+1][X] */ old_arraySection[index2D(1,0)] + // /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,0)] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,0,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + /* array[Y][X+1] */ old_arraySection[index2D(0,1)]) / 4.0; #endif } // Lower corner if (core->boundaryCore_2D[1][1] == true) { // processor boundary condition enforced here (Y axis) } else { assert (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0); assert (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0); // Lower left corner // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // ***** | ****** | ***** // ***** | X***** | ***** // ---------------------- // ***** | ****** | ***** #if 0 printf ("--- array[Y][X]: arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = %f \n",arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]); printf ("old_array[Y][X]: old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = %f \n",old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]); printf ("old_array[Y-1][X]: old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] = %f 
\n",old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)]); printf ("old_array[Y+1][X]: old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][index2D(0,0)] = %f \n",old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][index2D(0,0)]); printf ("old_array[Y][X-1]: old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,core->coreArrayNeighborhoodSizes_2D[1][0][1]-1)] = %f \n", old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,core->coreArrayNeighborhoodSizes_2D[1][0][1]-1)]); printf ("array[Y][X+1]: old_arraySection[index2D(1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = %f \n",old_arraySection[index2D(1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]); #endif #if 1 if(old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[2][1]] && old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][0]]) arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = ( /* array[Y-1][X] */ old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] + /* array[Y+1][X] */ old_haloYTop[coreID][otherCore_index2D(0+old_array.get_haloWidth(0),0,(core->coreArrayNeighborhoodSizes_2D[2][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y][X-1] */ old_haloXBottom[coreID][otherCore_index2D((old_array.get_haloWidth(0)-1),(core->coreArrayNeighborhoodSizes_2D[1][0][1]-1),(old_array.get_haloWidth(0)))] + /* array[Y][X+1] */ old_arraySection[index2D(1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]) / 4.0; else arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = ( /* array[Y-1][X] */ old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][index2D(0,0)] + // /* array[Y][X-1] */ 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,core->coreArrayNeighborhoodSizes_2D[1][0][1]-1)] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,core->coreArrayNeighborhoodSizes_2D[1][0][1]-1,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + /* array[Y][X+1] */ old_arraySection[index2D(1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]) / 4.0; #endif #if 0 printf ("--- array[Y][X]: arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = %f \n",arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]); #endif } } else { // printf ("core->coreArrayNeighborhoodSizes_2D[1][1][1] = %d \n",core->coreArrayNeighborhoodSizes_2D[1][1][1]); if (core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1) { // Case of upper and lower left corners are the same point // ***** | ****** | ***** // ---------------------- // ***** | X***** | ***** // ---------------------- // ***** | ****** | ***** #if 1 if(old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[2][1]] && old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][0]] && old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[2][0]]) arraySection[index2D(0,0)] = ( /* array[Y-1][X] */ old_haloYBottom[coreID][otherCore_index2D(0+old_array.get_haloWidth(0),(old_array.get_haloWidth(1)-1),(core->coreArrayNeighborhoodSizes_2D[0][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y+1][X] */ old_haloYTop[coreID][otherCore_index2D(0+old_array.get_haloWidth(0),0,(core->coreArrayNeighborhoodSizes_2D[2][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y][X-1] */ old_haloXBottom[coreID][otherCore_index2D((old_array.get_haloWidth(0)-1),0,(old_array.get_haloWidth(0)))] + /* array[Y][X+1] */ old_arraySection[index2D(1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]) / 
4.0; else arraySection[index2D(0,0)] = // ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][index2D(0,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1)] + ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(0,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + // /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,0)] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,0,core->coreArrayNeighborhoodSizes_2D[1][2][0])] + // /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,0)] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,0,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + /* array[Y][X+1] */ old_arraySection[index2D(1,0)]) / 4.0; #endif } } } else { printf ("We don't support the size on the adjacent being zero! \n"); assert(false); } } // Right side corners if (core->boundaryCore_2D[0][1] == true) { // Can we test if this is realy a boundary? } else { // if (next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_2D[1][2][0] > 0) { // printf ("Right boundary corner not implemented! 
\n"); // Next Y Axis logic if (core->coreArrayNeighborhoodSizes_2D[1][1][1] > 1) { // Upper corner if (core->boundaryCore_2D[1][0] == true) { // processor boundary condition enforced here (Y axis) } else { assert (core->coreArrayNeighborhoodSizes_2D[0][1][0] > 0); assert (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0); // Upper right corner // ***** | ****** | ***** // ---------------------- // ***** | *****X | ***** // ***** | ****** | ***** // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** #if 1 if(old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[0][1]] && old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][2]]) arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,0)] = ( /* array[Y-1][X] */ old_haloYBottom[coreID][otherCore_index2D((core->coreArrayNeighborhoodSizes_2D[0][1][0]-1+old_array.get_haloWidth(0)),(old_array.get_haloWidth(1)-1),(core->coreArrayNeighborhoodSizes_2D[0][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y+1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,1)] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,0)] + /* array[Y][X+1] */ old_haloXTop[coreID][otherCore_index2D(0,0,(old_array.get_haloWidth(0)))]) / 4.0; else arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,0)] = // ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][index2D(core->coreArrayNeighborhoodSizes_2D[0][1][0]-1,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1)] + ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[0][1][0]-1,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + /* array[Y+1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,1)] + /* array[Y][X-1] */ 
old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,0)] + // /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,0)]) / 4.0; /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,0,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } // Lower corner if (core->boundaryCore_2D[1][1] == true) { // processor boundary condition enforced here (Y axis) } else { assert (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0); assert (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0); // Lower right corner // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // ***** | ****** | ***** // ***** | *****X | ***** // ---------------------- // ***** | ****** | ***** #if 1 if(old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[2][1]] && old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][2]]) arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = ( /* array[Y-1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] + /* array[Y+1][X] */ old_haloYTop[coreID][otherCore_index2D((core->coreArrayNeighborhoodSizes_2D[2][1][0]-1+old_array.get_haloWidth(0)),0,(core->coreArrayNeighborhoodSizes_2D[2][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] + /* array[Y][X+1] */ old_haloXTop[coreID][otherCore_index2D(0,(core->coreArrayNeighborhoodSizes_2D[2][1][1]-1),(old_array.get_haloWidth(0)))]) / 4.0; else arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = ( /* array[Y-1][X] */ 
old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] + // /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][index2D(core->coreArrayNeighborhoodSizes_2D[2][1][0]-1,0)] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[2][1][0]-1,0,core->coreArrayNeighborhoodSizes_2D[2][1][0])] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] + // /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,core->coreArrayNeighborhoodSizes_2D[2][1][1]-1)]) / 4.0; /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,core->coreArrayNeighborhoodSizes_2D[2][1][1]-1,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } } else { // printf ("core->coreArrayNeighborhoodSizes_2D[1][1][1] = %d \n",core->coreArrayNeighborhoodSizes_2D[1][1][1]); if (core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1) { // Case of upper and lower right corners are the same point // ***** | ****** | ***** // ---------------------- // ***** | *****X | ***** // ---------------------- // ***** | ****** | ***** #if 1 if(old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[0][1]] && old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[2][1]] && old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][2]]) arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,0)] = ( /* array[Y-1][X] */ old_haloYBottom[coreID][otherCore_index2D((core->coreArrayNeighborhoodSizes_2D[0][1][0]-1+old_array.get_haloWidth(0)),(old_array.get_haloWidth(1)-1),(core->coreArrayNeighborhoodSizes_2D[0][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y+1][X] */ 
old_haloYTop[coreID][otherCore_index2D((core->coreArrayNeighborhoodSizes_2D[2][1][0]-1+old_array.get_haloWidth(0)),0,(core->coreArrayNeighborhoodSizes_2D[2][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,0)] + /* array[Y][X+1] */ old_haloXTop[coreID][otherCore_index2D(0,0,(old_array.get_haloWidth(0)))]) / 4.0; else arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,0)] = // ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][index2D(core->coreArrayNeighborhoodSizes_2D[0][1][0]-1,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1)] + ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[0][1][0]-1,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + // /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][index2D(core->coreArrayNeighborhoodSizes_2D[2][1][0]-1,0)] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[2][1][0]-1,0,core->coreArrayNeighborhoodSizes_2D[2][1][0])] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,0)] + // /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,0)]) / 4.0; /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,0,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } } } else { printf ("We don't support the size on the adjacent being zero! \n"); assert(false); } } } else { // Trivial case of only one equation (define this to be left edge; use the associated references). 
#if 0 printf ("Case of core->coreArrayNeighborhoodSizes_2D[1][1][0] == %d \n",core->coreArrayNeighborhoodSizes_2D[1][1][0]); printf ("Case of core->coreArrayNeighborhoodSizes_2D[1][1][1] == %d \n",core->coreArrayNeighborhoodSizes_2D[1][1][1]); #endif // assert(core->coreArrayNeighborhoodSizes_2D[1][1][0] == 1); // assert(core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1); // if (sectionSize[0] == 1) // if (core->coreArrayNeighborhoodSizes_2D[1][1][0] == 1) if (core->coreArrayNeighborhoodSizes_2D[1][1][0] == 1 && core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1) { // printf ("Case of core->coreArrayNeighborhoodSizes_2D[1][1][0] == 1 && core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1\n"); // if (leftEdgeSection[0] == false && rightEdgeSection[0] == false) // if (core->boundaryCore_2D[0][0] == false && core->boundaryCore_2D[0][1] == false) if (core->boundaryCore_2D[0][0] == false && core->boundaryCore_2D[0][1] == false && core->boundaryCore_2D[1][0] == false && core->boundaryCore_2D[1][1] == false) { // if (previous_sectionSize[0] > 0 && next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0 && core->coreArrayNeighborhoodSizes_2D[1][2][0] > 0) { // printf ("Case of single point boundary not implemented! 
\n"); // ***** | * | ***** // ----------------- // ***** | X | ***** // ----------------- // ***** | * | ***** #if 1 if(old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[0][1]] && old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][0]] && old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[2][1]] && old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][2]]) arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,0)] = ( /* array[Y-1][X] */ old_haloYBottom[coreID][otherCore_index2D((0+old_array.get_haloWidth(0)),(old_array.get_haloWidth(1)-1),(core->coreArrayNeighborhoodSizes_2D[0][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y+1][X] */ old_haloYTop[coreID][otherCore_index2D((0+old_array.get_haloWidth(0)),0,(core->coreArrayNeighborhoodSizes_2D[2][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y][X-1] */ old_haloXBottom[coreID][otherCore_index2D((old_array.get_haloWidth(0)-1),(old_array.get_haloWidth(1)-1),(old_array.get_haloWidth(0)))] + /* array[Y][X+1] */ old_haloXTop[coreID][otherCore_index2D(0,0,(old_array.get_haloWidth(0)))]) / 4.0; else arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,0)] = ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(0,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(0,0,core->coreArrayNeighborhoodSizes_2D[2][1][0])] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,0,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,0,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 
4.0; #endif } #if 0 printf ("Exiting as a test! \n"); assert(false); #endif } } else { // assert(sectionSize[0] == 0); if (core->coreArrayNeighborhoodSizes_2D[1][1][0] != 0) { #if 0 printf ("Warning: p = %d core->coreArrayNeighborhoodSizes_2D[1][1][0] = %d \n",p,core->coreArrayNeighborhoodSizes_2D[1][1][0]); #endif } // assert(core->coreArrayNeighborhoodSizes_2D[1][1][0] == 0); assert(core->coreArrayNeighborhoodSizes_2D[1][1][0] <= 1); #if 0 printf ("---+++ core->coreArrayNeighborhoodSizes_2D[1][0][0] == 0: This is the trival case \n"); #endif } } // ************************************************** // End of processing the corners of the X and Y axis. // ************************************************** #endif } else { #if 0 printf ("This array segment can't be processed for edge handling because it is too small in at least one axis: p = %d size = (%d,%d,%d) \n",p,core->coreArrayNeighborhoodSizes_2D[1][1][0],core->coreArrayNeighborhoodSizes_2D[1][1][1],core->coreArrayNeighborhoodSizes_2D[1][1][2]); #endif // assert(false); } #endif } else { #if 0 printf ("2D array too small (still no interior) \n"); #endif } } else { if (arraySizeY == 2) { #if 0 printf ("2D array (with size 2 in Y axis) too small (still no interior) \n"); #endif } else { if (arraySizeX > 2) { // This is the case of 1D relaxation T** old_haloXBottom = old_array.get_haloSectionPointers(0,0); T** old_haloXTop = old_array.get_haloSectionPointers(0,1); #if 0 printf ("--- This is the case of 1D relaxation sectionSize[0] = %d \n",sectionSize[0]); #endif // The core array may higher dimensional then the array and if so then the local size along either // the Y or Z axis may be zero. If so, then we don't want to process the local array section. 
// if (sectionSize[1] == 1 && sectionSize[2] == 1) // if (sectionSize[0] > 0 && ((sectionSize[1] == 1 && sectionSize[2] == 1) || array.get_tableBasedDistribution() == false)) // if (sectionSize[0] > 0 && (sectionSize[1] == 1 && sectionSize[2] == 1) ) if (core->coreArrayNeighborhoodSizes_1D[1][0] > 0 && (core->coreArrayNeighborhoodSizes_1D[1][1] == 1 && core->coreArrayNeighborhoodSizes_1D[1][2] == 1) ) { #if 0 printf ("--- Process the edges of the memory section on core index = %d sectionSize[0] = %d previous_sectionSize[0] = %d next_sectionSize[0] = %d \n",p,sectionSize[0],previous_sectionSize[0],next_sectionSize[0]); #endif // if (sectionSize[0] > 1) if (core->coreArrayNeighborhoodSizes_1D[1][0] > 1) { #if 0 printf ("-- leftEdgeSection[0] = %s rightEdgeSection[0] = %s \n",leftEdgeSection[0] ? "true" : "false",rightEdgeSection[0] ? "true" : "false"); #endif // if (leftEdgeSection[0] == true) if (core->boundaryCore_1D[0] == true) { #if 0 printf ("--- Apply the array abstraction's LEFT boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. 
// center_stencil_cell_leftEdge = (left_stencil_cell_leftEdge + right_stencil_cell_leftEdge) / 2.0; #if 0 printf ("apply equation at left edge of memory segment previous_sectionSize[0] = %d \n",previous_sectionSize[0]); #endif // if (previous_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_1D[0][0] > 0) { if(old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_1D[0]]) arraySection[0] = (old_haloXBottom[coreID][0] + old_arraySection[1]) / 2.0; else // arraySection[0] = (old_arraySectionPointers[previous_coreIndexInLinearArray][core->coreArrayNeighborhoodSizes_1D[0][0]-1] + old_arraySection[1]) / 2.0; arraySection[0] = (old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_1D[0]][core->coreArrayNeighborhoodSizes_1D[0][0]-1] + old_arraySection[1]) / 2.0; } } // if (rightEdgeSection[0] == true) if (core->boundaryCore_1D[1] == true) { #if 0 printf ("--- Apply the array abstraction's RIGHT boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. 
// center_stencil_cell_rightEdge = (left_stencil_cell_rightEdge + right_stencil_cell_rightEdge) / 2.0; #if 0 printf ("apply equation at right edge of memory segment next_sectionSize[0] = %d \n",next_sectionSize[0]); #endif // if (next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_1D[2][0] > 0) { if(old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_1D[2]]) arraySection[core->coreArrayNeighborhoodSizes_1D[1][0]-1] = (old_arraySection[core->coreArrayNeighborhoodSizes_1D[1][0]-2] + old_haloXTop[coreID][0]) / 2.0; else // arraySection[sectionSize[0]-1] = (old_arraySection[sectionSize[0]-2] + old_arraySectionPointers[next_coreIndexInLinearArray][0]) / 2.0; arraySection[core->coreArrayNeighborhoodSizes_1D[1][0]-1] = (old_arraySection[core->coreArrayNeighborhoodSizes_1D[1][0]-2] + old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_1D[2]][0]) / 2.0; } } } else { // Trivial case of only one equation (define this to be left edge; use the associated references). // if (sectionSize[0] == 1) if (core->coreArrayNeighborhoodSizes_1D[1][0] == 1) { #if 0 printf ("--- Trivial case of only one equation (define this to be left edge; use the associated references) \n"); printf ("--- leftEdgeSection[0] = %s rightEdgeSection[0] = %s \n",leftEdgeSection[0] ? "true" : "false",rightEdgeSection[0] ? "true" : "false"); #endif // if (leftEdgeSection[0] == false && rightEdgeSection[0] == false) if (core->boundaryCore_1D[0] == false && core->boundaryCore_1D[1] == false) { // This is where user specific code is places within the compiler transformation. 
// if (previous_sectionSize[0] > 0 && next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_1D[0][0] > 0 && core->coreArrayNeighborhoodSizes_1D[2][0] > 0) { #if 0 printf ("--- Processing trivial case of only one equation \n"); #endif // arraySection[0] = (old_arraySectionPointers[previous_coreIndexInLinearArray][previous_sectionSize[0]-1] + old_arraySectionPointers[next_coreIndexInLinearArray][0]) / 2.0; if(old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_1D[0]] && old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_1D[2]]) arraySection[0] = (old_haloXBottom[coreID][0] + old_haloXTop[coreID][0]) / 2.0; else arraySection[0] = (old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_1D[0]][core->coreArrayNeighborhoodSizes_1D[0][0]-1] + old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_1D[2]][0]) / 2.0; } } } else { // assert(sectionSize[0] == 0); assert(core->coreArrayNeighborhoodSizes_1D[1][0] == 0); #if 0 printf ("--- sectionSize[0] == 0: This is the trival case \n"); #endif } } } else { #if 0 printf ("--- The local size for this arraySection is zero in either the Y or Z axis sectionSize[1] = %d sectionSize[2] = %d \n",sectionSize[1],sectionSize[2]); #endif } } else { // This is array does not have an interior upon which to relax. #if 0 printf ("--- 1D array too small (still no interior) \n"); #endif } } } } } #endif #if 0 array.display("after relaxation on memory section edges: array"); old_array.display("after relaxation on memory section edges: old_array"); #endif } template <typename T> void relax_on_detachedhalo_boundary_simplified( int coreID, MulticoreArray<T> & array, MulticoreArray<T> & old_array, int dist ) { const int arraySizeX = array.get_arraySize(0); const int arraySizeY = array.get_arraySize(1); const int arraySizeZ = array.get_arraySize(2); int p = coreID; Core<T>* core = array.coreArray[coreID]; // This lifts out loop invariant portions of the code. 
// Body of relax_on_detachedhalo_boundary_simplified<T>() (signature immediately above):
// one averaging sweep over the outer shell of this core's array section, reading
// neighbor values either from the core's own old_array section or — when an index
// falls outside the local section — from old_array's pre-exchanged "detached halo"
// buffers. "array" is written; "old_array" is only read.
T** arraySectionPointers = array.get_arraySectionPointers();
T** old_arraySectionPointers = old_array.get_arraySectionPointers();
assert(arraySectionPointers != NULL);
assert(old_arraySectionPointers != NULL);

// Raw storage for this core's piece of each distributed array (p == coreID).
T* arraySection = array.get_arraySectionPointers()[p];
T* old_arraySection = old_array.get_arraySectionPointers()[p];

#if 0
printf ("\nIterate over all cores: p = %d arraySection = %p old_arraySection = %p \n",p,arraySection,old_arraySection);
#endif

assert(arraySection != NULL);
assert(old_arraySection != NULL);

// **************************************************************
// Fixup internal boundaries of the memory allocated to each core.
// **************************************************************
#if 0
printf ("Fixup boundaries: p = %d Array size (%d,%d,%d) sectionSize(%d,%d,%d) coreArray(%d,%d,%d) \n",p,arraySizeX,arraySizeY,arraySizeZ,sectionSize[0],sectionSize[1],sectionSize[2],array.get_coreArraySize(0),array.get_coreArraySize(1),array.get_coreArraySize(2));
#endif

// Only the genuinely 3D case is handled: the global array must be wider than the
// 2*dist stencil reach in every axis.
if (arraySizeZ > (2*dist))
   {
     if (arraySizeY > (2*dist) && arraySizeX > (2*dist))
        {
       // This is the case of 3D relaxation
#if 0
          printf ("This is the case of 3D relaxation \n");
       // Iterate on the interior of the section (non-shared memory operation, local to the closest local memory declared for each core).
          printf ("This needs to use sectionSize[0-2] to get the local size instead of the global size! \n");
#endif
       // Skip cores whose local section is empty.
       // NOTE(review): the X and Y extents are combined with ||, not && — confirm
       // that a section empty in X but not Y really should be processed.
          if ((core->coreArrayNeighborhoodSizes_3D[1][1][1][0] >= 1 || core->coreArrayNeighborhoodSizes_3D[1][1][1][1] >= 1) && core->coreArrayNeighborhoodSizes_3D[1][1][1][2] >= 1)
             {
            // Detached halo buffers of old_array, per axis (0=X,1=Y,2=Z) and side (0=bottom,1=top).
               T** old_haloXBottom = old_array.get_haloSectionPointers(0,0);
               T** old_haloXTop    = old_array.get_haloSectionPointers(0,1);
               T** old_haloYBottom = old_array.get_haloSectionPointers(1,0);
               T** old_haloYTop    = old_array.get_haloSectionPointers(1,1);
               T** old_haloZBottom = old_array.get_haloSectionPointers(2,0);
               T** old_haloZTop    = old_array.get_haloSectionPointers(2,1);

            // Loop bounds: cores on a physical (global) boundary clip "dist" cells off
            // that side so the global boundary condition is left untouched.
               int base_X  = (core->boundaryCore_3D[0][0] == true) ? dist : 0;
               int bound_X = (core->boundaryCore_3D[0][1] == true) ? core->coreArrayNeighborhoodSizes_3D[1][1][1][0] - (dist): core->coreArrayNeighborhoodSizes_3D[1][1][1][0];
               int base_Y  = (core->boundaryCore_3D[1][0] == true) ? dist : 0;
               int bound_Y = (core->boundaryCore_3D[1][1] == true) ? core->coreArrayNeighborhoodSizes_3D[1][1][1][1] - (dist): core->coreArrayNeighborhoodSizes_3D[1][1][1][1];
               int base_Z  = (core->boundaryCore_3D[2][0] == true) ? dist : 0;
               int bound_Z = (core->boundaryCore_3D[2][1] == true) ? core->coreArrayNeighborhoodSizes_3D[1][1][1][2] - (dist): core->coreArrayNeighborhoodSizes_3D[1][1][1][2];

               for (int k = base_Z; k < bound_Z; k++)
                  {
                    for (int j = base_Y; j < bound_Y; j++)
                       {
                         for (int i = base_X; i < bound_X; i++)
                            {
                           // Interior points (further than dist from every face of the
                           // section) are skipped here: only the shell is updated.
                              if((i >= dist) && (i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist) && (j >= dist) && (j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist) && (k >= dist) && (k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist))
                                  continue;

                           // Sum the 6-point stencil at distances d = 1..dist; each +/-d
                           // neighbor comes from the local section when in range, else
                           // from the matching halo buffer of the adjacent core.
                           // NOTE(review): the X+d branch subtracts ...[1][1][2][0] (the
                           // X-neighbor's X extent) and the Y+d branch subtracts
                           // ...[1][2][1][2] (the Y-neighbor's *Z* extent, component [2])
                           // rather than the current section's extents — these agree only
                           // when all sections are the same size; confirm the subscripts.
                              T tmp(0.0);
                              for(int d=1; d <=dist; d++)
                                 {
                                   tmp += (
                                     /* array[Z][Y][X-d] */ ((i-d < 0) ? old_haloXBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)+(i-d)),j,k,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D(i-d,j,k)]) +
                                     /* array[Z][Y][X+d] */ ((i+d >= core->coreArrayNeighborhoodSizes_3D[1][1][1][0]) ? old_haloXTop[coreID][otherCore_index3D((i+d-core->coreArrayNeighborhoodSizes_3D[1][1][2][0]),j,k,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(i+d,j,k)]) +
                                     /* array[Z][Y-d][X] */ ((j-d < 0) ? old_haloYBottom[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(old_array.get_haloWidth(1)+(j-d)),k,(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(i,j-d,k)]) +
                                     /* array[Z][Y+d][X] */ ((j+d >= core->coreArrayNeighborhoodSizes_3D[1][1][1][1]) ? old_haloYTop[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(j+d-core->coreArrayNeighborhoodSizes_3D[1][2][1][2]),k,(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(i,j+d,k)]) +
                                     /* array[Z-d][Y][X] */ ((k-d < 0) ? old_haloZBottom[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(j+old_array.get_haloWidth(1)),(old_array.get_haloWidth(2)+(k-d)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(i,j,k-d)]) +
                                     /* array[Z+d][Y][X] */ ((k+d >= core->coreArrayNeighborhoodSizes_3D[1][1][1][2]) ? old_haloZTop[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(j+old_array.get_haloWidth(1)),(k+d-core->coreArrayNeighborhoodSizes_3D[2][1][1][2]),(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(i,j,k+d)])
                                   );
                                 }

                           // Average of 6 stencil arms over dist distances.
                              arraySection[index3D(i,j,k)] = tmp / (6.0*dist);
                            }
                       }
                  }
             }
        }
   }
}
/* ==== file: paint_stroke.c ==== */
/*
 * ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2009 by Nicholas Bishop
 * All rights reserved.
 *
 * The Original Code is: all of this file.
 *
 * Contributor(s): Jason Wilkins, Tom Musgrove.
 *
 * ***** END GPL LICENSE BLOCK *****
 */

/** \file blender/editors/sculpt_paint/paint_stroke.c
 *  \ingroup edsculpt
 */

#include "MEM_guardedalloc.h"

#include "BLI_math.h"
#include "BLI_utildefines.h"

#include "DNA_object_types.h"
#include "DNA_scene_types.h"
#include "DNA_brush_types.h"

#include "RNA_access.h"

#include "BKE_context.h"
#include "BKE_paint.h"
#include "BKE_brush.h"

#include "WM_api.h"
#include "WM_types.h"

#include "BIF_gl.h"
#include "BIF_glutil.h"

#include "ED_screen.h"
#include "ED_view3d.h"

#include "paint_intern.h"
/* still needed for sculpt_stroke_get_location, should be removed eventually (TODO) */
#include "sculpt_intern.h"

#include <float.h>
#include <math.h>

/* State carried through the lifetime of one paint/sculpt stroke (modal operator). */
typedef struct PaintStroke {
	void *mode_data;            /* opaque per-mode data, see paint_stroke_mode_data() */
	void *smooth_stroke_cursor; /* paint-cursor handle drawing the smooth-stroke line */
	wmTimer *timer;             /* airbrush repeat timer (only with BRUSH_AIRBRUSH) */

	/* Cached values */
	ViewContext vc;
	bglMats mats;
	Brush *brush;

	float last_mouse_position[2];

	/* Set whether any stroke step has yet occurred
	   e.g. in sculpt mode, stroke doesn't start until cursor passes over the mesh */
	int stroke_started;
	/* event that started stroke, for modal() return */
	int event_type;

	StrokeGetLocation get_location;
	StrokeTestStart test_start;
	StrokeUpdateStep update_step;
	StrokeDone done;
} PaintStroke;

/*** Cursor ***/

/* Paint-cursor callback: while smooth stroke is enabled, draws a line from the
   last accepted stroke position to the current cursor position. */
static void paint_draw_smooth_stroke(bContext *C, int x, int y, void *customdata)
{
	Brush *brush = paint_brush(paint_get_active(CTX_data_scene(C)));
	PaintStroke *stroke = customdata;

	glColor4ubv(paint_get_active(CTX_data_scene(C))->paint_cursor_col);
	glEnable(GL_LINE_SMOOTH);
	glEnable(GL_BLEND);

	if(stroke && brush && (brush->flag & BRUSH_SMOOTH_STROKE)) {
		ARegion *ar = CTX_wm_region(C);

		sdrawline(x, y,
		          (int)stroke->last_mouse_position[0] - ar->winrct.xmin,
		          (int)stroke->last_mouse_position[1] - ar->winrct.ymin);
	}

	glDisable(GL_BLEND);
	glDisable(GL_LINE_SMOOTH);
}

/* Snapshot of the brush/view settings the cached overlay texture was built
   with, so load_tex() can detect when a rebuild is needed. */
typedef struct Snapshot {
	float size[3];
	float ofs[3];
	float rot;
	int brush_size;
	int winx;
	int winy;
	int brush_map_mode;
	int curve_changed_timestamp; /* NOTE(review): never written in this file — confirm it is intentional */
} Snapshot;

/* Returns non-zero when the cached overlay texture is still valid for the
   current brush and view settings (counterpart of make_snap()). */
static int same_snap(Snapshot* snap, Brush* brush, ViewContext* vc)
{
	MTex* mtex = &brush->mtex;

	return (mtex->tex &&
	        mtex->ofs[0] == snap->ofs[0] &&
	        mtex->ofs[1] == snap->ofs[1] &&
	        mtex->ofs[2] == snap->ofs[2] &&
	        mtex->size[0] == snap->size[0] &&
	        mtex->size[1] == snap->size[1] &&
	        mtex->size[2] == snap->size[2] &&
	        mtex->rot == snap->rot) &&
	       ((mtex->brush_map_mode == MTEX_MAP_MODE_FIXED && brush_size(brush) <= snap->brush_size) ||
	        (brush_size(brush) == snap->brush_size)) && // make brush smaller shouldn't cause a resample
	       mtex->brush_map_mode == snap->brush_map_mode &&
	       vc->ar->winx == snap->winx &&
	       vc->ar->winy == snap->winy;
}

/* Records the current brush/view settings into *snap. */
static void make_snap(Snapshot* snap, Brush* brush, ViewContext* vc)
{
	if (brush->mtex.tex) {
		snap->brush_map_mode = brush->mtex.brush_map_mode;
		copy_v3_v3(snap->ofs, brush->mtex.ofs);
		copy_v3_v3(snap->size, brush->mtex.size);
		snap->rot = brush->mtex.rot;
	}
	else {
		/* no texture: poison the snapshot so any texture forces a rebuild */
		snap->brush_map_mode = -1;
		snap->ofs[0]= snap->ofs[1]=
snap->ofs[2]= -1; /* continues the assignment chain started on the previous line */
		snap->size[0]= snap->size[1]= snap->size[2]= -1;
		snap->rot = -1;
	}

	snap->brush_size = brush_size(brush);
	snap->winx = vc->ar->winx;
	snap->winy = vc->ar->winy;
}

/* Builds (or reuses) the cached alpha texture previewing the brush texture
   overlay, binds it and sets the GL texture state needed for drawing.
   Returns 1 when an overlay texture is bound, 0 when there is nothing to draw. */
static int load_tex(Sculpt *sd, Brush* br, ViewContext* vc)
{
	/* cache shared by all brushes; rebuilt when the snapshot or timestamps change */
	static GLuint overlay_texture = 0;
	static int init = 0;
	static int tex_changed_timestamp = -1;
	static int curve_changed_timestamp = -1;
	static Snapshot snap;
	static int old_size = -1;

	GLubyte* buffer = NULL;

	int size;
	int j;
	int refresh;

#ifndef _OPENMP
	(void)sd; /* quiet unused-parameter warning (only read by the omp pragma below) */
#endif

	if (br->mtex.brush_map_mode == MTEX_MAP_MODE_TILED && !br->mtex.tex) return 0;

	/* rebuild when the texture preview, the falloff curve, or any
	   snapshotted brush/view setting changed */
	refresh = !overlay_texture ||
	          (br->mtex.tex &&
	           (!br->mtex.tex->preview ||
	            br->mtex.tex->preview->changed_timestamp[0] != tex_changed_timestamp)) ||
	          !br->curve ||
	          br->curve->changed_timestamp != curve_changed_timestamp ||
	          !same_snap(&snap, br, vc);

	if (refresh) {
		if (br->mtex.tex && br->mtex.tex->preview)
			tex_changed_timestamp = br->mtex.tex->preview->changed_timestamp[0];

		if (br->curve)
			curve_changed_timestamp = br->curve->changed_timestamp;

		make_snap(&snap, br, vc);

		if (br->mtex.brush_map_mode == MTEX_MAP_MODE_FIXED) {
			/* round the brush size up to the next power of two (min 256),
			   never shrinking an already-allocated texture */
			int s = brush_size(br);
			int r = 1;

			for (s >>= 1; s > 0; s >>= 1)
				r++;

			size = (1<<r);

			if (size < 256)
				size = 256;

			if (size < old_size)
				size = old_size;
		}
		else
			size = 512;

		if (old_size != size) {
			if (overlay_texture) {
				glDeleteTextures(1, &overlay_texture);
				overlay_texture = 0;
			}

			init = 0;

			old_size = size;
		}

		buffer = MEM_mallocN(sizeof(GLubyte)*size*size, "load_tex");

#pragma omp parallel for schedule(static) if (sd->flags & SCULPT_USE_OPENMP)
		for (j= 0; j < size; j++) {
			int i;
			float y;
			float len;

			for (i= 0; i < size; i++) {

				// largely duplicated from tex_strength

				const float rotation = -br->mtex.rot;
				float radius = brush_size(br);
				int index = j*size + i;
				float x;
				float avg;

				/* normalize pixel coordinates to [-0.5, 0.5) */
				x = (float)i/size;
				y = (float)j/size;

				x -= 0.5f;
				y -= 0.5f;

				if (br->mtex.brush_map_mode == MTEX_MAP_MODE_TILED) {
					/* tiled mode: map over the whole region, in brush-radius units */
					x *= vc->ar->winx / radius;
					y *= vc->ar->winy / radius;
				}
				else {
					/* fixed mode: map the texture to the brush circle (radius 1) */
					x *= 2;
					y *= 2;
				}

				len = sqrtf(x*x + y*y);

				if ((br->mtex.brush_map_mode == MTEX_MAP_MODE_TILED) || len <= 1) {
					/* it is probably worth optimizing for those cases where
					   the texture is not rotated by skipping the calls to
					   atan2, sqrtf, sin, and cos. */
					if (br->mtex.tex && (rotation > 0.001f || rotation < -0.001f)) {
						const float angle = atan2f(y, x) + rotation;

						x = len * cosf(angle);
						y = len * sinf(angle);
					}

					x *= br->mtex.size[0];
					y *= br->mtex.size[1];

					x += br->mtex.ofs[0];
					y += br->mtex.ofs[1];

					avg = br->mtex.tex ? paint_get_tex_pixel(br, x, y) : 1;

					avg += br->texture_sample_bias;

					if (br->mtex.brush_map_mode == MTEX_MAP_MODE_FIXED)
						avg *= brush_curve_strength(br, len, 1); /* Falloff curve */

					buffer[index] = 255 - (GLubyte)(255*avg);
				}
				else {
					buffer[index] = 0;
				}
			}
		}

		if (!overlay_texture)
			glGenTextures(1, &overlay_texture);
	}
	else {
		size= old_size;
	}

	glBindTexture(GL_TEXTURE_2D, overlay_texture);

	if (refresh) {
		/* full upload the first time a size is used, sub-image update after */
		if (!init) {
			glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, size, size, 0, GL_ALPHA, GL_UNSIGNED_BYTE, buffer);
			init = 1;
		}
		else {
			glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, size, size, GL_ALPHA, GL_UNSIGNED_BYTE, buffer);
		}

		if (buffer)
			MEM_freeN(buffer);
	}

	glEnable(GL_TEXTURE_2D);

	glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);

	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

	if (br->mtex.brush_map_mode == MTEX_MAP_MODE_FIXED) {
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
	}

	return 1;
}

/* Returns the screen-space size (in pixels) of a brush with the given
   object-space radius, centered at 'location'. */
static int project_brush_radius(RegionView3D* rv3d, float radius, float location[3], bglMats* mats)
{
	float view[3], nonortho[3], ortho[3], offset[3], p1[2], p2[2];

	ED_view3d_global_to_vector(rv3d, location, view);

	// create a vector that is not orthogonal to view
	if (fabsf(view[0]) < 0.1f) {
		nonortho[0] = view[0] + 1.0f;
		nonortho[1] = view[1];
		nonortho[2] = view[2];
	}
	else if
(fabsf(view[1]) < 0.1f) { /* continues the else-if chain started on the previous line */
		nonortho[0] = view[0];
		nonortho[1] = view[1] + 1.0f;
		nonortho[2] = view[2];
	}
	else {
		nonortho[0] = view[0];
		nonortho[1] = view[1];
		nonortho[2] = view[2] + 1.0f;
	}

	// get a vector in the plane of the view
	cross_v3_v3v3(ortho, nonortho, view);
	normalize_v3(ortho);

	// make a point on the surface of the brush tangent to the view
	mul_v3_fl(ortho, radius);
	add_v3_v3v3(offset, location, ortho);

	// project the center of the brush, and the tangent point to the view onto the screen
	projectf(mats, location, p1);
	projectf(mats, offset, p2);

	// the distance between these points is the size of the projected brush in pixels
	// NOTE(review): len_v2v2() yields float but the function returns int (truncation) — confirm intended
	return len_v2v2(p1, p2);
}

/* Finds the mesh location under screen point (x,y) and the brush's projected
   pixel radius there. Returns 1 when the cursor is over the sculpt mesh,
   0 otherwise (pixel_radius then falls back to the brush's screen size). */
static int sculpt_get_brush_geometry(bContext* C, int x, int y, int* pixel_radius, float location[3])
{
	struct PaintStroke *stroke;
	float window[2];
	int hit;

	/* temporary stroke just to get a cached ViewContext/bglMats */
	stroke = paint_stroke_new(C, NULL, NULL, NULL, NULL, 0);

	window[0] = x + stroke->vc.ar->winrct.xmin;
	window[1] = y + stroke->vc.ar->winrct.ymin;

	if(stroke->vc.obact->sculpt && stroke->vc.obact->sculpt->pbvh &&
	   sculpt_stroke_get_location(C, stroke, location, window)) {
		*pixel_radius = project_brush_radius(stroke->vc.rv3d,
		                                     brush_unprojected_radius(stroke->brush),
		                                     location, &stroke->mats);

		if (*pixel_radius == 0)
			*pixel_radius = brush_size(stroke->brush);

		/* back to world space */
		mul_m4_v3(stroke->vc.obact->obmat, location);

		hit = 1;
	}
	else {
		Sculpt* sd = CTX_data_tool_settings(C)->sculpt;
		Brush* brush = paint_brush(&sd->paint);

		*pixel_radius = brush_size(brush);
		hit = 0;
	}

	paint_stroke_free(stroke);

	return hit;
}

/* Draw an overlay that shows what effect the brush's texture will have on brush strength */
/* TODO: sculpt only for now */
static void paint_draw_alpha_overlay(Sculpt *sd, Brush *brush, ViewContext *vc, int x, int y)
{
	rctf quad;

	/* check for overlay mode */
	if(!(brush->flag & BRUSH_TEXTURE_OVERLAY) ||
	   !(ELEM(brush->mtex.brush_map_mode, MTEX_MAP_MODE_FIXED, MTEX_MAP_MODE_TILED)))
		return;

	/* save lots of GL state
	   TODO: check on whether all of these are needed? */
	glPushAttrib(GL_COLOR_BUFFER_BIT| GL_CURRENT_BIT| GL_DEPTH_BUFFER_BIT| GL_ENABLE_BIT| GL_LINE_BIT| GL_POLYGON_BIT| GL_STENCIL_BUFFER_BIT| GL_TRANSFORM_BIT| GL_VIEWPORT_BIT| GL_TEXTURE_BIT);

	if(load_tex(sd, brush, vc)) {
		glEnable(GL_BLEND);

		glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
		glDepthMask(GL_FALSE);
		glDepthFunc(GL_ALWAYS);

		/* transform the overlay texture instead of the quad */
		glMatrixMode(GL_TEXTURE);
		glPushMatrix();
		glLoadIdentity();

		if(brush->mtex.brush_map_mode == MTEX_MAP_MODE_FIXED) {
			/* brush rotation */
			glTranslatef(0.5, 0.5, 0);
			glRotatef((double)RAD2DEGF((brush->flag & BRUSH_RAKE) ?
			                           sd->last_angle : sd->special_rotation),
			          0.0, 0.0, 1.0);
			glTranslatef(-0.5f, -0.5f, 0);

			/* scale based on tablet pressure */
			if(sd->draw_pressure && brush_use_size_pressure(brush)) {
				glTranslatef(0.5f, 0.5f, 0);
				glScalef(1.0f/sd->pressure_value, 1.0f/sd->pressure_value, 1);
				glTranslatef(-0.5f, -0.5f, 0);
			}

			if(sd->draw_anchored) {
				const float *aim = sd->anchored_initial_mouse;
				const rcti *win = &vc->ar->winrct;
				quad.xmin = aim[0]-sd->anchored_size - win->xmin;
				quad.ymin = aim[1]-sd->anchored_size - win->ymin;
				quad.xmax = aim[0]+sd->anchored_size - win->xmin;
				quad.ymax = aim[1]+sd->anchored_size - win->ymin;
			}
			else {
				const int radius= brush_size(brush);
				quad.xmin = x - radius;
				quad.ymin = y - radius;
				quad.xmax = x + radius;
				quad.ymax = y + radius;
			}
		}
		else {
			/* tiled mode: cover the whole region */
			quad.xmin = 0;
			quad.ymin = 0;
			quad.xmax = vc->ar->winrct.xmax - vc->ar->winrct.xmin;
			quad.ymax = vc->ar->winrct.ymax - vc->ar->winrct.ymin;
		}

		/* set quad color */
		glColor4f(U.sculpt_paint_overlay_col[0],
		          U.sculpt_paint_overlay_col[1],
		          U.sculpt_paint_overlay_col[2],
		          brush->texture_overlay_alpha / 100.0f);

		/* draw textured quad */
		glBegin(GL_QUADS);
		glTexCoord2f(0, 0);
		glVertex2f(quad.xmin, quad.ymin);
		glTexCoord2f(1, 0);
		glVertex2f(quad.xmax, quad.ymin);
		glTexCoord2f(1, 1);
		glVertex2f(quad.xmax, quad.ymax);
		glTexCoord2f(0, 1);
		glVertex2f(quad.xmin, quad.ymax);
		glEnd();

		glPopMatrix();
	}

	glPopAttrib();
}

/* Special actions taken when paint cursor goes over mesh */
/*
TODO: sculpt only for now */

/* Called while the cursor is over the mesh: scales the displayed strength by
   tablet pressure and keeps the brush's unprojected radius in sync with its
   on-screen radius. */
static void paint_cursor_on_hit(Sculpt *sd, Brush *brush, ViewContext *vc, float location[3], float *visual_strength)
{
	float unprojected_radius, projected_radius;

	/* TODO: check whether this should really only be done when brush is over mesh? */
	if(sd->draw_pressure && brush_use_alpha_pressure(brush))
		(*visual_strength) *= sd->pressure_value;

	if(sd->draw_anchored)
		projected_radius = sd->anchored_size;
	else {
		if(brush->flag & BRUSH_ANCHORED)
			projected_radius = 8;
		else
			projected_radius = brush_size(brush);
	}

	unprojected_radius = paint_calc_object_space_radius(vc, location, projected_radius);

	if(sd->draw_pressure && brush_use_size_pressure(brush))
		unprojected_radius *= sd->pressure_value;

	if(!brush_use_locked_size(brush))
		brush_set_unprojected_radius(brush, unprojected_radius);
}

/* Paint-cursor callback: draws the brush outline (and, for sculpt, the texture
   overlay) at the current cursor position. */
static void paint_draw_cursor(bContext *C, int x, int y, void *UNUSED(unused))
{
	Paint *paint = paint_get_active(CTX_data_scene(C));
	Brush *brush = paint_brush(paint);
	ViewContext vc;
	float final_radius;
	float translation[2];
	float outline_alpha, *outline_col;

	/* set various defaults */
	translation[0] = x;
	translation[1] = y;
	outline_alpha = 0.5;
	outline_col = brush->add_col;
	final_radius = brush_size(brush);

	/* check that brush drawing is enabled */
	if(!(paint->flags & PAINT_SHOW_BRUSH))
		return;

	/* can't use stroke vc here because this will be called during
	   mouse over too, not just during a stroke */
	view3d_set_viewcontext(C, &vc);

	/* TODO: as sculpt and other paint modes are unified, this special
	   mode of drawing will go away */
	if(vc.obact->sculpt) {
		Sculpt *sd = CTX_data_tool_settings(C)->sculpt;
		float location[3];
		int pixel_radius, hit;

		const float root_alpha = brush_alpha(brush);
		float visual_strength = root_alpha*root_alpha;

		const float min_alpha = 0.20f;
		const float max_alpha = 0.80f;

		/* this is probably here so that rake takes into account the brush
		   movements before the stroke starts, but this doesn't really
		   belong in draw code (TODO) */
		{
			/* exponential smoothing of the cursor position to derive the rake angle */
			const float u = 0.5f;
			const float v = 1 - u;
			const float r = 20;

			const float dx = sd->last_x - x;
			const float dy = sd->last_y - y;

			if(dx*dx + dy*dy >= r*r) {
				sd->last_angle = atan2(dx, dy);

				sd->last_x = u*sd->last_x + v*x;
				sd->last_y = u*sd->last_y + v*y;
			}
		}

		/* test if brush is over the mesh */
		hit = sculpt_get_brush_geometry(C, x, y, &pixel_radius, location);

		/* draw overlay */
		paint_draw_alpha_overlay(sd, brush, &vc, x, y);

		if(brush_use_locked_size(brush))
			brush_set_size(brush, pixel_radius);

		/* check if brush is subtracting, use different color then */
		/* TODO: no way currently to know state of pen flip or
		   invert key modifier without starting a stroke */
		if((!(brush->flag & BRUSH_INVERTED) ^
		    !(brush->flag & BRUSH_DIR_IN)) &&
		   ELEM5(brush->sculpt_tool, SCULPT_TOOL_DRAW,
		         SCULPT_TOOL_INFLATE, SCULPT_TOOL_CLAY,
		         SCULPT_TOOL_PINCH, SCULPT_TOOL_CREASE))
			outline_col = brush->sub_col;

		/* only do if brush is over the mesh */
		if(hit)
			paint_cursor_on_hit(sd, brush, &vc, location, &visual_strength);

		/* don't show effect of strength past the soft limit */
		if(visual_strength > 1)
			visual_strength = 1;

		outline_alpha = ((paint->flags & PAINT_SHOW_BRUSH_ON_SURFACE) ?
		                 min_alpha + (visual_strength*(max_alpha-min_alpha)) : 0.50f);

		if(sd->draw_anchored) {
			final_radius = sd->anchored_size;
			translation[0] = sd->anchored_initial_mouse[0] - vc.ar->winrct.xmin;
			translation[1] = sd->anchored_initial_mouse[1] - vc.ar->winrct.ymin;
		}
	}

	/* make lines pretty */
	glEnable(GL_BLEND);
	glEnable(GL_LINE_SMOOTH);

	/* set brush color */
	glColor4f(outline_col[0], outline_col[1], outline_col[2], outline_alpha);

	/* draw brush outline */
	glTranslatef(translation[0], translation[1], 0);
	glutil_draw_lined_arc(0.0, M_PI*2.0, final_radius, 40);
	glTranslatef(-translation[0], -translation[1], 0);

	/* restore GL state */
	glDisable(GL_BLEND);
	glDisable(GL_LINE_SMOOTH);
}

/* if this is a tablet event, return tablet pressure and set *pen_flip
   to 1 if the eraser tool is being used, 0 otherwise */
static float event_tablet_data(wmEvent *event, int *pen_flip)
{
	int erasor = 0;
	float pressure = 1;

	if(event->custom == EVT_DATA_TABLET) {
		wmTabletData *wmtab= event->customdata;

		erasor = (wmtab->Active == EVT_TABLET_ERASER);
		pressure = (wmtab->Active != EVT_TABLET_NONE) ?
wmtab->Pressure : 1; } if(pen_flip) (*pen_flip) = erasor; return pressure; } /* Put the location of the next stroke dot into the stroke RNA and apply it to the mesh */ static void paint_brush_stroke_add_step(bContext *C, wmOperator *op, wmEvent *event, float mouse_in[2]) { Paint *paint = paint_get_active(CTX_data_scene(C)); Brush *brush = paint_brush(paint); PaintStroke *stroke = op->customdata; float mouse[3]; PointerRNA itemptr; float location[3]; float pressure; int pen_flip; /* see if tablet affects event */ pressure = event_tablet_data(event, &pen_flip); /* TODO: as sculpt and other paint modes are unified, this separation will go away */ if(stroke->vc.obact->sculpt) { float delta[2]; brush_jitter_pos(brush, mouse_in, mouse); /* XXX: meh, this is round about because brush_jitter_pos isn't written in the best way to be reused here */ if(brush->flag & BRUSH_JITTER_PRESSURE) { sub_v2_v2v2(delta, mouse, mouse_in); mul_v2_fl(delta, pressure); add_v2_v2v2(mouse, mouse_in, delta); } } else { copy_v2_v2(mouse, mouse_in); } /* TODO: can remove the if statement once all modes have this */ if(stroke->get_location) stroke->get_location(C, stroke, location, mouse); else zero_v3(location); /* Add to stroke */ RNA_collection_add(op->ptr, "stroke", &itemptr); RNA_float_set_array(&itemptr, "location", location); RNA_float_set_array(&itemptr, "mouse", mouse); RNA_boolean_set(&itemptr, "pen_flip", pen_flip); RNA_float_set(&itemptr, "pressure", pressure); stroke->last_mouse_position[0] = mouse[0]; stroke->last_mouse_position[1] = mouse[1]; stroke->update_step(C, stroke, &itemptr); } /* Returns zero if no sculpt changes should be made, non-zero otherwise */ static int paint_smooth_stroke(PaintStroke *stroke, float output[2], wmEvent *event) { output[0] = event->x; output[1] = event->y; if ((stroke->brush->flag & BRUSH_SMOOTH_STROKE) && !ELEM4(stroke->brush->sculpt_tool, SCULPT_TOOL_GRAB, SCULPT_TOOL_THUMB, SCULPT_TOOL_ROTATE, SCULPT_TOOL_SNAKE_HOOK) && !(stroke->brush->flag & 
BRUSH_ANCHORED) && !(stroke->brush->flag & BRUSH_RESTORE_MESH)) { float u = stroke->brush->smooth_stroke_factor, v = 1.0f - u; float dx = stroke->last_mouse_position[0] - event->x, dy = stroke->last_mouse_position[1] - event->y; /* If the mouse is moving within the radius of the last move, don't update the mouse position. This allows sharp turns. */ if(dx*dx + dy*dy < stroke->brush->smooth_stroke_radius * stroke->brush->smooth_stroke_radius) return 0; output[0] = event->x * v + stroke->last_mouse_position[0] * u; output[1] = event->y * v + stroke->last_mouse_position[1] * u; } return 1; } /* For brushes with stroke spacing enabled, moves mouse in steps towards the final mouse location. */ static int paint_space_stroke(bContext *C, wmOperator *op, wmEvent *event, const float final_mouse[2]) { PaintStroke *stroke = op->customdata; int cnt = 0; if(paint_space_stroke_enabled(stroke->brush)) { float mouse[2]; float vec[2]; float length, scale; copy_v2_v2(mouse, stroke->last_mouse_position); sub_v2_v2v2(vec, final_mouse, mouse); length = len_v2(vec); if(length > FLT_EPSILON) { int steps; int i; float pressure= 1.0f; /* XXX mysterious :) what has 'use size' do with this here... 
if you don't check for it, pressure fails */ if(brush_use_size_pressure(stroke->brush)) pressure = event_tablet_data(event, NULL); if(pressure > FLT_EPSILON) { scale = (brush_size(stroke->brush)*pressure*stroke->brush->spacing/50.0f) / length; if(scale > FLT_EPSILON) { mul_v2_fl(vec, scale); steps = (int)(1.0f / scale); for(i = 0; i < steps; ++i, ++cnt) { add_v2_v2(mouse, vec); paint_brush_stroke_add_step(C, op, event, mouse); } } } } } return cnt; } /**** Public API ****/ PaintStroke *paint_stroke_new(bContext *C, StrokeGetLocation get_location, StrokeTestStart test_start, StrokeUpdateStep update_step, StrokeDone done, int event_type) { PaintStroke *stroke = MEM_callocN(sizeof(PaintStroke), "PaintStroke"); stroke->brush = paint_brush(paint_get_active(CTX_data_scene(C))); view3d_set_viewcontext(C, &stroke->vc); view3d_get_transformation(stroke->vc.ar, stroke->vc.rv3d, stroke->vc.obact, &stroke->mats); stroke->get_location = get_location; stroke->test_start = test_start; stroke->update_step = update_step; stroke->done = done; stroke->event_type= event_type; /* for modal, return event */ return stroke; } void paint_stroke_free(PaintStroke *stroke) { MEM_freeN(stroke); } /* Returns zero if the stroke dots should not be spaced, non-zero otherwise */ int paint_space_stroke_enabled(Brush *br) { return (br->flag & BRUSH_SPACE) && !(br->flag & BRUSH_ANCHORED) && !ELEM4(br->sculpt_tool, SCULPT_TOOL_GRAB, SCULPT_TOOL_THUMB, SCULPT_TOOL_ROTATE, SCULPT_TOOL_SNAKE_HOOK); } int paint_stroke_modal(bContext *C, wmOperator *op, wmEvent *event) { PaintStroke *stroke = op->customdata; float mouse[2]; int first= 0; // let NDOF motion pass through to the 3D view so we can paint and rotate simultaneously! // this isn't perfect... 
even when an extra MOUSEMOVE is spoofed, the stroke discards it // since the 2D deltas are zero -- code in this file needs to be updated to use the // post-NDOF_MOTION MOUSEMOVE if (event->type == NDOF_MOTION) return OPERATOR_PASS_THROUGH; if(!stroke->stroke_started) { stroke->last_mouse_position[0] = event->x; stroke->last_mouse_position[1] = event->y; stroke->stroke_started = stroke->test_start(C, op, event); if(stroke->stroke_started) { stroke->smooth_stroke_cursor = WM_paint_cursor_activate(CTX_wm_manager(C), paint_poll, paint_draw_smooth_stroke, stroke); if(stroke->brush->flag & BRUSH_AIRBRUSH) stroke->timer = WM_event_add_timer(CTX_wm_manager(C), CTX_wm_window(C), TIMER, stroke->brush->rate); } first= 1; //ED_region_tag_redraw(ar); } if(event->type == stroke->event_type && event->val == KM_RELEASE) { /* exit stroke, free data */ if(stroke->smooth_stroke_cursor) WM_paint_cursor_end(CTX_wm_manager(C), stroke->smooth_stroke_cursor); if(stroke->timer) WM_event_remove_timer(CTX_wm_manager(C), CTX_wm_window(C), stroke->timer); stroke->done(C, stroke); MEM_freeN(stroke); return OPERATOR_FINISHED; } else if(first || ELEM(event->type, MOUSEMOVE, INBETWEEN_MOUSEMOVE) || (event->type == TIMER && (event->customdata == stroke->timer))) { if(stroke->stroke_started) { if(paint_smooth_stroke(stroke, mouse, event)) { if(paint_space_stroke_enabled(stroke->brush)) { if(!paint_space_stroke(C, op, event, mouse)) { //ED_region_tag_redraw(ar); } } else { paint_brush_stroke_add_step(C, op, event, mouse); } } else { ;//ED_region_tag_redraw(ar); } } } /* we want the stroke to have the first daub at the start location instead of waiting till we have moved the space distance */ if(first && stroke->stroke_started && paint_space_stroke_enabled(stroke->brush) && !(stroke->brush->flag & BRUSH_ANCHORED) && !(stroke->brush->flag & BRUSH_SMOOTH_STROKE)) { paint_brush_stroke_add_step(C, op, event, mouse); } return OPERATOR_RUNNING_MODAL; } int paint_stroke_exec(bContext *C, wmOperator *op) { 
PaintStroke *stroke = op->customdata; /* only when executed for the first time */ if(stroke->stroke_started == 0) { /* XXX stroke->last_mouse_position is unset, this may cause problems */ stroke->test_start(C, op, NULL); stroke->stroke_started= 1; } RNA_BEGIN(op->ptr, itemptr, "stroke") { stroke->update_step(C, stroke, &itemptr); } RNA_END; stroke->done(C, stroke); MEM_freeN(stroke); op->customdata = NULL; return OPERATOR_FINISHED; } int paint_stroke_cancel(bContext *C, wmOperator *op) { PaintStroke *stroke = op->customdata; if(stroke->done) stroke->done(C, stroke); MEM_freeN(stroke); op->customdata = NULL; return OPERATOR_CANCELLED; } ViewContext *paint_stroke_view_context(PaintStroke *stroke) { return &stroke->vc; } void *paint_stroke_mode_data(struct PaintStroke *stroke) { return stroke->mode_data; } void paint_stroke_set_mode_data(PaintStroke *stroke, void *mode_data) { stroke->mode_data = mode_data; } int paint_poll(bContext *C) { Paint *p = paint_get_active(CTX_data_scene(C)); Object *ob = CTX_data_active_object(C); return p && ob && paint_brush(p) && CTX_wm_area(C)->spacetype == SPACE_VIEW3D && CTX_wm_region(C)->regiontype == RGN_TYPE_WINDOW; } void paint_cursor_start(bContext *C, int (*poll)(bContext *C)) { Paint *p = paint_get_active(CTX_data_scene(C)); if(p && !p->paint_cursor) p->paint_cursor = WM_paint_cursor_activate(CTX_wm_manager(C), poll, paint_draw_cursor, NULL); }
/* ==== file: par_relax.c ==== */
/*BHEADER********************************************************************** * Copyright (c) 2017, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322. * This file is part of AMG. See files README and COPYRIGHT for details. * * AMG is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * This software is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the * GNU General Public License for more details. * ***********************************************************************EHEADER*/ /****************************************************************************** * * Relaxation scheme * *****************************************************************************/ #include "_hypre_parcsr_ls.h" /*-------------------------------------------------------------------------- * hypre_BoomerAMGRelax *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGRelax( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_type, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, HYPRE_Real *l1_norms, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j 
= hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; HYPRE_Int n_global= hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int first_index = hypre_ParVectorFirstIndex(u); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Real *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Real *f_data = hypre_VectorData(f_local); hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp); HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local); HYPRE_Real *Vext_data = NULL; HYPRE_Real *v_buf_data; HYPRE_Real *tmp_data; hypre_Vector *Ztemp_local; HYPRE_Real *Ztemp_data; hypre_CSRMatrix *A_CSR; HYPRE_Int *A_CSR_i; HYPRE_Int *A_CSR_j; HYPRE_Real *A_CSR_data; hypre_Vector *f_vector; HYPRE_Real *f_vector_data; HYPRE_Int i, j, jr; HYPRE_Int ii, jj; HYPRE_Int ns, ne, size, rest; HYPRE_Int column; HYPRE_Int relax_error = 0; HYPRE_Int num_sends; HYPRE_Int num_recvs; HYPRE_Int index, start; HYPRE_Int num_procs, num_threads, my_id, ip, p; HYPRE_Int vec_start, vec_len; hypre_MPI_Status *status; hypre_MPI_Request *requests; HYPRE_Real *A_mat; HYPRE_Real *b_vec; HYPRE_Real zero = 0.0; HYPRE_Real res, res0, res2; HYPRE_Real one_minus_weight; HYPRE_Real one_minus_omega; HYPRE_Real prod; one_minus_weight = 1.0 - relax_weight; one_minus_omega = 1.0 - omega; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); /*----------------------------------------------------------------------- * Switch statement to direct control based on relax_type: * relax_type = 0 -> Jacobi or CF-Jacobi * relax_type = 1 -> Gauss-Seidel <--- very slow, sequential * relax_type = 2 -> Gauss_Seidel: interior points in parallel , * boundary sequential * relax_type = 3 -> hybrid: SOR-J mix off-processor, SOR on-processor * with outer relaxation 
parameters (forward solve) * relax_type = 4 -> hybrid: SOR-J mix off-processor, SOR on-processor * with outer relaxation parameters (backward solve) * relax_type = 5 -> hybrid: GS-J mix off-processor, chaotic GS on-node * relax_type = 6 -> hybrid: SSOR-J mix off-processor, SSOR on-processor * with outer relaxation parameters * relax_type = 7 -> Jacobi (uses Matvec), only needed in CGNR * relax_type = 19-> Direct Solve, (old version) * relax_type = 29-> Direct solve: use gaussian elimination & BLAS * (with pivoting) (old version) *-----------------------------------------------------------------------*/ switch (relax_type) { case 0: /* Weighted Jacobi */ { if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. 
*-----------------------------------------------------------------*/ if (relax_points == 0) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= one_minus_weight; u_data[i] += relax_weight * res / A_diag_data[A_diag_i[i]]; } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= one_minus_weight; u_data[i] += relax_weight * res / A_diag_data[A_diag_i[i]]; } } } if (num_procs > 1) { hypre_TFree(Vext_data); hypre_TFree(v_buf_data); } } break; case 5: /* Hybrid: Jacobi off-processor, chaotic Gauss-Seidel on-processor */ { if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ if (relax_points == 0) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } if (num_procs > 1) { hypre_TFree(Vext_data); hypre_TFree(v_buf_data); } } break; case 3: /* Hybrid: Jacobi off-processor, Gauss-Seidel on-processor (forward loop) */ { if (num_threads > 1) { Ztemp_local = hypre_ParVectorLocalVector(Ztemp); Ztemp_data = hypre_VectorData(Ztemp_local); } #ifdef HYPRE_USING_PERSISTENT_COMM // JSP: persistent comm can be similarly used for other smoothers hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if (num_procs > 1) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); #ifdef HYPRE_USING_PERSISTENT_COMM 
persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg); v_buf_data = (HYPRE_Real *)persistent_comm_handle->send_data; Vext_data = (HYPRE_Real *)persistent_comm_handle->recv_data; #else v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd); #endif if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i = begin; i < end; i++) { v_buf_data[i - begin] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)]; } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle); #else comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); #endif /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle); #else hypre_ParCSRCommHandleDestroy(comm_handle); #endif comm_handle = NULL; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif } /*----------------------------------------------------------------- * Relax all points. 
*-----------------------------------------------------------------*/ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RELAX] -= hypre_MPI_Wtime(); #endif if (relax_weight == 1 && omega == 1) { if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) res -= A_diag_data[jj] * u_data[ii]; else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) res -= A_diag_data[jj] * u_data[ii]; else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } prod = (1.0-relax_weight*omega); if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax 
point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } } #ifndef HYPRE_USING_PERSISTENT_COMM if (num_procs > 1) { hypre_TFree(Vext_data); hypre_TFree(v_buf_data); } #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RELAX] += hypre_MPI_Wtime(); #endif } break; case 1: /* Gauss-Seidel VERY SLOW */ { if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd); status = hypre_CTAlloc(hypre_MPI_Status,num_recvs+num_sends); requests= hypre_CTAlloc(hypre_MPI_Request, num_recvs+num_sends); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ /* for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } */ } /*----------------------------------------------------------------- * Relax all points. 
*-----------------------------------------------------------------*/ for (p = 0; p < num_procs; p++) { jr = 0; if (p != my_id) { for (i = 0; i < num_sends; i++) { ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (ip == p) { vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1)-vec_start; for (j=vec_start; j < vec_start+vec_len; j++) v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]); } } hypre_MPI_Waitall(jr,requests,status); hypre_MPI_Barrier(comm); } else { if (num_procs > 1) { for (i = 0; i < num_recvs; i++) { ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i); vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i); vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i+1)-vec_start; hypre_MPI_Irecv(&Vext_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]); } hypre_MPI_Waitall(jr,requests,status); } if (relax_points == 0) { for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } if (num_procs > 1) hypre_MPI_Barrier(comm); } } if (num_procs > 1) { hypre_TFree(Vext_data); hypre_TFree(v_buf_data); hypre_TFree(status); hypre_TFree(requests); } } break; case 2: /* Gauss-Seidel: relax interior points in parallel, boundary sequentially */ { if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd); status = hypre_CTAlloc(hypre_MPI_Status,num_recvs+num_sends); requests= hypre_CTAlloc(hypre_MPI_Request, num_recvs+num_sends); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ /* for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } */ /*----------------------------------------------------------------- * Relax interior points first *-----------------------------------------------------------------*/ if (relax_points == 0) { for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ((A_offd_i[i+1]-A_offd_i[i]) == zero && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } else { for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && (A_offd_i[i+1]-A_offd_i[i]) == zero && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } for (p = 0; p < num_procs; p++) { jr = 0; if (p != my_id) { for (i = 0; i < num_sends; i++) { ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (ip == p) { vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1)-vec_start; for (j=vec_start; j < vec_start+vec_len; j++) v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]); } } hypre_MPI_Waitall(jr,requests,status); hypre_MPI_Barrier(comm); } else { if (num_procs > 1) { for (i = 0; i < num_recvs; i++) { ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i); vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i); vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i+1)-vec_start; hypre_MPI_Irecv(&Vext_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]); } hypre_MPI_Waitall(jr,requests,status); } if (relax_points == 0) { for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ((A_offd_i[i+1]-A_offd_i[i]) != zero && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && (A_offd_i[i+1]-A_offd_i[i]) != zero && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } if (num_procs > 1) hypre_MPI_Barrier(comm); } } if (num_procs > 1) { hypre_TFree(Vext_data); hypre_TFree(v_buf_data); hypre_TFree(status); hypre_TFree(requests); } } break; case 4: /* Hybrid: Jacobi off-processor, Gauss-Seidel/SOR on-processor (backward loop) */ { if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < 
hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ if (relax_weight == 1 && omega == 1) { if (relax_points == 0) { if (num_threads > 1) { tmp_data = hypre_CTAlloc(HYPRE_Real,n); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) res -= A_diag_data[jj] * u_data[ii]; else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } hypre_TFree(tmp_data); } else { for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = hypre_CTAlloc(HYPRE_Real,n); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) res -= A_diag_data[jj] * u_data[ii]; else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } hypre_TFree(tmp_data); } else { for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } prod = (1.0-relax_weight*omega); if (relax_points == 0) { if (num_threads > 1) { tmp_data = hypre_CTAlloc(HYPRE_Real,n); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } hypre_TFree(tmp_data); } else { for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = hypre_CTAlloc(HYPRE_Real,n); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } hypre_TFree(tmp_data); } else { for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } } if (num_procs > 1) { hypre_TFree(Vext_data); hypre_TFree(v_buf_data); } } break; case 6: /* Hybrid: Jacobi off-processor, Symm. Gauss-Seidel/ SSOR on-processor with outer relaxation parameter */ { if (num_threads > 1) { Ztemp_local = hypre_ParVectorLocalVector(Ztemp); Ztemp_data = hypre_VectorData(Ztemp_local); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. 
*-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ if (relax_weight == 1 && omega == 1) { if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } prod = (1.0-relax_weight*omega); if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res = f_data[i]; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res = f_data[i]; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res2 += A_diag_data[jj] * Vtemp_data[ii]; res0 -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res2 += A_diag_data[jj] * Vtemp_data[ii]; res0 -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } } if (num_procs > 1) { hypre_TFree(Vext_data); hypre_TFree(v_buf_data); } } break; case 7: /* Jacobi (uses ParMatvec) */ { /*----------------------------------------------------------------- * Copy f into temporary vector. *-----------------------------------------------------------------*/ hypre_ParVectorCopy(f,Vtemp); /*----------------------------------------------------------------- * Perform Matvec Vtemp=f-Au *-----------------------------------------------------------------*/ hypre_ParCSRMatrixMatvec(-relax_weight,A, u, relax_weight, Vtemp); for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ u_data[i] += Vtemp_data[i] / l1_norms[i]; } } break; case 8: /* hybrid L1 Symm. Gauss-Seidel */ { if (num_threads > 1) { Ztemp_local = hypre_ParVectorLocalVector(Ztemp); Ztemp_data = hypre_VectorData(Ztemp_local); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. 
*-----------------------------------------------------------------*/ if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ if (relax_weight == 1 && omega == 1) { if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } prod = (1.0-relax_weight*omega); if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res0 = 0.0; res = f_data[i]; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res0 = 0.0; res = f_data[i]; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res2 += A_diag_data[jj] * Vtemp_data[ii]; res0 -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res2 += A_diag_data[jj] * Vtemp_data[ii]; res0 -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } } if (num_procs > 1) { hypre_TFree(Vext_data); hypre_TFree(v_buf_data); } } break; case 13: /* hybrid L1 Gauss-Seidel forward solve */ { if (num_threads > 1) { Ztemp_local = hypre_ParVectorLocalVector(Ztemp); Ztemp_data = hypre_VectorData(Ztemp_local); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. 
*-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ if (relax_weight == 1 && omega == 1) { if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } prod = (1.0-relax_weight*omega); if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res0 = 0.0; res = f_data[i]; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res2 += A_diag_data[jj] * Vtemp_data[ii]; res0 -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } } if (num_procs > 1) { hypre_TFree(Vext_data); hypre_TFree(v_buf_data); } } break; case 14: /* hybrid L1 Gauss-Seidel backward solve */ { if (num_threads > 1) { Ztemp_local = hypre_ParVectorLocalVector(Ztemp); Ztemp_data = hypre_VectorData(Ztemp_local); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. 
*-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ if (relax_weight == 1 && omega == 1) { if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } else { for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } else { for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } prod = (1.0-relax_weight*omega); if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } else { for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res0 = 0.0; res = f_data[i]; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res2 += A_diag_data[jj] * Vtemp_data[ii]; res0 -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } else { for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } } if (num_procs > 1) { hypre_TFree(Vext_data); hypre_TFree(v_buf_data); } } break; case 19: /* Direct solve: use gaussian elimination */ { /*----------------------------------------------------------------- * Generate CSR matrix from ParCSRMatrix A *-----------------------------------------------------------------*/ #ifdef HYPRE_NO_GLOBAL_PARTITION /* all processors are needed for these routines */ A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A); f_vector = hypre_ParVectorToVectorAll(f); if (n) { #else if (n) { A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A); f_vector = hypre_ParVectorToVectorAll(f); #endif A_CSR_i = hypre_CSRMatrixI(A_CSR); A_CSR_j = hypre_CSRMatrixJ(A_CSR); A_CSR_data = hypre_CSRMatrixData(A_CSR); f_vector_data = hypre_VectorData(f_vector); A_mat = hypre_CTAlloc(HYPRE_Real, n_global*n_global); b_vec = hypre_CTAlloc(HYPRE_Real, n_global); /*--------------------------------------------------------------- * Load CSR matrix into A_mat. 
 *---------------------------------------------------------------*/
               /* Scatter the gathered CSR matrix into the dense
                  n_global x n_global array A_mat (row-major), and copy the
                  gathered right-hand side into b_vec.  Every rank holds the
                  full global problem at this point. */
               for (i = 0; i < n_global; i++)
               {
                  for (jj = A_CSR_i[i]; jj < A_CSR_i[i+1]; jj++)
                  {
                     column = A_CSR_j[jj];
                     A_mat[i*n_global+column] = A_CSR_data[jj];
                  }
                  b_vec[i] = f_vector_data[i];
               }

               /* Redundant direct solve: each rank runs Gaussian elimination
                  on its own copy of the full system. */
               relax_error = gselim(A_mat,b_vec,n_global);

               /* Keep only this rank's slice of the global solution. */
               for (i = 0; i < n; i++)
               {
                  u_data[i] = b_vec[first_index+i];
               }

               hypre_TFree(A_mat);
               hypre_TFree(b_vec);
               hypre_CSRMatrixDestroy(A_CSR);
               A_CSR = NULL;
               hypre_SeqVectorDestroy(f_vector);
               f_vector = NULL;
            }
#ifdef HYPRE_NO_GLOBAL_PARTITION
            /* With no global partition, ranks that own no rows still took
               part in the collective gather above and must release their
               copies of the gathered matrix and vector. */
            else
            {
               hypre_CSRMatrixDestroy(A_CSR);
               A_CSR = NULL;
               hypre_SeqVectorDestroy(f_vector);
               f_vector = NULL;
            }
#endif
         }
         break;
   }

   return(relax_error);
}

/*-------------------------------------------------------------------------
 *
 * Gaussian Elimination
 *
 *------------------------------------------------------------------------ */

/* Set up the data used by hypre_GaussElimSolve on AMG level `level`:
 * gathers the distributed ParCSR matrix of that level into a dense
 * global_num_rows x global_num_rows array stored in the AMG data structure,
 * builds a sub-communicator containing only the ranks that own rows, and
 * caches the gather counts/displacements for the per-solve RHS gather.
 *
 * For relax_type == 99 the dense matrix is stored transposed (see the
 * explicit transpose loop below); otherwise it is stored as assembled.
 * Returns hypre_error_flag. */
HYPRE_Int hypre_GaussElimSetup (hypre_ParAMGData *amg_data, HYPRE_Int level, HYPRE_Int relax_type)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_GS_ELIM_SETUP] -= hypre_MPI_Wtime();
#endif
   /* Par Data Structure variables */
   hypre_ParCSRMatrix *A = hypre_ParAMGDataAArray(amg_data)[level];
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   MPI_Comm new_comm;

   /* Generate sub communicator: only ranks with num_rows > 0 take part in
      the dense solve collectives. */
   hypre_GenerateSubComm(comm, num_rows, &new_comm);

   if (num_rows)
   {
      hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
      HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
      HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
      HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
      HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
      HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
      HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
      HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
      HYPRE_Real *A_mat, *A_mat_local;
      HYPRE_Int *comm_info, *info, *displs;
      HYPRE_Int *mat_info, *mat_displs;
      HYPRE_Int new_num_procs, A_mat_local_size, i, jj, column;
      HYPRE_Int first_row_index = hypre_ParCSRMatrixFirstRowIndex(A);

      hypre_MPI_Comm_size(new_comm, &new_num_procs);

      /* comm_info packs two arrays into one allocation:
         info   = comm_info[0 .. new_num_procs-1]   (rows per rank)
         displs = comm_info[new_num_procs .. 2*new_num_procs] (row offsets) */
      comm_info = hypre_CTAlloc(HYPRE_Int, 2*new_num_procs+1);
      mat_info = hypre_CTAlloc(HYPRE_Int, new_num_procs);
      mat_displs = hypre_CTAlloc(HYPRE_Int, new_num_procs+1);
      info = &comm_info[0];
      displs = &comm_info[new_num_procs];

      /* Learn how many rows each participating rank owns. */
      hypre_MPI_Allgather(&num_rows, 1, HYPRE_MPI_INT, info, 1, HYPRE_MPI_INT, new_comm);

      /* Prefix sums: displs in rows, mat_info/mat_displs in matrix entries
         (each row contributes global_num_rows entries). */
      displs[0] = 0;
      mat_displs[0] = 0;
      for (i=0; i < new_num_procs; i++)
      {
         displs[i+1] = displs[i]+info[i];
         mat_displs[i+1] = global_num_rows*displs[i+1];
         mat_info[i] = global_num_rows*info[i];
      }

      /* Persistent RHS buffer for the per-solve gather in
         hypre_GaussElimSolve. */
      hypre_ParAMGDataBVec(amg_data) = hypre_CTAlloc(HYPRE_Real, global_num_rows);

      A_mat_local_size = global_num_rows*num_rows;
      A_mat_local = hypre_CTAlloc(HYPRE_Real, A_mat_local_size);
      A_mat = hypre_CTAlloc(HYPRE_Real, global_num_rows*global_num_rows);

      /* load local matrix into A_mat_local: local row i becomes dense row i
         of the local strip, with columns mapped to global indices. */
      for (i = 0; i < num_rows; i++)
      {
         for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
         {
            /* need col major */
            column = A_diag_j[jj]+first_row_index;
            A_mat_local[i*global_num_rows + column] = A_diag_data[jj];
         }
         for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
         {
            /* need col major */
            column = col_map_offd[A_offd_j[jj]];
            A_mat_local[i*global_num_rows + column] = A_offd_data[jj];
         }
      }

      /* Assemble the full dense matrix on every participating rank. */
      hypre_MPI_Allgatherv( A_mat_local, A_mat_local_size, HYPRE_MPI_REAL,
                            A_mat, mat_info, mat_displs, HYPRE_MPI_REAL, new_comm);

      if (relax_type == 99)
      {
         /* relax_type 99 wants the transposed layout; build AT_mat and store
            it instead of A_mat. */
         HYPRE_Real *AT_mat;
         AT_mat = hypre_CTAlloc(HYPRE_Real, global_num_rows*global_num_rows);
         for (i=0; i < global_num_rows; i++)
            for (jj=0; jj < global_num_rows; jj++)
               AT_mat[i*global_num_rows + jj] = A_mat[i+ jj*global_num_rows];
         hypre_ParAMGDataAMat(amg_data) = AT_mat;
         hypre_TFree (A_mat);
      }
      else
         hypre_ParAMGDataAMat(amg_data) = A_mat;

      /* Ownership of comm_info and new_comm transfers to amg_data. */
      hypre_ParAMGDataCommInfo(amg_data) = comm_info;
      hypre_ParAMGDataNewComm(amg_data) = new_comm;

      hypre_TFree(mat_info);
      hypre_TFree(mat_displs);
      hypre_TFree(A_mat_local);
   }
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_GS_ELIM_SETUP] += hypre_MPI_Wtime();
#endif
   return hypre_error_flag;
}

/* Direct coarse-grid solve using the dense matrix prepared by
 * hypre_GaussElimSetup: gathers the level's RHS onto every participating
 * rank, solves the full system redundantly, and keeps only the local slice
 * of the solution in u.
 *
 * Only relax_type == 9 actually factors/solves here (via gselim); for other
 * values the gathered RHS is copied back unsolved.  On gselim failure,
 * hypre_error(HYPRE_ERROR_GENERIC) is raised.  Returns hypre_error_flag. */
HYPRE_Int hypre_GaussElimSolve (hypre_ParAMGData *amg_data, HYPRE_Int level, HYPRE_Int relax_type)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_GS_ELIM_SOLVE] -= hypre_MPI_Wtime();
#endif
   hypre_ParCSRMatrix *A = hypre_ParAMGDataAArray(amg_data)[level];
   HYPRE_Int n = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   HYPRE_Int error_flag = 0;

   if (n)
   {
      MPI_Comm new_comm = hypre_ParAMGDataNewComm(amg_data);
      hypre_ParVector *f = hypre_ParAMGDataFArray(amg_data)[level];
      hypre_ParVector *u = hypre_ParAMGDataUArray(amg_data)[level];
      HYPRE_Real *A_mat = hypre_ParAMGDataAMat(amg_data);
      HYPRE_Real *b_vec = hypre_ParAMGDataBVec(amg_data);
      HYPRE_Real *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
      HYPRE_Real *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
      HYPRE_Real *A_tmp;
      HYPRE_Int *comm_info = hypre_ParAMGDataCommInfo(amg_data);
      HYPRE_Int *displs, *info;
      HYPRE_Int n_global = hypre_ParCSRMatrixGlobalNumRows(A);
      HYPRE_Int new_num_procs, i, my_info;   /* NOTE(review): my_info unused here — confirm */
      HYPRE_Int first_index = hypre_ParCSRMatrixFirstRowIndex(A);
      HYPRE_Int one_i = 1;                   /* NOTE(review): unused here — presumably for a LAPACK path; confirm */

      hypre_MPI_Comm_size(new_comm, &new_num_procs);
      info = &comm_info[0];
      displs = &comm_info[new_num_procs];

      /* Gather the full global RHS into b_vec on every rank. */
      hypre_MPI_Allgatherv ( f_data, n, HYPRE_MPI_REAL, b_vec, info, displs,
                             HYPRE_MPI_REAL, new_comm );

      /* gselim factors in place, so work on a scratch copy and keep the
         cached dense matrix intact for later solves. */
      A_tmp = hypre_CTAlloc (HYPRE_Real, n_global*n_global);
      for (i=0; i < n_global*n_global; i++)
         A_tmp[i] = A_mat[i];

      if (relax_type == 9)
      {
         error_flag = gselim(A_tmp,b_vec,n_global);
      }

      /* Copy back only this rank's slice of the global solution. */
      for (i = 0; i < n; i++)
      {
         u_data[i] = b_vec[first_index+i];
      }
      hypre_TFree(A_tmp);
   }
   if (error_flag) hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_GS_ELIM_SOLVE] += hypre_MPI_Wtime();
#endif
   return hypre_error_flag;
}

/* In-place Gaussian elimination solve of the dense row-major n x n system
 * A x = b, where x holds b on entry and the solution on exit.  A is
 * overwritten by the elimination.
 *
 * No pivoting is performed: a zero diagonal entry is simply skipped (the
 * corresponding row/unknown is left untouched), and only the 1x1 case
 * reports failure by returning 1.  NOTE(review): without pivoting this can
 * silently produce wrong results for singular or ill-ordered systems —
 * acceptable only for the SPD-style coarse grids it is used on; confirm.
 * Returns 0 on success. */
HYPRE_Int gselim(HYPRE_Real *A, HYPRE_Real *x, HYPRE_Int n)
{
   HYPRE_Int    err_flag = 0;
   HYPRE_Int    j,k,m;
   HYPRE_Real   factor;
   HYPRE_Real   divA;

   if (n==1)  /* A is 1x1 */
   {
      if (A[0] != 0.0)
      {
         x[0] = x[0]/A[0];
         return(err_flag);
      }
      else
      {
         err_flag = 1;
         return(err_flag);
      }
   }
   else /* A is nxn. Forward elimination */
   {
      for (k = 0; k < n-1; k++)
      {
         if (A[k*n+k] != 0.0)   /* zero pivot: row is skipped, no swap */
         {
            divA = 1.0/A[k*n+k];   /* hoisted reciprocal of the pivot */
            for (j = k+1; j < n; j++)
            {
               if (A[j*n+k] != 0.0)
               {
                  factor = A[j*n+k]*divA;
                  for (m = k+1; m < n; m++)
                  {
                     A[j*n+m] -= factor * A[k*n+m];
                  }
                  /* Elimination step for rhs */
                  x[j] -= factor * x[k];
               }
            }
         }
      }
      /* Back Substitution */
      for (k = n-1; k > 0; --k)
      {
         if (A[k*n+k] != 0.0)
         {
            x[k] /= A[k*n+k];
            for (j = 0; j < k; j++)
            {
               if (A[j*n+k] != 0.0)
               {
                  x[j] -= x[k] * A[j*n+k];
               }
            }
         }
      }
      if (A[0] != 0.0) x[0] /= A[0];
      return(err_flag);
   }
}
bfs_simple.c
/* Copyright (C) 2010-2011 The Trustees of Indiana University.             */
/*                                                                         */
/* Use, modification and distribution is subject to the Boost Software     */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt)                                   */
/*                                                                         */
/*  Authors: Jeremiah Willcock                                             */
/*           Andrew Lumsdaine                                              */

#include "common.h"
#include "oned_csr.h"
#include <mpi.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <limits.h>
#include <assert.h>

/* Per-process BFS state, allocated once in make_graph_data_structure and
 * reused across run_bfs calls (freed in free_graph_data_structure). */
static oned_csr_graph g;              /* local 1-D partitioned CSR graph */
static int64_t* g_oldq;               /* current-level vertex queue */
static int64_t* g_newq;               /* next-level vertex queue */
static unsigned long* g_visited;      /* bitmap over local vertices */
static const int coalescing_size = 256;   /* (tgt, src) pairs per message */
static int64_t* g_outgoing;           /* one coalescing buffer per rank */
static size_t* g_outgoing_counts /* 2x actual count */;
static MPI_Request* g_outgoing_reqs;  /* one in-flight send per rank */
static int* g_outgoing_reqs_active;   /* flag: request in g_outgoing_reqs live */
static int64_t* g_recvbuf;            /* single receive buffer */

/* Build the local CSR graph from the distributed tuple (edge-list) graph and
 * allocate all per-rank BFS scratch buffers sized from it. */
void make_graph_data_structure(const tuple_graph* const tg) {
  //makes the tuple graph into a one directional compressed sparse row graph
  convert_graph_to_oned_csr(tg, &g);
  const size_t nlocalverts = g.nlocalverts;
  g_oldq = (int64_t*)xmalloc(nlocalverts * sizeof(int64_t));
  g_newq = (int64_t*)xmalloc(nlocalverts * sizeof(int64_t));
  /* One visited bit per local vertex, packed into unsigned longs. */
  const int ulong_bits = sizeof(unsigned long) * CHAR_BIT;
  int64_t visited_size = (nlocalverts + ulong_bits - 1) / ulong_bits;
  g_visited = (unsigned long*)xmalloc(visited_size * sizeof(unsigned long));
  /* One coalescing buffer of (tgt, src) pairs for each destination rank;
   * "size" is the MPI communicator size from common.h. */
  g_outgoing = (int64_t*)xMPI_Alloc_mem(coalescing_size * size * 2 * sizeof(int64_t));
  g_outgoing_counts = (size_t*)xmalloc(size * sizeof(size_t)) /* 2x actual count */;
  g_outgoing_reqs = (MPI_Request*)xmalloc(size * sizeof(MPI_Request));
  g_outgoing_reqs_active = (int*)xmalloc(size * sizeof(int));
  g_recvbuf = (int64_t*)xMPI_Alloc_mem(coalescing_size * 2 * sizeof(int64_t));
}

/* Release everything allocated by make_graph_data_structure; buffers that
 * came from MPI_Alloc_mem go back through MPI_Free_mem. */
void free_graph_data_structure(void) {
  free(g_oldq);
  free(g_newq);
  free(g_visited);
  MPI_Free_mem(g_outgoing);
  free(g_outgoing_counts);
  free(g_outgoing_reqs);
  free(g_outgoing_reqs_active);
  MPI_Free_mem(g_recvbuf);
  free_oned_csr_graph(&g);
}

/* This implementation fills pred with parents only, never depths. */
int bfs_writes_depth_map(void) {
  return 0;
}

/* This version is the traditional level-synchronized BFS using two queues.  A
 * bitmap is used to indicate which vertices have been visited.  Messages are
 * sent and processed asynchronously throughout the code to hopefully overlap
 * communication with computation. */
void run_bfs(int64_t root, int64_t* pred) {
  const size_t nlocalverts = g.nlocalverts;

  /* Set up the queues. */
  int64_t* restrict oldq = g_oldq;
  int64_t* restrict newq = g_newq;
  size_t oldq_count = 0;
  size_t newq_count = 0;

  /* Set up the visited bitmap. */
  const int ulong_bits = sizeof(unsigned long) * CHAR_BIT;
  int64_t visited_size = (nlocalverts + ulong_bits - 1) / ulong_bits;
  unsigned long* restrict visited = g_visited;
  memset(visited, 0, visited_size * sizeof(unsigned long));
#define SET_VISITED(v) do {visited[VERTEX_LOCAL((v)) / ulong_bits] |= (1UL << (VERTEX_LOCAL((v)) % ulong_bits));} while (0)
#define TEST_VISITED(v) ((visited[VERTEX_LOCAL((v)) / ulong_bits] & (1UL << (VERTEX_LOCAL((v)) % ulong_bits))) != 0)

  /* Set up buffers for message coalescing, MPI requests, etc. for
   * communication. */
  const int coalescing_size = 256;  /* shadows the file-scope constant */
  int64_t* restrict outgoing = g_outgoing;
  size_t* restrict outgoing_counts = g_outgoing_counts;
  MPI_Request* restrict outgoing_reqs = g_outgoing_reqs;
  int* restrict outgoing_reqs_active = g_outgoing_reqs_active;
  memset(outgoing_reqs_active, 0, size * sizeof(int));
  int64_t* restrict recvbuf = g_recvbuf;
  MPI_Request recvreq;
  int recvreq_active = 0;

  /* Termination counter for each level: this variable counts the number of
   * ranks that have said that they are done sending to me in the current
   * level.  This rank can stop listening for new messages when it reaches
   * size. */
  int num_ranks_done;

  /* Set all vertices to "not visited." */
  {size_t i; for (i = 0; i < nlocalverts; ++i) pred[i] = -1;}

  /* Mark the root and put it into the queue. */
  if (VERTEX_OWNER(root) == rank) {
    SET_VISITED(root);
    pred[VERTEX_LOCAL(root)] = root;
    oldq[oldq_count++] = root;
  }

/* Drain completed MPI requests: enqueue any received (tgt, src) edges, count
 * zero-length "done" signals, re-post the receive, and retire finished sends
 * so their coalescing buffers can be reused.  Must be invoked frequently to
 * keep the protocol making progress. */
#define CHECK_MPI_REQS \
  /* Check all MPI requests and handle any that have completed. */ \
  do { \
    /* Test for incoming vertices to put onto the queue. */ \
    while (recvreq_active) { \
      int flag; \
      MPI_Status st; \
      MPI_Test(&recvreq, &flag, &st); \
      if (flag) { \
        recvreq_active = 0; \
        int count; \
        MPI_Get_count(&st, MPI_INT64_T, &count); \
        /* count == 0 is a signal from a rank that it is done sending to me
         * (using MPI's non-overtaking rules to keep that signal after all
         * "real" messages. */ \
        if (count == 0) { \
          ++num_ranks_done; \
        } else { \
          int j; \
          for (j = 0; j < count; j += 2) { \
            int64_t tgt = recvbuf[j]; \
            int64_t src = recvbuf[j + 1]; \
            /* Process one incoming edge. */ \
            assert (VERTEX_OWNER(tgt) == rank); \
            if (!TEST_VISITED(tgt)) { \
              SET_VISITED(tgt); \
              pred[VERTEX_LOCAL(tgt)] = src; \
              newq[newq_count++] = tgt; \
            } \
          } \
        } \
        /* Restart the receive if more messages will be coming. */ \
        if (num_ranks_done < size) { \
          MPI_Irecv(recvbuf, coalescing_size * 2, MPI_INT64_T, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &recvreq); \
          recvreq_active = 1; \
        } \
      } else break; \
    } \
    /* Mark any sends that completed as inactive so their buffers can be
     * reused. */ \
    int c; \
    for (c = 0; c < size; ++c) { \
      if (outgoing_reqs_active[c]) { \
        int flag; \
        MPI_Test(&outgoing_reqs[c], &flag, MPI_STATUS_IGNORE); \
        if (flag) outgoing_reqs_active[c] = 0; \
      } \
    } \
  } while (0)

  /* One iteration of this loop processes one BFS level. */
  while (1) {
    memset(outgoing_counts, 0, size * sizeof(size_t));
    num_ranks_done = 1; /* I never send to myself, so I'm always done */

    /* Start the initial receive. */
    if (num_ranks_done < size) {
      MPI_Irecv(recvbuf, coalescing_size * 2, MPI_INT64_T, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &recvreq);
      recvreq_active = 1;
    }

    /* Step through the current level's queue. */
    size_t i;
    for (i = 0; i < oldq_count; ++i) {
      CHECK_MPI_REQS;
      assert (VERTEX_OWNER(oldq[i]) == rank);
      assert (pred[VERTEX_LOCAL(oldq[i])] >= 0 && pred[VERTEX_LOCAL(oldq[i])] < g.nglobalverts);
      int64_t src = oldq[i];
      /* Iterate through its incident edges. */
      size_t j, j_end = g.rowstarts[VERTEX_LOCAL(oldq[i]) + 1];
      //so iterate through all elements in the row j, stop when you get to
      //the next rowstart
      for (j = g.rowstarts[VERTEX_LOCAL(oldq[i])]; j < j_end; ++j) {
        //we have the row that j is in and this will give us the column aka position
        int64_t tgt = g.column[j];
        int owner = VERTEX_OWNER(tgt);
        /* If the other endpoint is mine, update the visited map, predecessor
         * map, and next-level queue locally; otherwise, send the target and
         * the current vertex (its possible predecessor) to the target's owner.
         * */
        if (owner == rank) {
          if (!TEST_VISITED(tgt)) {
            SET_VISITED(tgt);
            pred[VERTEX_LOCAL(tgt)] = src;
            newq[newq_count++] = tgt;
          }
        } else {
          while (outgoing_reqs_active[owner]) CHECK_MPI_REQS; /* Wait for buffer to be available */
          size_t c = outgoing_counts[owner];
          outgoing[owner * coalescing_size * 2 + c] = tgt;
          outgoing[owner * coalescing_size * 2 + c + 1] = src;
          outgoing_counts[owner] += 2;
          /* Buffer full: ship it asynchronously and start refilling. */
          if (outgoing_counts[owner] == coalescing_size * 2) {
            MPI_Isend(&outgoing[owner * coalescing_size * 2], coalescing_size * 2, MPI_INT64_T, owner, 0, MPI_COMM_WORLD, &outgoing_reqs[owner]);
            outgoing_reqs_active[owner] = 1;
            outgoing_counts[owner] = 0;
          }
        }
      }
    }
    /* Flush any coalescing buffers that still have messages.  Destinations
     * are walked starting at rank+1 (mod size) to spread load. */
    int offset;
    for (offset = 1; offset < size; ++offset) {
      int dest = MOD_SIZE(rank + offset);
      if (outgoing_counts[dest] != 0) {
        while (outgoing_reqs_active[dest]) CHECK_MPI_REQS;
        MPI_Isend(&outgoing[dest * coalescing_size * 2], outgoing_counts[dest], MPI_INT64_T, dest, 0, MPI_COMM_WORLD, &outgoing_reqs[dest]);
        outgoing_reqs_active[dest] = 1;
        outgoing_counts[dest] = 0;
      }
      /* Wait until all sends to this destination are done. */
      while (outgoing_reqs_active[dest]) CHECK_MPI_REQS;
      /* Tell the destination that we are done sending to them. */
      MPI_Isend(&outgoing[dest * coalescing_size * 2], 0, MPI_INT64_T, dest, 0, MPI_COMM_WORLD, &outgoing_reqs[dest]); /* Signal no more sends */
      outgoing_reqs_active[dest] = 1;
      while (outgoing_reqs_active[dest]) CHECK_MPI_REQS;
    }
    /* Wait until everyone else is done (and thus couldn't send us any more
     * messages). */
    while (num_ranks_done < size) CHECK_MPI_REQS;

    /* Test globally if all queues are empty.
     * NOTE(review): newq_count is a size_t reduced as MPI_INT64_T -- same
     * width on LP64 platforms, but the types do not match; verify on any
     * target where size_t is not 64 bits. */
    int64_t global_newq_count;
    MPI_Allreduce(&newq_count, &global_newq_count, 1, MPI_INT64_T, MPI_SUM, MPI_COMM_WORLD);

    /* Quit if they all are empty. */
    if (global_newq_count == 0) break;

    /* Swap old and new queues; clear new queue for next level. */
    {int64_t* temp = oldq; oldq = newq; newq = temp;}
    oldq_count = newq_count;
    newq_count = 0;
  }
#undef CHECK_MPI_REQS
}

/* Map each global vertex id in vertex_p to its owning rank and the local
 * index on that rank (used by the validator to address the pred array). */
void get_vertex_distribution_for_pred(size_t count, const int64_t* vertex_p, int* owner_p, size_t* local_p) {
  const int64_t* restrict vertex = vertex_p;
  int* restrict owner = owner_p;
  size_t* restrict local = local_p;
  ptrdiff_t i;
#pragma omp parallel for
  for (i = 0; i < (ptrdiff_t)count; ++i) {
    owner[i] = VERTEX_OWNER(vertex[i]);
    local[i] = VERTEX_LOCAL(vertex[i]);
  }
}

/* Inverse of the mapping above: (rank, local index) -> global vertex id. */
int64_t vertex_to_global_for_pred(int v_rank, size_t v_local) {
  return VERTEX_TO_GLOBAL(v_rank, v_local);
}

/* Number of vertices whose pred entries live on this rank. */
size_t get_nlocalverts_for_pred(void) {
  return g.nlocalverts;
}
GB_cumsum.c
//------------------------------------------------------------------------------
// GB_cumsum: cumulative sum of an array
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Compute the cumulative sum of an array count[0:n], of size n+1
// in pseudo-MATLAB notation:

//      k = sum (count [0:n-1] != 0) ;

//      count = cumsum ([0 count[0:n-1]]) ;

// That is, count [j] on input is overwritten with the value of
// sum (count [0..j-1]).  count [n] is implicitly zero on input.
// On output, count [n] is the total sum.

#include "GB.h"

void GB_cumsum                  // compute the cumulative sum of an array
(
    int64_t *restrict count,    // size n+1, input/output
    const int64_t n,
    int64_t *restrict kresult,  // return k, if needed by the caller
    int nthreads
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (count != NULL) ;
    ASSERT (n >= 0) ;

    //--------------------------------------------------------------------------
    // determine # of threads to use
    //--------------------------------------------------------------------------

    #if !defined ( _OPENMP )
    nthreads = 1 ;
    #endif

    // cap the thread count so each thread gets at least ~1024 entries
    if (nthreads > 1)
    {
        nthreads = GB_IMIN (nthreads, n / 1024) ;
        nthreads = GB_IMAX (nthreads, 1) ;
    }

    //--------------------------------------------------------------------------
    // count = cumsum ([0 count[0:n-1]]) ;
    //--------------------------------------------------------------------------

    if (kresult == NULL)
    {

        if (nthreads <= 2)
        {

            //------------------------------------------------------------------
            // cumsum with one thread
            //------------------------------------------------------------------

            int64_t s = 0 ;
            for (int64_t i = 0 ; i < n ; i++)
            {
                // exclusive scan: count [i] becomes the sum of entries before i
                int64_t c = count [i] ;
                count [i] = s ;
                s += c ;
            }
            count [n] = s ;

        }
        else
        {

            //------------------------------------------------------------------
            // cumsum with multiple threads
            //------------------------------------------------------------------

            // classic two-phase parallel scan: each thread sums its own
            // partition, a barrier publishes the per-thread totals in ws,
            // then each thread rescans its partition with the offset of all
            // earlier partitions added in.
            int64_t ws [nthreads+1] ;
            #pragma omp parallel num_threads(nthreads)
            {
                // each thread sums up its own part
                int tid = GB_OPENMP_THREAD_ID ;
                int64_t istart, iend ;
                GB_PARTITION (istart, iend, n, tid, nthreads) ;
                int64_t s = 0 ;
                for (int64_t i = istart ; i < iend ; i++)
                {
                    s += count [i] ;
                }
                ws [tid] = s ;
                // all per-thread totals must be in ws before phase two
                #pragma omp barrier

                // each thread computes the cumsum of its own part
                s = 0 ;
                for (int i = 0 ; i < tid ; i++)
                {
                    s += ws [i] ;
                }
                for (int64_t i = istart ; i < iend ; i++)
                {
                    int64_t c = count [i] ;
                    count [i] = s ;
                    s += c ;
                }
                // the last partition's owner writes the grand total
                if (iend == n)
                {
                    count [n] = s ;
                }
            }
        }

    }
    else
    {

        if (nthreads <= 2)
        {

            //------------------------------------------------------------------
            // cumsum with one thread, also compute k
            //------------------------------------------------------------------

            // k counts the nonzero entries of the input array
            int64_t k = 0 ;
            int64_t s = 0 ;
            for (int64_t i = 0 ; i < n ; i++)
            {
                int64_t c = count [i] ;
                if (c != 0) k++ ;
                count [i] = s ;
                s += c ;
            }
            count [n] = s ;
            (*kresult) = k ;

        }
        else
        {

            //------------------------------------------------------------------
            // cumsum with multiple threads, also compute k
            //------------------------------------------------------------------

            // same two-phase scan as above, with a parallel per-thread
            // nonzero count (wk) reduced serially at the end.
            int64_t ws [nthreads+1] ;
            int64_t wk [nthreads+1] ;
            #pragma omp parallel num_threads(nthreads)
            {
                // each thread sums up its own part
                int tid = GB_OPENMP_THREAD_ID ;
                int64_t istart, iend ;
                GB_PARTITION (istart, iend, n, tid, nthreads) ;
                int64_t k = 0 ;
                int64_t s = 0 ;
                for (int64_t i = istart ; i < iend ; i++)
                {
                    int64_t c = count [i] ;
                    if (c != 0) k++ ;
                    s += c ;
                }
                ws [tid] = s ;
                wk [tid] = k ;
                #pragma omp barrier

                // each thread computes the cumsum of its own part
                s = 0 ;
                for (int i = 0 ; i < tid ; i++)
                {
                    s += ws [i] ;
                }
                for (int64_t i = istart ; i < iend ; i++)
                {
                    int64_t c = count [i] ;
                    count [i] = s ;
                    s += c ;
                }
                if (iend == n)
                {
                    count [n] = s ;
                }
            }

            // reduce the per-thread nonzero counts
            int64_t k = 0 ;
            for (int tid = 0 ; tid < nthreads ; tid++)
            {
                k += wk [tid] ;
            }
            (*kresult) = k ;
        }
    }
}
nodes.c
#include <stddef.h> #ifdef __cplusplus extern "C" { #endif extern void CXX_Walk_Double(char *l, const char *h, const size_t sz, double *b); #ifdef __cplusplus } #endif #include <stdio.h> #include <stdint.h> #include <omp.h> #include "allocator.h" #include "geometry.h" //#include "fio.h" #include "mesh.h" size_t nmalloc(char *fbuf, struct ntbl *n) { size_t sz = n->sz * 4; double *buf = (double *) fun3d_malloc(sz, sizeof(double)); size_t bytes = sz * sizeof(double); //struct wtbl w; //{ // w.l = fbuf; // w.h = fbuf + bytes; // w.t = DOUBLE; // w.sz = sz; //} //walkfbuf(&w, buf); CXX_Walk_Double(fbuf, fbuf + bytes, sz, buf); // Partitioned the data and arrange them uint32_t i; #pragma omp parallel for for(i = 0; i < n->sz; i++) { n->xyz->x0[i] = buf[i]; n->xyz->x1[i] = buf[i + n->sz]; n->xyz->x2[i] = buf[i + n->sz + n->sz]; /* Ignore the area, deprecated in the newer version */ /* n->area[i] = buf[i + n->sz + n->sz + n->sz]; */ } fun3d_free(buf); n->cdt = (double *) fun3d_malloc(n->sz, sizeof(double)); return bytes; }
special_ops.h
#pragma once #include <ops/ops.h> #include <loops/reduce.h> #include <loops/scalar.h> #include <loops/indexreduce.h> #include <loops/broadcasting.h> namespace functions { namespace broadcast { template <typename T> class Broadcast; } namespace transform { template <typename T> class Transform; } namespace scalar { } namespace reduce { template <typename T> class ReduceFunction; } } namespace simdOps { template<typename T> class Pooling2D { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ inline __host__ __device__ #elif defined(__GNUC__) #endif static int outSize(int size, int k, int s, int p, bool coverAll) { if (coverAll) return (size + p * 2 - k + s - 1) / s + 1; else return (size + p * 2 - k) / s + 1; } #ifdef __CUDACC__ /** * Based on: https://github.com/pjreddie/darknet/blob/master/src/im2col_kernels.cu */ static inline __device__ void execSpecialCuda( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { __shared__ int kH; __shared__ int kW; __shared__ int sH; __shared__ int sW; __shared__ int pH; __shared__ int pW; __shared__ int dH; __shared__ int dW; __shared__ int poolingMode; __shared__ T extraParam0; __shared__ int batchSize; __shared__ int inChannels; __shared__ int outH; __shared__ int outW; __shared__ int inH; __shared__ int inW; //__shared__ int *strideIn; //__shared__ int *strideOut; __shared__ int strideB; __shared__ int strideC; __shared__ int strideY; __shared__ int strideX; __shared__ int strideOB; __shared__ int strideOC; __shared__ int strideOY; __shared__ int strideOX; __shared__ int length; __shared__ int kHEff; __shared__ int kWEff; __shared__ bool fOrder; if (threadIdx.x == 0) { kH = (int)extraParams[0]; kW = (int)extraParams[1]; sH = (int)extraParams[2]; sW = (int)extraParams[3]; pH = (int)extraParams[4]; pW = (int)extraParams[5]; dH = (int)extraParams[6]; 
//Dilation, height dimension dW = (int)extraParams[7]; //Dilation, width dimension poolingMode = (int)extraParams[9]; extraParam0 = extraParams[10]; batchSize = shape::sizeAt(xShapeBuffer, 0); inChannels = shape::sizeAt(xShapeBuffer, 1); outH = shape::sizeAt(resultShapeBuffer, 2); outW = shape::sizeAt(resultShapeBuffer, 3); inH = shape::sizeAt(xShapeBuffer, 2); inW = shape::sizeAt(xShapeBuffer, 3); strideB = shape::stride(xShapeBuffer)[0]; strideC = shape::stride(xShapeBuffer)[1]; strideY = shape::stride(xShapeBuffer)[2]; strideX = shape::stride(xShapeBuffer)[3]; strideOB = shape::stride(resultShapeBuffer)[0]; strideOC = shape::stride(resultShapeBuffer)[1]; strideOY = shape::stride(resultShapeBuffer)[2]; strideOX = shape::stride(resultShapeBuffer)[3]; length = shape::length(resultShapeBuffer); //Replace kernel H/W with *effective* kernel H/W accounting for dilatyon kHEff = kH + (kH-1)*(dH-1); kWEff = kW + (kW-1)*(dW-1); fOrder = shape::order(resultShapeBuffer) == 'f'; /* if (blockIdx.x == 0) { printf("kH: %i; kW: %i; sH: %i; sW: %i; pH: %i; pW: %i; dH: %i; dW: %i; poolingMode: %i; extraParam0: %f;\n", kH, kW, sH, sW, pH, pW, dH, dW, poolingMode, (float) extraParam0); printf("batchSize: %i; inChannels: %i; outH: %i; outW: %i; inH: %i; inW: %i; strideB: %i; strideC: %i; strideY: %i; strideX: %i;\n", batchSize, inChannels, outH, outW, inH, inW, strideB, strideC, strideY, strideX); } */ } __syncthreads(); int tid = blockIdx.x * gridDim.x + threadIdx.x; for (int index = tid; index < length; index += blockDim.x * gridDim.x) { const int pw = index % outW; const int ph = (index / outW) % outH; const int c = (index / outW / outH) % inChannels; const int n = index / outW / outH / inChannels; int hstart = sH * ph - pH; int wstart = sW * pw - pW; int hend = hstart + kHEff; int wend = wstart + kWEff; // const int hSO = hstart; // const int hEO = hend; if(hstart < 0){ int f = (int)nd4j::math::nd4j_ceil<T>((T) -hstart / (T)dH); hstart += f * dH; } if(wstart < 0){ int f = 
(int)nd4j::math::nd4j_ceil<T>((T) -wstart / (T) dW); wstart += f * dW; } if(hend > inH){ int f = (int)nd4j::math::nd4j_ceil<T>((T) (hend-inH) / (T) dH); hend -= f * dH; } if(wend > inW){ int f = (int)nd4j::math::nd4j_ceil<T>((T) (wend-inW) / (T) dW); wend -= f * dW; } int pool_size = (int)(nd4j::math::nd4j_ceil<T>((T) (hend-hstart) / (T) dH) * (int) nd4j::math::nd4j_ceil<T>((T) (wend-wstart) / (T) dW)); //Accounts for dilation T sum = poolingMode == 0 ? (T) -MAX_FLOAT : (T) 0; T *input_slice = dx + (n * strideB + c * strideC); if (poolingMode == 0) { for (int h = hstart; h < hend; h += dH) { for (int w = wstart; w < wend; w += dW) { T v = input_slice[h * strideY + w * strideX]; if (v > sum) sum = v; } } } else if (poolingMode == 1) { for (int h = hstart; h < hend; h += dH) { for (int w = wstart; w < wend; w += dW) { sum += input_slice[h * strideY + w * strideX]; } } } else if (poolingMode == 2) { for (int h = hstart; h < hend; h += dH) { for (int w = wstart; w < wend; w += dW) { sum += nd4j::math::nd4j_pow<T>(nd4j::math::nd4j_abs<T>(input_slice[h * strideY + w * strideX]), extraParam0); } } } T res; if (poolingMode == 0) { res = sum; } else if (poolingMode == 1) { int divide_factor = pool_size; //Case 0: exclude padding if ((int) extraParam0 == 1) //Case 1: include padding divide_factor = kH * kW; res = sum / divide_factor; } else if (poolingMode == 2) { res = nd4j::math::nd4j_pow<T>(sum, (T) 1.0f / extraParam0); } if (!fOrder) { result[index] = res; } else { result[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = res; } /* if (index >= 0 && index < 400000) { printf("index: %i; hstart: %i; hend: %i; wstart: %i; wend: %i; ph: %i; pw: %i; hstart_orig: %i; hend_orig: %i;\n", index, hstart, hend, wstart, wend, ph, pw, hSO, hEO); } */ } } #endif static void execSpecial(T *in, Nd4jLong *inShapeBuffer, T *out, Nd4jLong *outShapeBuffer, T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { // input is [bS, iC, iH, iW] // output is [bS, iC, oH, oW] 
const Nd4jLong kH = (int)extraParams[0]; const Nd4jLong kW = (int)extraParams[1]; const Nd4jLong sH = (int)extraParams[2]; const Nd4jLong sW = (int)extraParams[3]; const Nd4jLong pH = (int)extraParams[4]; const Nd4jLong pW = (int)extraParams[5]; const Nd4jLong dH = (int)extraParams[6]; const Nd4jLong dW = (int)extraParams[7]; Nd4jLong poolingMode = (int)extraParams[9]; T extraParam0 = extraParams[10]; const Nd4jLong kHEff = kH + (kH-1)*(dH-1); const Nd4jLong kWEff = kW + (kW-1)*(dW-1); const int bS = shape::sizeAt(inShapeBuffer, 0); const int iC = shape::sizeAt(inShapeBuffer, 1); const int iH = shape::sizeAt(inShapeBuffer, 2); const int iW = shape::sizeAt(inShapeBuffer, 3); const int oH = shape::sizeAt(outShapeBuffer, 2); const int oW = shape::sizeAt(outShapeBuffer, 3); const Nd4jLong iStride0 = shape::stride(inShapeBuffer)[0]; const Nd4jLong iStride1 = shape::stride(inShapeBuffer)[1]; const Nd4jLong iStride2 = shape::stride(inShapeBuffer)[2]; const Nd4jLong iStride3 = shape::stride(inShapeBuffer)[3]; const Nd4jLong oStride0 = shape::stride(outShapeBuffer)[0]; const Nd4jLong oStride1 = shape::stride(outShapeBuffer)[1]; const Nd4jLong oStride2 = shape::stride(outShapeBuffer)[2]; const Nd4jLong oStride3 = shape::stride(outShapeBuffer)[3]; const Nd4jLong iStep2 = dH*iStride2; const Nd4jLong iStep3 = dW*iStride3; const int kProd = kH*kW; const T iStep2Inv = 1./iStep2; const T iStep3Inv = 1./iStep3; Nd4jLong hstart, wstart, hend, wend; T sum, *pIn; if(poolingMode == 0) { // max #pragma omp parallel for schedule(guided) private(pIn, sum, hstart, wstart, hend, wend) for(int b = 0; b < bS; ++b) { for(int c = 0; c < iC; ++c) { for(int oh = 0; oh < oH; ++oh) { for(int ow = 0; ow < oW; ++ow) { pIn = in + b * iStride0 + c * iStride1; hstart = oh * sH - pH; wstart = ow * sW - pW; hend = hstart + kHEff; wend = wstart + kWEff; if(hstart < 0) hstart += dH * (Nd4jLong)nd4j::math::nd4j_ceil<T>((T)-hstart / dH); if(wstart < 0) wstart += dW * 
(Nd4jLong)nd4j::math::nd4j_ceil<T>((T)-wstart / dW); if(hend > iH) hend -= dH * (Nd4jLong)nd4j::math::nd4j_ceil<T>((T)(hend-iH) / dH); if(wend > iW) wend -= dW * (Nd4jLong)nd4j::math::nd4j_ceil<T>((T)(wend-iW) / dW); hstart *= iStride2; hend *= iStride2; wstart *= iStride3; wend *= iStride3; sum = -MAX_FLOAT; for (Nd4jLong kh = hstart; kh < hend; kh += iStep2) for (Nd4jLong kw = wstart; kw < wend; kw += iStep3) { T val = pIn[kh + kw]; if (val > sum) sum = val; } out[b * oStride0 + c * oStride1 + oh * oStride2 + ow * oStride3] = sum; } } } } } /*************************************************************************/ else if(poolingMode == 1) { // avg #pragma omp parallel for schedule(guided) private(pIn, sum, hstart, wstart, hend, wend) for(int b = 0; b < bS; ++b) { for(int c = 0; c < iC; ++c) { for(int oh = 0; oh < oH; ++oh) { for(int ow = 0; ow < oW; ++ow) { pIn = in + b * iStride0 + c * iStride1; hstart = oh * sH - pH; wstart = ow * sW - pW; hend = hstart + kHEff; wend = wstart + kWEff; if(hstart < 0) hstart += dH * (Nd4jLong)nd4j::math::nd4j_ceil<T>((T)-hstart / dH); if(wstart < 0) wstart += dW * (Nd4jLong)nd4j::math::nd4j_ceil<T>((T)-wstart / dW); if(hend > iH) hend -= dH * (Nd4jLong)nd4j::math::nd4j_ceil<T>((T)(hend-iH) / dH); if(wend > iW) wend -= dW * (Nd4jLong)nd4j::math::nd4j_ceil<T>((T)(wend-iW) / dW); hstart *= iStride2; hend *= iStride2; wstart *= iStride3; wend *= iStride3; sum = static_cast<T>(0.); for (Nd4jLong kh = hstart; kh < hend; kh += iStep2) for (Nd4jLong kw = wstart; kw < wend; kw += iStep3) sum += pIn[kh + kw]; if ((int) extraParam0 == 0) //Exclude padding sum /= (Nd4jLong)nd4j::math::nd4j_ceil<T>((hend-hstart) * iStep2Inv) * (Nd4jLong)nd4j::math::nd4j_ceil<T>((wend-wstart) * iStep3Inv); //Accounts for dilation else if ((int) extraParam0 == 1) //Include padding sum /= kProd; out[b * oStride0 + c * oStride1 + oh * oStride2 + ow * oStride3] = sum; } } } } } /*************************************************************************/ else 
if(poolingMode == 2) { // pnorm #pragma omp parallel for schedule(guided) private(pIn, sum, hstart, wstart, hend, wend) for(int b = 0; b < bS; ++b) { for(int c = 0; c < iC; ++c) { for(int oh = 0; oh < oH; ++oh) { for(int ow = 0; ow < oW; ++ow) { pIn = in + b * iStride0 + c * iStride1; hstart = oh * sH - pH; wstart = ow * sW - pW; hend = hstart + kHEff; wend = wstart + kWEff; if(hstart < 0) hstart += dH * (Nd4jLong)nd4j::math::nd4j_ceil<T>((T)-hstart / dH); if(wstart < 0) wstart += dW * (Nd4jLong)nd4j::math::nd4j_ceil<T>((T)-wstart / dW); if(hend > iH) hend -= dH * (Nd4jLong)nd4j::math::nd4j_ceil<T>((T)(hend-iH) / dH); if(wend > iW) wend -= dW * (Nd4jLong)nd4j::math::nd4j_ceil<T>((T)(wend-iW) / dW); hstart *= iStride2; hend *= iStride2; wstart *= iStride3; wend *= iStride3; sum = static_cast<T>(0.); for (Nd4jLong kh = hstart; kh < hend; kh += iStep2) for (Nd4jLong kw = wstart; kw < wend; kw += iStep3) sum += nd4j::math::nd4j_pow<T>(nd4j::math::nd4j_abs<T>(pIn[kh + kw]), extraParam0); sum = nd4j::math::nd4j_pow<T>(sum, (T) 1. 
/ extraParam0); out[b * oStride0 + c * oStride1 + oh * oStride2 + ow * oStride3] = sum; } } } } } else { nd4j_printf("Special_ops::pooling2d: pooling mode argument can take three values only: 0, 1, 2, but got %i instead !\n", poolingMode); throw ""; } } op_def static T op(T d1, T *params) { return d1; } /** Calculate buffer offset (like Shape.getOffset) without checking on input for negative indices etc * normally negative indices are bad, OK here because of other checks on input indices * Uses unrolled loop specifically for length 4 */ static _CUDA_HD int getOffsetUnsafe4(int baseOffset, int *shape, int *stride, int *indices) { int offset = baseOffset; if (shape[0] != 1) offset += indices[0] * stride[0]; if (shape[1] != 1) offset += indices[1] * stride[1]; if (shape[2] != 1) offset += indices[2] * stride[2]; if (shape[3] != 1) offset += indices[3] * stride[3]; return offset; } /** * A version of Shape.getOffset without checking on input for negative indices etc * normally negative indices are bad, OK here because of other checks on input indices * Uses unrolled loop specifically for length 6, where indices[2] and indices[3] are zero (always are here) */ static _CUDA_HD int getOffsetUnsafe6(int baseOffset, int *shape, int *stride, int *indices) { int offset = baseOffset; if (shape[0] != 1) offset += indices[0] * stride[0]; if (shape[1] != 1) offset += indices[1] * stride[1]; if (shape[4] != 1) offset += indices[4] * stride[4]; if (shape[5] != 1) offset += indices[5] * stride[5]; return offset; } }; FORCEINLINE bool is_a_ge_zero_and_a_lt_b(int a, int b) { return static_cast<unsigned>(a) < static_cast<unsigned>(b); } template<typename T> class Im2col { public: static const bool requiresSpecial = true; static _CUDA_HD int outSize(int size, int k, int s, int p, bool coverAll) { if (coverAll) return (size + p * 2 - k + s - 1) / s + 1; else return (size + p * 2 - k) / s + 1; } #ifdef __CUDACC__ /** * Based on: 
https://github.com/pjreddie/darknet/blob/master/src/im2col_kernels.cu */ static inline __device__ void execSpecialCuda( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { /*kernel[0], kernel[1], stride[0], stride[1], padding[0], padding[1], 0, false*/ int kernelHeight = (int)extraParams[0]; int kernelWidth = (int)extraParams[1]; int strideY = (int)extraParams[2]; int strideX = (int)extraParams[3]; int padHeight = (int)extraParams[4]; int padWidth = (int)extraParams[5]; int dY = (int)extraParams[6]; //Dilation, height/y dimension int dX = (int)extraParams[7]; //Dilation, width/x dimension int kSize = kernelWidth * kernelHeight; T zeroPadVal = (T)extraParams[9]; //Value to use when value is padding. Usually 0 but not always auto outShape = shape::shapeOf(resultShapeBuffer); auto resultOrder = shape::order(resultShapeBuffer); auto outStride = shape::stride(resultShapeBuffer); auto inShape = shape::shapeOf(xShapeBuffer); auto inStride = shape::stride(xShapeBuffer); int samples = inShape[0]; int depth = inShape[1]; int height = inShape[2]; int width = inShape[3]; int strideex = inStride[0]; int stridech = inStride[1]; int strideh = inStride[2]; int stridew = inStride[3]; // (height + 2 * padHeight - kernelHeight) / strideX + 1; // // (width + 2 * padWidth - kernelWidth) / strideY + 1; // int height_col = outShape[4]; int width_col = outShape[5]; int n = samples * depth * height_col * width_col; /* if (threadIdx.x == 0) printf("Kernel h: [%i], w: [%i]; Col h: [%i], w: [%i]; Stride x: [%i], y: [%i]; Height: [%i], Width: [%i], Depth: [%i], N: [%i], Samples: [%i]\n", kernelHeight, kernelWidth, height_col, width_col, strideX, strideY, height, width, depth, n, samples); */ int index = blockIdx.x * blockDim.x + threadIdx.x; for (; index < n; index += blockDim.x*gridDim.x) { int h_index = index / width_col; int h_col = 
h_index % height_col; int w_col = index % width_col; int c_im = h_index / height_col; int c_col = c_im * kSize; int depth_im = c_im % depth; int num_im = c_im / depth; int h_offset = h_col * strideY - padHeight; int w_offset = w_col * strideX - padWidth; T* data_col_ptr = result; int i_c = (c_col * height_col + h_col) * width_col + w_col; data_col_ptr += (c_col * height_col + h_col) * width_col + w_col; T* data_im_ptr = dx; data_im_ptr += num_im * strideex + depth_im * stridech + h_offset * strideh + w_offset*stridew; for (int i = 0; i < kernelHeight; ++i) { for (int j = 0; j < kernelWidth; ++j) { int h_im = h_offset + i * dY; int w_im = w_offset + j * dX; int i_f = 0; int i_c_temp = i_c; for (int dim = 5; dim >= 0; dim--) { i_f += (i_c_temp % outShape[dim]) * outStride[dim]; i_c_temp = i_c_temp / outShape[dim]; } if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width){ result[i_f] = data_im_ptr[i * dY * strideh + j * dX * stridew]; } else result[i_f] = zeroPadVal; //result[i_f] = (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ? 
data_im_ptr[i * strideh + j*stridew] : 0; // in-bounds pixel, else zero padding (tail of the CUDA im2col kernel above)
                data_col_ptr += height_col * width_col;
                i_c += height_col * width_col;
            }
        }
    }
}
#endif

    // CPU im2col: copies image patches [bS, iC, iH, iW] into the 6d column buffer
    // [bS, iC, kH, kW, oH, oW]. extraParams carries kernel/stride/pad/dilation sizes.
    // Out-of-image taps (padding region) are written as zeros.
    static void execSpecial( T *imBuff, Nd4jLong *imShapeBuffer, T *colBuff, Nd4jLong *colShapeBuffer, T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
        /*kernel[0], kernel[1], stride[0], stride[1], padding[0], padding[1], 0, false*/
        // [bS, iC, iH, iW] is convoluted to [bS, iC, kH, kW, oH, oW]
        int kH = (int)extraParams[0];
        int kW = (int)extraParams[1];
        int sH = (int)extraParams[2];
        int sW = (int)extraParams[3];
        int pH = (int)extraParams[4];
        int pW = (int)extraParams[5];
        int dH = (int)extraParams[6]; //Dilation, height/y dimension
        int dW = (int)extraParams[7]; //Dilation, width/x dimension

        auto colShape = shape::shapeOf(colShapeBuffer);
        auto colStride = shape::stride(colShapeBuffer);
        auto imShape = shape::shapeOf(imShapeBuffer);
        auto imStride = shape::stride(imShapeBuffer);

        const int bS = imShape[0];
        const int iC = imShape[1];
        const int iH = imShape[2];
        const int iW = imShape[3];
        const int oH = colShape[4];
        const int oW = colShape[5];

        // strides are hoisted into locals so the innermost loop is pure arithmetic
        const Nd4jLong colStride0 = colStride[0];
        const Nd4jLong colStride1 = colStride[1];
        const Nd4jLong colStride2 = colStride[2];
        const Nd4jLong colStride3 = colStride[3];
        const Nd4jLong colStride4 = colStride[4];
        const Nd4jLong colStride5 = colStride[5];
        const Nd4jLong imStride0 = imStride[0];
        const Nd4jLong imStride1 = imStride[1];
        const Nd4jLong imStride2 = imStride[2];
        const Nd4jLong imStride3 = imStride[3];

        T *col, *im;
        int imRow, imCol;

        // fast path: both buffers plain 'c'-ordered, iterate in col memory order
        if (shape::order(imShapeBuffer) == 'c' && shape::order(colShapeBuffer) == 'c' && shape::strideDescendingCAscendingF(imShapeBuffer) && shape::strideDescendingCAscendingF(colShapeBuffer)) {

#pragma omp parallel for schedule(static) proc_bind(close) private(col, im, imRow, imCol)
            for (int b = 0; b < bS; b++) {
                for (int c = 0; c < iC; ++c) {
                    for (int kRow = 0; kRow < kH; ++kRow) {
                        for (int kCol = 0; kCol < kW; ++kCol) {
                            for (int colH = 0; colH < oH; ++colH) {
                                for (int colW = 0; colW < oW; ++colW) {
                                    imRow = (-pH + kRow * dH) + colH*sH;
                                    imCol = (-pW + kCol * dW) + colW*sW;
                                    col = colBuff + b*colStride0 + c*colStride1 + kRow*colStride2 + kCol*colStride3 + colH*colStride4 + colW*colStride5;
                                    im = imBuff + b*imStride0 + c*imStride1 + imRow*imStride2 + imCol*imStride3;
                                    // unsigned compare doubles as a negative-index check (zero padding)
                                    if (static_cast<unsigned>(imRow) >= static_cast<unsigned>(iH) || static_cast<unsigned>(imCol) >= static_cast<unsigned>(iW))
                                        *col = static_cast<T>(0.);
                                    else
                                        *col = *im;
                                }
                            }
                        }
                    }
                }
            }
        } else {
            // generic path: loop order chosen for output locality instead

#pragma omp parallel for schedule(static) proc_bind(close) private(im, col, imRow, imCol)
            for (int b = 0; b < bS; b++) {
                for (int colH = 0; colH < oH; ++colH) {
                    for (int colW = 0; colW < oW; ++colW) {
                        for (int c = 0; c < iC; ++c) {
                            for (int kRow = 0; kRow < kH; ++kRow) {
                                for (int kCol = 0; kCol < kW; ++kCol) {
                                    imRow = (-pH + kRow * dH) + colH*sH;
                                    imCol = (-pW + kCol * dW) + colW*sW;
                                    col = colBuff + b*colStride0 + c*colStride1 + kRow*colStride2 + kCol*colStride3 + colH*colStride4 + colW*colStride5;
                                    im = imBuff + b*imStride0 + c*imStride1 + imRow*imStride2 + imCol*imStride3;
                                    if (static_cast<unsigned>(imRow) >= static_cast<unsigned>(iH) || static_cast<unsigned>(imCol) >= static_cast<unsigned>(iW))
                                        *col = static_cast<T>(0.);
                                    else
                                        *col = *im;
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    // identity element-wise op; the real work happens in execSpecial
    op_def static T op(T d1, T *params) {
        return d1;
    }

    /** Calculate buffer offset (like Shape.getOffset) without checking on input for negative indices etc
     * normally negative indices are bad, OK here because of other checks on input indices
     * Uses unrolled loop specifically for length 4 */
    static _CUDA_HD int getOffsetUnsafe4(int baseOffset, int *shape, int *stride, int *indices) {
        int offset = baseOffset;
        if (shape[0] != 1) offset += indices[0] * stride[0];
        if (shape[1] != 1) offset += indices[1] * stride[1];
        if (shape[2] != 1) offset += indices[2] * stride[2];
        if (shape[3] != 1) offset += indices[3] * stride[3];
        return offset;
    }

    /**
     * A version of Shape.getOffset without checking on input for negative indices etc
     * normally negative
indices are bad, OK here because of other checks on input indices
     * Uses unrolled loop specifically for length 6, where indices[2] and indices[3] are zero (always are here) */
    static _CUDA_HD int getOffsetUnsafe6(int baseOffset, int *shape, int *stride, int *indices) {
        int offset = baseOffset;
        if (shape[0] != 1) offset += indices[0] * stride[0];
        if (shape[1] != 1) offset += indices[1] * stride[1];
        if (shape[4] != 1) offset += indices[4] * stride[4];
        if (shape[5] != 1) offset += indices[5] * stride[5];
        return offset;
    }
};

// Histogram special op: counts elements of x into numBins buckets over [min_val, max_val].
// extraParams: [0] = numBins, [1] = min, [2] = max. Out-of-range values are clamped
// into the first/last bin.
template<typename T>
class Histogram {
public:
    static const bool requiresSpecial = true;

#ifdef __CUDACC__
    static inline __device__ void execSpecialCuda(
            T *dx, Nd4jLong *xShapeBuffer,
            T *result, Nd4jLong *resultShapeBuffer,
            T *extraParams,
            int *allocationPointer, T *reductionPointer,
            UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {

        int numBins = (int) extraParams[0];
        T min_val = extraParams[1];
        T max_val = extraParams[2];

        int tid = blockIdx.x * blockDim.x + threadIdx.x;

        __shared__ T *bins;      // per-block histogram in dynamic shared memory
        __shared__ int length;
        __shared__ T *reductor;  // this block's slice of the cross-block scratch buffer
        if (threadIdx.x == 0) {
            extern __shared__ unsigned char shmem[];
            bins = (T *) shmem;
            reductor = ((T *) allocationPointer) + (numBins * blockIdx.x);
            length = shape::length(xShapeBuffer);
        }
        __syncthreads();

        // NOTE(review): binSize is 0 when max_val == min_val, making the division
        // below undefined - presumably callers guarantee max > min. TODO confirm.
        T binSize = (max_val - min_val) / (numBins);

        for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
            bins[e] = (T) 0.0f;
        }
        __syncthreads();

        for (int e = tid; e < length; e+= blockDim.x * gridDim.x) {
            int idx = (int) ((dx[e] - min_val) / binSize);
            if (idx < 0) idx = 0;
            else if (idx >= numBins) idx = numBins - 1;
            nd4j::math::atomics::nd4j_atomicAdd(&bins[idx], (T) 1.0f);
        }
        __syncthreads();

        // transfer shared memory to reduction memory
        if (gridDim.x > 1) {
            unsigned int *tc = (unsigned int *)reductionPointer;
            __shared__ bool amLast;

            for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
                reductor[e] = bins[e];
            }
            __threadfence();
            __syncthreads();

            if (threadIdx.x == 0) {
                // the last block to arrive performs the final accumulation
                unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
                amLast = (ticket == gridDim.x - 1);
            }
            __syncthreads();

            if (amLast) {
                tc[16384] = 0;

                // nullify shared memory for future accumulation
                for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
                    bins[e] = (T) 0.0f;
                }

                // accumulate reduced bins
                for (int r = 0; r < gridDim.x; r++) {
                    T *ptrBuf = ((T *)allocationPointer) + (r * numBins);
                    for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
                        bins[e] += ptrBuf[e];
                    }
                }
                __syncthreads();

                // write them out to Z
                for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
                    result[e] = bins[e];
                }
            }
        } else {
            // if there's only 1 block - just write away data
            for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
                result[e] = bins[e];
            }
        }
    };
#endif

    static void execSpecial(
            T *dx, Nd4jLong *xShapeBuffer,
            T *result, Nd4jLong *resultShapeBuffer,
            T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {

        int length = shape::length(xShapeBuffer);
        int _threads = 2;

        int numBins = (int) extraParams[0];
        int span = (length / _threads) + 8; // contiguous chunk of input per thread

        // min/max come precomputed via extraParams; the scan below is disabled
        T min_val = extraParams[1];
        T max_val = extraParams[2];

        /*
#pragma omp parallel for simd num_threads(_threads) if (_threads > 1) reduction(min:min_val) proc_bind(close)
        for (int x = 0; x < length; x++) {
            if (min_val > dx[x])
                min_val = dx[x];
        }

        // get max over input
        T max_val = (T) MIN_FLOAT;
#pragma omp parallel for simd num_threads(_threads) if (_threads > 1) reduction(max:max_val) proc_bind(close)
        for (int x = 0; x < length; x++) {
            if (max_val < dx[x])
                max_val = dx[x];
        }
        */

        // NOTE(review): binSize is 0 when max_val == min_val - division below would
        // then be undefined; presumably callers guarantee max > min. TODO confirm.
        T binSize = (max_val - min_val) / (numBins);

        // NOTE(review): result is accumulated into (+=) below, so it appears to be
        // assumed zero-initialized by the caller - verify against call sites.
#pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(close) default(shared)
        {
            int tid, start, end;
            // per-thread private histogram, merged under the critical section below
            int *bins = new int[numBins];
            std::memset(bins, 0, sizeof(int) * numBins);
            tid = omp_get_thread_num();
            start = span * tid;
            end = span * (tid + 1);
            if (end > length) end = length;

#pragma omp simd
            for (int x = start; x < end; x++) {
                int idx = (int) ((dx[x] - min_val) / binSize);
                if (idx < 0)
                    idx = 0;
                else if
(idx >= numBins) idx = numBins - 1; bins[idx]++; } #pragma omp critical { #pragma omp simd for (int x = 0; x < numBins; x++) { result[x] += bins[x]; } } delete[] bins; } } op_def static T op(T d1, T *params) { return d1; } }; template<typename T> class Col2Im { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ /** * https://github.com/pjreddie/darknet/blob/master/src/col2im_kernels.cu */ static inline __device__ void execSpecialCuda( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto inShape = shape::shapeOf(xShapeBuffer); auto inStride = shape::stride(xShapeBuffer); int strideex = inStride[0]; int stridech = inStride[1]; int stridekrow = inStride[2]; int stridekcol = inStride[3]; int striderow = inStride[4]; int stridecol = inStride[5]; int kernelHeight = inShape[2]; int kernelWidth = inShape[3]; // C int strideY = (int)extraParams[0]; int strideX = (int)extraParams[1]; int padHeight = (int)extraParams[2]; int padWidth = (int)extraParams[3]; int imgHeight = (int)extraParams[4]; int imgWidth = (int)extraParams[5]; int dY = (int)extraParams[6]; //Dilation in height/y dimension int dX = (int)extraParams[7]; //Dilation in width/x dimension auto outShape = shape::shapeOf(resultShapeBuffer); auto resultOrder = shape::order(resultShapeBuffer); auto outStride = shape::stride(resultShapeBuffer); int samples = outShape[0]; int depth = outShape[1]; int imgH = outShape[2]; int imgW = outShape[3]; int height_col = inShape[4];//(imgHeight + 2 * padHeight - kernelHeight) / strideX + 1; int width_col = inShape[5];//(imgWidth + 2 * padWidth - kernelWidth) / strideY + 1; int n = samples * depth * imgHeight * imgWidth; /*if (threadIdx.x == 0) printf("Kernel h: [%i], w: [%i]; Col h: [%i], w: [%i]; Stride x: [%i], y: [%i]; Height: [%i], Width: [%i], Depth: [%i], N: [%i], Samples: [%i]\n", kernelHeight, 
kernelWidth, height_col, width_col, strideX, strideY, imgHeight, imgWidth, depth, n, samples);*/

        //Effective kernel size, accounting for dilation
        int kEffectiveW = kernelWidth + (kernelWidth - 1) * (dX - 1);
        int kEffectiveH = kernelHeight + (kernelHeight - 1) * (dY - 1);

        // each iteration computes one output pixel as the sum of all column
        // entries that map onto it
        for (int i = (blockDim.x * blockIdx.x) + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
            T val = 0;
            int w_im = i % imgWidth + padWidth;
            int h_im = (i / imgWidth) % imgHeight + padHeight;
            int c_im = i / (imgWidth * imgHeight);
            int num_im = c_im / depth;
            int depth_im = c_im % depth;

            // compute the start and end of the output
            // These are the indexes for dimensions ??? in the 6d col matrix
            int w_col_start = (w_im < kEffectiveW) ? 0 : (w_im - kEffectiveW) / strideX + 1;
            int w_col_end = nd4j::math::nd4j_min<int>(w_im / strideX + 1, width_col);
            int h_col_start = (h_im < kEffectiveH) ? 0 : (h_im - kEffectiveH) / strideY + 1;
            int h_col_end = nd4j::math::nd4j_min<int>(h_im / strideY + 1, height_col);

            //Iterate over col entries in the 6d array... these are added up
            for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
                for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
                    int h_k = (h_im - h_col * strideY);
                    int w_k = (w_im - w_col * strideX);

                    // only kernel taps that line up with the dilation grid contribute
                    if(h_k % dY == 0 && w_k % dX == 0){
                        h_k /= dY;
                        w_k /= dX;

                        int data_col_index = num_im * strideex + depth_im * stridech + h_k * stridekrow + w_k * stridekcol + h_col * striderow + w_col * stridecol;
                        val += dx[data_col_index];
                    }
                }
            }

            // convert linear index i into an offset in the (possibly strided) output
            int i_f = 0;
            int i_c = i;
            for (int dim = 3; dim >= 0; dim--) {
                i_f += (i_c % outShape[dim]) * outStride[dim];
                i_c = i_c / outShape[dim];
            }

            result[i_f] = val;
        }
    }
#endif

    // CPU col2im: [bS, iC, kH, kW, oH, oW] is de-convoluted (scatter-added) to [bS, iC, iH, iW]
    static void execSpecial(
            T *colBuff, Nd4jLong *colShapeBuffer,
            T *imBuff, Nd4jLong *imShapeBuffer,
            T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {

        // [bS, iC, kH, kW, oH, oW] is de-convoluted to [bS, iC, iH, iW]
        auto colShape = shape::shapeOf(colShapeBuffer);
        auto colStride = shape::stride(colShapeBuffer);
        auto imShape = shape::shapeOf(imShapeBuffer);
        auto imStride = shape::stride(imShapeBuffer);

        const int sH = (int)extraParams[0];
        const int sW = (int)extraParams[1];
        const int pH = (int)extraParams[2];
        const int pW = (int)extraParams[3];
        const int iH = (int)extraParams[4];
        const int iW = (int)extraParams[5];
        const int dH = (int)extraParams[6];
        const int dW = (int)extraParams[7];

        const int bS = imShape[0];
        const int iC = imShape[1];
        const int kH = colShape[2];
        const int kW = colShape[3];
        const int oH = colShape[4];
        const int oW = colShape[5];

        // strides hoisted into locals for the hot loops below
        const Nd4jLong colStride0 = colStride[0];
        const Nd4jLong colStride1 = colStride[1];
        const Nd4jLong colStride2 = colStride[2];
        const Nd4jLong colStride3 = colStride[3];
        const Nd4jLong colStride4 = colStride[4];
        const Nd4jLong colStride5 = colStride[5];
        const Nd4jLong imStride0 = imStride[0];
        const Nd4jLong imStride1 = imStride[1];
        const Nd4jLong imStride2 = imStride[2];
        const Nd4jLong imStride3 = imStride[3];

        // initial zeroing of image content
        const Nd4jLong imEWS = shape::elementWiseStride(imShapeBuffer);
if( imEWS == 1) memset(imBuff, 0, shape::length(imShapeBuffer) * sizeof(T)); else #pragma omp parallel for schedule(static) proc_bind(close) for (int i = 0; i < shape::length(imShapeBuffer); i+=imEWS) *(imBuff + i) = 0.f; T *col, *im; int imRow, imCol; if (shape::order(colShapeBuffer) == 'c' && shape::order(imShapeBuffer) == 'c' && shape::strideDescendingCAscendingF(colShapeBuffer) && shape::strideDescendingCAscendingF(imShapeBuffer)) { #pragma omp parallel for schedule(static) proc_bind(close) private(col, im, imRow, imCol) for (int b = 0; b < bS; b++) { for (int c = 0; c < iC; ++c) { for (int kRow = 0; kRow < kH; ++kRow) { for (int kCol = 0; kCol < kW; ++kCol) { for (int colH = 0; colH < oH; ++colH) { for (int colW = 0; colW < oW; ++colW) { imRow = (-pH + kRow * dH) + colH*sH; imCol = (-pW + kCol * dW) + colW*sW; col = colBuff + b*colStride0 + c*colStride1 + kRow*colStride2 + kCol*colStride3 + colH*colStride4 + colW*colStride5; im = imBuff + b*imStride0 + c*imStride1 + imRow*imStride2 + imCol*imStride3; if (static_cast<unsigned>(imRow) < static_cast<unsigned>(iH) && static_cast<unsigned>(imCol) < static_cast<unsigned>(iW)) *im += *col; } } } } } } } else { #pragma omp parallel for schedule(static) proc_bind(close) private(im, col, imRow, imCol) for (int b = 0; b < bS; b++) { for (int colH = 0; colH < oH; ++colH) { for (int colW = 0; colW < oW; ++colW) { for (int c = 0; c < iC; ++c) { for (int kRow = 0; kRow < kH; ++kRow) { for (int kCol = 0; kCol < kW; ++kCol) { imRow = (-pH + kRow * dH) + colH*sH; imCol = (-pW + kCol * dW) + colW*sW; col = colBuff + b*colStride0 + c*colStride1 + kRow*colStride2 + kCol*colStride3 + colH*colStride4 + colW*colStride5; im = imBuff + b*imStride0 + c*imStride1 + imRow*imStride2 + imCol*imStride3; if (static_cast<unsigned>(imRow) < static_cast<unsigned>(iH) && static_cast<unsigned>(imCol) < static_cast<unsigned>(iW)) *im += *col; } } } } } } } } op_def static T op(T d1, T *params) { return d1; } /** Calculate buffer offset (like 
Shape.getOffset) without checking on input for negative indices etc * normally negative indices are bad, OK here because of other checks on input indices * Uses unrolled loop specifically for length 4 */ static _CUDA_HD int getOffsetUnsafe4(int baseOffset, int *shape, int *stride, int *indices) { int offset = baseOffset; if (shape[0] != 1) offset += indices[0] * stride[0]; if (shape[1] != 1) offset += indices[1] * stride[1]; if (shape[2] != 1) offset += indices[2] * stride[2]; if (shape[3] != 1) offset += indices[3] * stride[3]; return offset; } /** A version of Shape.getOffset without checking on input for negative indices etc * normally negative indices are bad, OK here because of other checks on input indices * Uses unrolled loop specifically for length 6, where indices[2] and indices[3] are zero (always are here) */ static _CUDA_HD int getOffsetUnsafe6(int baseOffset, int *shape, int *stride, int *indices) { int offset = baseOffset; if (shape[0] != 1) offset += indices[0] * stride[0]; if (shape[1] != 1) offset += indices[1] * stride[1]; if (shape[4] != 1) offset += indices[4] * stride[4]; if (shape[5] != 1) offset += indices[5] * stride[5]; return offset; } }; template<typename T> class Reverse { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ static inline __device__ void execSpecialCuda(T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *zShapeBuffer, T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { __shared__ Nd4jLong xLength; __shared__ int xEWS; __shared__ char xOrder; __shared__ Nd4jLong sLength; __shared__ T *shmem; int tid = threadIdx.x + blockIdx.x * blockDim.x; if (threadIdx.x == 0) { xLength = shape::length(xShapeBuffer); xEWS = shape::elementWiseStride(xShapeBuffer); xOrder = shape::order(xShapeBuffer); sLength = xLength - 1; extern __shared__ unsigned char shrd[]; shmem = (T *) shrd; } __syncthreads(); if (dx == result) { if (xEWS == 1) { 
for (int e = tid; e < xLength / 2; e += blockDim.x * gridDim.x) {
                    Nd4jLong idx = sLength - e;
                    T tmp = dx[e];
                    dx[e] = dx[idx];
                    dx[idx] = tmp;
                }
            } else if (xEWS >= 1) {
                for (int e = tid; e < xLength / 2; e += blockDim.x * gridDim.x) {
                    Nd4jLong idx1 = (sLength - e) * xEWS;
                    Nd4jLong idx2 = e * xEWS;
                    T tmp = dx[idx2];
                    dx[idx2] = dx[idx1];
                    dx[idx1] = tmp;
                }
            } else {
                // generic rank-aware path for non-ews shapes
                __shared__ int xRank;
                __shared__ Nd4jLong *xShape;
                __shared__ Nd4jLong *xStride;
                if (threadIdx.x == 0) {
                    xRank = shape::rank(xShapeBuffer);
                    xShape = shape::shapeOf(xShapeBuffer);
                    xStride = shape::stride(xShapeBuffer);
                }
                __syncthreads();

                Nd4jLong xCoord[MAX_RANK];
                Nd4jLong zCoord[MAX_RANK];

                for (int e = tid; e < xLength / 2; e += blockDim.x * gridDim.x) {
                    if (xOrder == 'c') {
                        shape::ind2subC(xRank, xShape, e, xCoord);
                        shape::ind2subC(xRank, xShape, sLength - e, zCoord);
                    } else {
                        shape::ind2sub(xRank, xShape, e, xCoord);
                        shape::ind2sub(xRank, xShape, sLength - e, zCoord);
                    }

                    auto xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank);
                    auto zOffset = shape::getOffset(0, xShape, xStride, zCoord, xRank);
                    result[zOffset] = dx[xOffset];
                }
            }
        } else {
            // out-of-place copy in reversed order
            __shared__ int zEWS;
            __shared__ char zOrder;
            if (threadIdx.x == 0) {
                zEWS = shape::elementWiseStride(zShapeBuffer);
                zOrder = shape::order(zShapeBuffer);
            }
            __syncthreads();

            if (xEWS == 1 && zEWS == 1 && xOrder == zOrder) {
                // loop for whole array
                for (int e = tid; e < xLength; e += blockDim.x * gridDim.x) {
                    result[sLength - e] = dx[e];
                }
            } else if (xEWS >= 1 && zEWS >= 1 && xOrder == zOrder) {
                for (int e = tid; e < xLength; e += blockDim.x * gridDim.x) {
                    result[(sLength - e) * zEWS] = dx[e * xEWS];
                }
            } else {
                __shared__ int xRank;
                __shared__ Nd4jLong *xShape;
                __shared__ Nd4jLong *xStride;
                __shared__ int zRank;
                __shared__ Nd4jLong *zShape;
                __shared__ Nd4jLong *zStride;
                if (threadIdx.x == 0) {
                    xRank = shape::rank(xShapeBuffer);
                    xShape = shape::shapeOf(xShapeBuffer);
                    xStride = shape::stride(xShapeBuffer);
                    zRank = shape::rank(zShapeBuffer);
                    zShape = shape::shapeOf(zShapeBuffer);
                    zStride = shape::stride(zShapeBuffer);
                }
                __syncthreads();

                Nd4jLong xCoord[MAX_RANK];
                Nd4jLong zCoord[MAX_RANK];

                // NOTE(review): zOffset is computed from xShape/xStride here although
                // zShape/zStride are loaded above; the CPU path below uses z's
                // shape/stride for the same case - looks inconsistent, TODO confirm.
                for (int e = tid; e < xLength; e += blockDim.x * gridDim.x) {
                    if (xOrder == 'c') {
                        shape::ind2subC(xRank, xShape, e, xCoord);
                        shape::ind2subC(xRank, xShape, sLength - e, zCoord);
                    } else {
                        shape::ind2sub(xRank, xShape, e, xCoord);
                        shape::ind2sub(xRank, xShape, sLength - e, zCoord);
                    }

                    auto xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank);
                    auto zOffset = shape::getOffset(0, xShape, xStride, zCoord, xRank);
                    result[zOffset] = dx[xOffset];
                }
            }
        }
    }
#endif

    // CPU reverse: in-place swap of symmetric pairs, or reversed copy into result
    static void execSpecial(T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *zShapeBuffer, T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
        Nd4jLong xLength = shape::length(xShapeBuffer);
        int xEWS = shape::elementWiseStride(xShapeBuffer);
        char xOrder = shape::order(xShapeBuffer);
        Nd4jLong sLength = xLength - 1; // index of the last element

        // two step phase here
        if (dx == result) {
            if (xEWS == 1) {
#pragma omp parallel for schedule(guided)
                for (Nd4jLong e = 0; e < xLength / 2; e++) {
                    Nd4jLong idx = sLength - e;
                    T tmp = dx[e];
                    dx[e] = dx[idx];
                    dx[idx] = tmp;
                }
            } else if (xEWS > 1) {
#pragma omp parallel for schedule(guided)
                for (Nd4jLong e = 0; e < xLength / 2; e++) {
                    Nd4jLong idx1 = (sLength - e) * xEWS;
                    Nd4jLong idx2 = e * xEWS;
                    T tmp = dx[idx2];
                    dx[idx2] = dx[idx1];
                    dx[idx1] = tmp;
                }
            } else {
                // generic rank-aware in-place path
                int xRank = shape::rank(xShapeBuffer);
                auto xShape = shape::shapeOf(xShapeBuffer);
                auto xStride = shape::stride(xShapeBuffer);

                Nd4jLong xCoord[MAX_RANK];
                Nd4jLong zCoord[MAX_RANK];

#pragma omp parallel for private(xCoord, zCoord) schedule(guided)
                for (Nd4jLong e = 0; e < xLength / 2; e++) {
                    if (xOrder == 'c') {
                        shape::ind2subC(xRank, xShape, e, xCoord);
                        shape::ind2subC(xRank, xShape, sLength - e, zCoord);
                    } else {
                        shape::ind2sub(xRank, xShape, e, xCoord);
                        shape::ind2sub(xRank, xShape, sLength - e, zCoord);
                    }

                    auto xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank);
                    auto zOffset =
shape::getOffset(0, xShape, xStride, zCoord, xRank); result[zOffset] = dx[xOffset]; } } } else { // single step phase here auto zEWS = shape::elementWiseStride(zShapeBuffer); auto zOrder = shape::order(zShapeBuffer); if (xEWS == 1 && zEWS == 1 && xOrder == zOrder) { #pragma omp parallel for schedule(guided) for (Nd4jLong e = 0; e < xLength; e++) { result[sLength - e] = dx[e]; } } else if (xEWS >= 1 && zEWS >= 1 && xOrder == zOrder) { #pragma omp parallel for schedule(guided) for (Nd4jLong e = 0; e < xLength; e++) { result[(sLength - e) * zEWS] = dx[e * xEWS]; } } else { auto xRank = shape::rank(xShapeBuffer); auto xShape = shape::shapeOf(xShapeBuffer); auto xStride = shape::stride(xShapeBuffer); auto zRank = shape::rank(zShapeBuffer); auto zShape = shape::shapeOf(zShapeBuffer); auto zStride = shape::stride(zShapeBuffer); Nd4jLong xCoord[MAX_RANK]; Nd4jLong zCoord[MAX_RANK]; #pragma omp parallel for private(xCoord, zCoord) schedule(guided) for (Nd4jLong e = 0; e < xLength; e++) { if (xOrder == 'c') shape::ind2subC(xRank, xShape, e, xCoord); else shape::ind2sub(xRank, xShape, e, xCoord); if (zOrder == 'c') shape::ind2subC(zRank, zShape, (sLength - e), zCoord); else shape::ind2sub(zRank, zShape, (sLength - e), zCoord); auto xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank); auto zOffset = shape::getOffset(0, zShape, zStride, zCoord, zRank); result[zOffset] = dx[xOffset]; } } } } op_def static T op(T d1, T *params) { return d1; } }; template<typename T> class SoftMax { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ /** * */ static inline __device__ void execSpecialCuda( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto shape = shape::shapeOf(xShapeBuffer); __shared__ T maxResult; __shared__ Nd4jLong *maxResultShapeBuffer; auto length = shape::length(xShapeBuffer); auto 
stride = shape::stride(xShapeBuffer);

        //compute the row wise maxes
        __shared__ Nd4jLong maxShape[2]; // it's always 2d here
        __shared__ Nd4jLong tempBuffer[8];
        if (threadIdx.x == 0) {
            maxResult = (T) 0.0;
            maxShape[0] = shape[0];
            maxShape[1] = 1;
            maxResultShapeBuffer = shape::shapeBuffer(2, maxShape, tempBuffer);
        }
        __syncthreads();

        functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr);
        __syncthreads();

        //subtract max of each row
        functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Subtract<T>>(maxResult, dx, xShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager);
        __syncthreads();

        //after subtracting the row wise maxes take the exp
        functions::transform::Transform<T>::template transformCuda<simdOps::Exp<T>>(result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets);
        __syncthreads();

        //take the sum for the exponential
        functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr);
        __syncthreads();

        //divide by the sum
        functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Divide<T>>(maxResult, result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager);
    }
#endif

    // CPU softmax: matrix input -> row-wise softmax via reduce/broadcast helpers;
    // vector input -> inline three-pass (max, exp+sum, divide) implementation
    static void execSpecial(
            T *dx, Nd4jLong *xShapeBuffer,
            T *result, Nd4jLong *resultShapeBuffer,
            T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {

        if (shape::isMatrix(xShapeBuffer)) {
            auto shape = shape::shapeOf(xShapeBuffer);

            //iterate along rows
            int dimension[1] = { 0 };
            int maxDimension[1] = { 1 };

            //compute the row wise maxes
            std::vector<T> maxResult(shape[0]);
            for (int i = 0; i < shape[0]; i++)
                maxResult[i] = 0.0;

            Nd4jLong maxShape[2] = { shape[0], 1 };
            auto maxResultShapeBuffer = shape::shapeBuffer(2, maxShape);
            functions::reduce::ReduceFunction<T>::template exec<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr);

            //subtract max of each row
            functions::broadcast::Broadcast<T>::template exec<simdOps::Subtract<T>>(dx, xShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr);

            //after subtracting the row wise maxes take the exp
            functions::transform::Transform<T>::template exec<simdOps::Exp<T>>(result, resultShapeBuffer, result, resultShapeBuffer, extraParams, tadShapeInfo, tadOffsets);

            //take the sum for the exponential
            functions::reduce::ReduceFunction<T>::template exec<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr);

            //divide by the sum
            functions::broadcast::Broadcast<T>::template exec<simdOps::Divide<T>>(result, resultShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr);

            delete[] maxResultShapeBuffer;
        } else if (shape::isVector(xShapeBuffer)) {
            T max = -FLOAT_MAX_VALUE;
            T sum = 0;
            int elementWiseStride = shape::elementWiseStride(xShapeBuffer);
            int resultElementWiseStride = shape::elementWiseStride(resultShapeBuffer);
            int length = shape::length(xShapeBuffer);

            if (elementWiseStride >= 1 && resultElementWiseStride >= 1) {
                if (elementWiseStride == 1 && resultElementWiseStride == 1) {
                    // pass 1: max, pass 2: exp and sum, pass 3: normalize
#pragma omp simd reduction(maxT:max)
                    for (int i = 0; i < length; i++) {
                        max = nd4j::math::nd4j_max<T>(max, dx[i]);
                    }

#pragma omp parallel for simd reduction(sumT:sum)
                    for (int i = 0; i < length; i++) {
                        result[i] = nd4j::math::nd4j_exp<T>(dx[i] - max);
                        sum += result[i];
                    }

#pragma omp simd
                    for (int i = 0; i < length; i++) {
                        result[i] /= sum;
                    }
                } else {
                    // strided variant of the same three passes
#pragma omp simd reduction(maxT:max)
                    for (int i = 0; i < length; i++) {
                        max =
nd4j::math::nd4j_max<T>(max, dx[i * elementWiseStride]); } #pragma omp parallel for simd reduction(sumT:sum) for (int i = 0; i < length; i++) { T r = nd4j::math::nd4j_exp<T>(dx[i * elementWiseStride] - max); result[i * resultElementWiseStride] = r; sum += r; } #pragma omp simd for (int i = 0; i < length; i++) { result[i * resultElementWiseStride] /= sum; } } } } } op_def static T op(T d1, T *params) { return nd4j::math::softplus<T>(d1); } }; template<typename T> class LogSoftMax { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ /** * */ static inline __device__ void execSpecialCuda( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto shape = shape::shapeOf(xShapeBuffer); auto stride = shape::stride(xShapeBuffer); //iterate along rows __shared__ T maxResult; __shared__ Nd4jLong *maxResultShapeBuffer; if (threadIdx.x == 0) { maxResult = (T) 0.0; } __syncthreads(); //compute the row wise maxes Nd4jLong maxShape[2] = { shape[0], 1 }; __shared__ Nd4jLong tempBuffer[8]; if (threadIdx.x == 0) maxResultShapeBuffer = shape::shapeBuffer(2, maxShape, tempBuffer); __syncthreads(); functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr); __syncthreads(); //subtract max of each row functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Subtract<T>>(maxResult, dx, xShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager); __syncthreads(); //after subtracting the row wise maxes take the exp functions::transform::Transform<T>::template transformCuda<simdOps::Exp<T>>(result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets); __syncthreads(); //take the sum for 
// the exponential
        functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr);
        __syncthreads();

        //divide by the sum
        functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Divide<T>>(maxResult, result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager);
        __syncthreads();

        // finally take the log of the normalized probabilities
        functions::transform::Transform<T>::template transformCuda<simdOps::Log<T>>(result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets);
    }
#endif

    // CPU log-softmax: matrix input -> row-wise via reduce/broadcast helpers;
    // vector input -> inline passes (max, exp+sum, normalize+log)
    static void execSpecial(
            T *dx, Nd4jLong *xShapeBuffer,
            T *result, Nd4jLong *resultShapeBuffer,
            T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {

        if (shape::isMatrix(xShapeBuffer, 2)) {
            auto shape = shape::shapeOf(xShapeBuffer);

            //iterate along rows
            int dimension[1] = { 0 };
            int maxDimension[1] = { 1 };

            //compute the row wise maxes
            std::vector <T> maxResult(shape[0]);

#pragma omp simd
            for (int i = 0; i < shape[0]; i++)
                maxResult[i] = 0.0;

            Nd4jLong maxShape[2] = { shape[0], 1 };
            auto maxResultShapeBuffer = shape::shapeBuffer(2, maxShape);
            functions::reduce::ReduceFunction<T>::template exec<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr);

            //subtract max of each row
            functions::broadcast::Broadcast<T>::template exec<simdOps::Subtract<T>>(dx, xShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr);

            //after subtracting the row wise maxes take the exp
            functions::transform::Transform<T>::template exec<simdOps::Exp<T>>(result, resultShapeBuffer, result, resultShapeBuffer, extraParams, tadShapeInfo, tadOffsets);

            //take the sum for the exponential
            functions::reduce::ReduceFunction<T>::template exec<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr);

            //divide by the sum
            functions::broadcast::Broadcast<T>::template exec<simdOps::Divide<T>>(result, resultShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr);

            functions::transform::Transform<T>::template exec<simdOps::Log<T>>(result, resultShapeBuffer, result, resultShapeBuffer, extraParams, tadShapeInfo, tadOffsets);

            delete[] maxResultShapeBuffer;
        } else if (shape::isVector(xShapeBuffer, 2)) {
            T max = -FLOAT_MAX_VALUE;
            T sum = 0;
            auto elementWiseStride = shape::elementWiseStride(xShapeBuffer);
            auto length = shape::length(xShapeBuffer);

            if (elementWiseStride == 1) {
                // NOTE(review): the max pass reads result[i] rather than dx[i] -
                // only correct if this op is always applied in place (dx == result).
                // TODO confirm against callers.
#pragma omp simd reduction(maxT:max)
                for (int i = 0; i < length; i++) {
                    max = nd4j::math::nd4j_max<T>(max, result[i]);
                }

#pragma omp simd reduction(sumT:sum)
                for (int i = 0; i < length; i++) {
                    result[i] = nd4j::math::nd4j_exp<T>(dx[i] - max);
                    sum += result[i];
                }

#pragma omp simd
                for (int i = 0; i < length; i++) {
                    result[i] /= sum;
                    result[i] = nd4j::math::nd4j_log<T>(result[i]);
                }
            } else if (elementWiseStride > 1) {
                // strided variant of the same passes
#pragma omp simd reduction(maxT:max)
                for (int i = 0; i < length; i++) {
                    max = nd4j::math::nd4j_max<T>(max, result[i * elementWiseStride]);
                }

#pragma omp simd reduction(sumT:sum)
                for (int i = 0; i < length; i++) {
                    result[i * elementWiseStride] = nd4j::math::nd4j_exp<T>(dx[i * elementWiseStride] - max);
                    sum += result[i * elementWiseStride];
                }

#pragma omp simd
                for (int i = 0; i < length; i++) {
                    result[i * elementWiseStride] /= sum;
                    result[i * elementWiseStride] = nd4j::math::nd4j_log<T>(result[i * elementWiseStride]);
                }
            }
        }
    }

    // NOTE(review): element-wise fallback is softplus, same as SoftMax above.
    op_def static T op(T d1, T *params) {
        return nd4j::math::softplus<T>(d1);
    }
};

/**
 * softmax(x)
 */
template<typename T>
class SoftMaxDerivative {
public:
    static const bool requiresSpecial = true;

#ifdef __CUDACC__
    /**
     *
     */
    static inline __device__ void execSpecialCuda(
            T *dx, Nd4jLong *xShapeBuffer,
            T *result, Nd4jLong
*resultShapeBuffer,
            T *extraParams,
            int *allocationPointer, T *reductionPointer,
            UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {

        auto shape = shape::shapeOf(xShapeBuffer);
        __shared__ T maxResult;
        __shared__ Nd4jLong *maxResultShapeBuffer;
        __shared__ Nd4jLong resultEWS;
        auto length = shape::length(xShapeBuffer);

        if (threadIdx.x == 0) {
            resultEWS = shape::elementWiseStride(resultShapeBuffer);
            maxResult = (T) 0.0;
        }
        __syncthreads();

        auto tride = shape::stride(xShapeBuffer);
        Nd4jLong maxShape[2] = { shape[0], 1 };

        __shared__ Nd4jLong tempBuffer[8];
        if (threadIdx.x == 0)
            maxResultShapeBuffer = shape::shapeBuffer(2, maxShape, tempBuffer);
        __syncthreads();

        functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr);
        __syncthreads();

        //subtract max of each row
        functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Subtract<T>>(maxResult, dx, xShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager);
        __syncthreads();

        //after subtracting the row wise maxes take the exp
        functions::transform::Transform<T>::template transformCuda<simdOps::Exp<T>>(result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets);
        __syncthreads();

        //take the sum for the exponential
        functions::reduce::ReduceFunction<T>::template execScalarCuda<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, &maxResult, maxResultShapeBuffer, reductionPointer, manager, nullptr);
        __syncthreads();

        //divide by the sum
        functions::scalar::ScalarTransform<T>::template transformCuda<simdOps::Divide<T>>(maxResult, result, resultShapeBuffer, extraParams, result, resultShapeBuffer, allocationPointer, manager);
        __syncthreads();

        // derivative: s * (1 - s) applied element-wise to the softmax output
        if (resultEWS >= 1) {
            for (int i = threadIdx.x; i < length; i += blockDim.x) {
                result[i * resultEWS] = result[i * resultEWS] * ((T) 1.0 - result[i * resultEWS]);
            }
        } else {
            printf("Non element wise stride not supported right now\n");
        }
    }
#endif

    // CPU softmax derivative: compute softmax, then s * (1 - s) in place
    static void execSpecial(
            T *dx, Nd4jLong *xShapeBuffer,
            T *result, Nd4jLong *resultShapeBuffer,
            T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {

        if (shape::isMatrix(xShapeBuffer, 2)) {
            auto shape = shape::shapeOf(xShapeBuffer);
            auto resultEleStide = shape::elementWiseStride(resultShapeBuffer);

            //iterate along rows
            int dimension[1] = { 0 };
            int maxDimension[1] = { 1 };
            auto len = shape::length(xShapeBuffer);

            //compute the row wise maxes
            std::vector <T> maxResult(shape[0]);

#pragma omp simd
            for (int i = 0; i < shape[0]; i++)
                maxResult[i] = 0.0;

            Nd4jLong maxShape[2] = { shape[0], 1 };
            auto maxResultShapeBuffer = shape::shapeBuffer(2, maxShape);
            functions::reduce::ReduceFunction<T>::template exec<simdOps::Max<T>>(dx, xShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr);

            //subtract max of each row
            // NOTE(review): the subtraction reads `result` (not dx) as its input -
            // only correct if the op runs in place (dx == result). TODO confirm.
            functions::broadcast::Broadcast<T>::template exec<simdOps::Subtract<T>>(result, resultShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr);

            //after subtracting the row wise maxes take the exp
            functions::transform::Transform<T>::template exec<simdOps::Exp<T>>(result, resultShapeBuffer, result, resultShapeBuffer, extraParams, tadShapeInfo, tadOffsets);

            //take the sum for the exponential
            functions::reduce::ReduceFunction<T>::template exec<simdOps::Sum<T>>(result, resultShapeBuffer, extraParams, maxResult.data(), maxResultShapeBuffer, maxDimension, 1, nullptr, nullptr);

            //divide by the sum
            functions::broadcast::Broadcast<T>::template exec<simdOps::Divide<T>>(result, resultShapeBuffer, maxResult.data(), maxResultShapeBuffer, result, resultShapeBuffer, dimension, 1, nullptr, nullptr, nullptr, nullptr);

            if (resultEleStide >= 1) {
                if (resultEleStide == 1) {
#pragma omp simd
                    for (int i = 0; i < len; i++) {
                        result[i] = result[i] * ((T) 1.0f - result[i]);
                    }
                } else {
#pragma omp simd
                    for (int i = 0; i < len; i++) {
                        result[i * resultEleStide] = result[i * resultEleStide] * ((T) 1.0f - result[i * resultEleStide]);
                    }
                }
            } else {
                // generic rank-aware path for non-ews result buffers
                auto zShape = shape::shapeOf(resultShapeBuffer);
                auto zStride = shape::stride(resultShapeBuffer);
                auto zRank = shape::rank(resultShapeBuffer);

                Nd4jLong zCoord[MAX_RANK];

                for (int i = 0; i < len; i++) {
                    shape::ind2subC(zRank,zShape, i, zCoord);
                    Nd4jLong zOffset = shape::getOffset(0, zShape, zStride, zCoord, zRank);
                    result[zOffset] = result[zOffset] * ((T) 1.0f - result[zOffset]);
                }
            }

            delete[] maxResultShapeBuffer;
        } else if (shape::isVector(xShapeBuffer, 2)) {
            T max = -FLOAT_MAX_VALUE;
            T sum = 0;

            auto elementWiseStride = shape::elementWiseStride(xShapeBuffer);
            auto length = shape::length(xShapeBuffer);

            if (elementWiseStride == 1) {
                // NOTE(review): max pass reads result[i] - in-place assumption again.
#pragma omp simd reduction(maxT:max)
                for (int i = 0; i < length; i++) {
                    max = nd4j::math::nd4j_max<T>(max, result[i]);
                }

#pragma omp simd reduction(sumT:sum)
                for (int i = 0; i < length; i++) {
                    result[i] -= max;
                    result[i] = nd4j::math::nd4j_exp<T>(result[i]);
                    sum += result[i];
                }

#pragma omp simd
                for (int i = 0; i < length; i++) {
                    result[i] /= sum;
                }

#pragma omp simd
                for (int i = 0; i < length; i++) {
                    result[i] = result[i] * ((T) 1.0f - result[i]);
                }
            } else if (elementWiseStride >= 1) {
                // strided variant of the same passes
#pragma omp simd reduction(maxT:max)
                for (int i = 0; i < length; i++) {
                    max = nd4j::math::nd4j_max<T>(max, result[i * elementWiseStride]);
                }

#pragma omp simd reduction(sumT:sum)
                for (int i = 0; i < length; i++) {
                    result[i * elementWiseStride] -= max;
                    result[i * elementWiseStride] = nd4j::math::nd4j_exp<T>(result[i * elementWiseStride]);
                    sum += result[i * elementWiseStride];
                }

#pragma omp simd
                for (int i = 0; i < length; i++) {
                    result[i * elementWiseStride] /= sum;
                }

#pragma omp simd
                for (int i = 0; i < length; i++) {
                    result[i * elementWiseStride] = result[i * elementWiseStride] * ((T) 1.0f - result[i * elementWiseStride]);
                }
            } else {
                printf("non-ews access on row not
implemented yet"); } } } op_def static T op(T d1, T *params) { return nd4j::math::softplus<T>(d1); } }; template<typename T> class IsMax { public: static const bool requiresSpecial = true; #ifdef __CUDACC__ static inline __device__ void doAllCuda( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager) { // this code is safe to delete, it's never used /* __shared__ int maxIdx; __shared__ int length; if (threadIdx.x == 0) { length = shape::length(resultShapeBuffer); } __syncthreads(); functions::indexreduce::IndexReduce<T>::template transform<simdOps::IndexMax<T>>( dx, xShapeBuffer, extraParams, result, resultShapeBuffer, nullptr, 1, 1, allocationPointer, reductionPointer, manager, nullptr, nullptr); __syncthreads(); if (threadIdx.x == 0) maxIdx = (int)result[0]; __syncthreads(); for (int i = threadIdx.x; i < length; i += blockDim.x) result[i] = 0; __syncthreads(); if (threadIdx.x == 0) { result[maxIdx] = 1.0; } */ } #endif #ifdef __CUDACC__ inline __host__ #elif defined(__GNUC__) #endif static void doAll( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams) { auto length = shape::length(xShapeBuffer); auto eleStride = shape::elementWiseStride(xShapeBuffer); auto resultEleStride = shape::elementWiseStride(resultShapeBuffer); auto xOrder = shape::order(xShapeBuffer); auto resultOrder = shape::order(resultShapeBuffer); /* int tadsPerThread = tads / TAD_THRESHOLD; int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread); num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads()); */ if (xOrder == resultOrder && xOrder == 'c') { if (eleStride == 1 && resultEleStride == 1) { if (length < ELEMENT_THRESHOLD) { int maxIdx = 0; T currMax = dx[0]; //#pragma omp simd reduction (max:maxIdx,currMax) for (int i = 0; i < length; i++) { if (currMax < dx[i]) { currMax = dx[i]; maxIdx = i; } result[i] = 0.0; } result[maxIdx] 
= 1.0; } else { int maxIdx = 0; T currMax = dx[0]; #pragma omp parallel proc_bind(AFFINITY) { int maxIdxLocal = maxIdx; T currMaxLocal = currMax; //#pragma omp simd reduction(max:maxIdxLocal,currMaxLocal) for (int i = 0; i < length; i++) { if (currMaxLocal < dx[i]) { currMaxLocal = dx[i]; maxIdxLocal = i; } result[i] = 0.0; } #pragma omp critical { if (currMax < currMaxLocal) { currMax = currMaxLocal; maxIdx = maxIdxLocal; } } } result[maxIdx] = 1.0; } } else { if (length < ELEMENT_THRESHOLD) { int maxIdx = 0; T currMax = dx[0]; //#pragma omp simd reduction(max:maxIdx,currMax) for (int i = 0; i < length; i++) { result[i * resultEleStride] = 0.0; if (currMax < dx[i * eleStride]) { currMax = dx[i * eleStride]; maxIdx = i; } } result[maxIdx * resultEleStride] = 1.0; } else { int maxIdx = 0; T currMax = dx[0]; #pragma omp parallel proc_bind(AFFINITY) default(shared) { int maxIdxLocal = maxIdx; T currMaxLocal = currMax; //#pragma omp simd reduction(max:maxIdxLocal,currMaxLocal) for (int i = 0; i < length; i++) { result[i * resultEleStride] = 0.0; if (currMaxLocal < dx[i * eleStride]) { currMaxLocal = dx[i * eleStride]; maxIdxLocal = i; } } #pragma omp critical { if (currMax < currMaxLocal) { currMax = currMaxLocal; maxIdx = maxIdxLocal; } } } result[maxIdx * resultEleStride] = 1.0; } } } else { Nd4jLong shapeIter[MAX_RANK]; Nd4jLong coord[MAX_RANK]; int dim; Nd4jLong xStridesIter[MAX_RANK]; Nd4jLong resultStridesIter[MAX_RANK]; auto xShape = shape::shapeOf(xShapeBuffer); auto xStride = shape::stride(xShapeBuffer); auto resultStride = shape::stride(resultShapeBuffer); auto rank = shape::rank(xShapeBuffer); T *originalResult = result; if (PrepareTwoRawArrayIter<T>(rank, xShape, dx, xStride, result, resultStride, &rank, shapeIter, &dx, xStridesIter, &result, resultStridesIter) >= 0) { T value = dx[0]; int idx = 0; int maxIdx = 0; ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); { if (dx[0] > value) { value = dx[0]; maxIdx = idx; } idx++; result[0] = 0.0; } 
ND4J_RAW_ITER_TWO_NEXT( dim, rank, coord, shapeIter, dx, xStridesIter, result, resultStridesIter); //pointer to where max value would be if (shape::order(resultShapeBuffer) == 'c' || (shape::order(resultShapeBuffer) == 'f' && maxIdx * shape::stride(resultShapeBuffer)[shape::rank(resultShapeBuffer) - 1] >= shape::length(resultShapeBuffer))) originalResult[maxIdx] = 1.0; else originalResult[maxIdx * shape::stride(resultShapeBuffer)[shape::rank(resultShapeBuffer) - 1]] = 1.0; } } } public: #ifdef __CUDACC__ /** * */ static inline __device__ void execSpecialCuda( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { // FIXME: MAX_DIMENSION is lower then FP16 frame if (extraParams == nullptr || (int) extraParams[0] == MAX_DIMENSION) { doAllCuda(dx, xShapeBuffer, result, resultShapeBuffer, extraParams, allocationPointer, reductionPointer, manager); } } #endif static void execSpecial( T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { //FIXME: this op should be moved to CustomOps if (extraParams == nullptr || (int)extraParams[0] == 0 || ((int)extraParams[0] == 1 && (int)extraParams[1] == MAX_DIMENSION)) { doAll(dx, xShapeBuffer, result, resultShapeBuffer, extraParams); } else if (shape::isVector(xShapeBuffer)) { auto dimensionLength = (int)extraParams[0]; auto dimension = new int[dimensionLength]; auto length = shape::length(xShapeBuffer); for (int i = 0; i < dimensionLength; i++) { dimension[i] = (int)extraParams[i + 1]; } if (shape::shapeOf(xShapeBuffer)[dimension[0]] == 1) { for (int i = 0; i < length; i++) { result[i] = 1.0; } } else { auto eleStride = shape::elementWiseStride(xShapeBuffer); if (eleStride == 1) { int maxIdx = 0; T currMax = dx[0]; if (length < ELEMENT_THRESHOLD) { //#pragma omp simd reduction(max:maxIdx,currMax) for 
(int i = 0; i < length; i++) { if (currMax < dx[i]) { currMax = dx[i]; maxIdx = i; } result[i] = 0.0; } } else { #pragma omp parallel proc_bind(AFFINITY) default(shared) { int maxIdxLocal = maxIdx; T currMaxLocal = currMax; //#pragma omp simd reduction(max:maxIdxLocal,currMaxLocal) for (int i = 0; i < length; i++) { if (currMaxLocal < dx[i]) { currMaxLocal = dx[i]; maxIdxLocal = i; } result[i] = 0.0; } #pragma omp critical { if (currMax < currMaxLocal) { currMax = currMaxLocal; maxIdx = maxIdxLocal; } } } } result[maxIdx] = 1.0; } else { int maxIdx = 0; T currMax = dx[0]; if (length < ELEMENT_THRESHOLD) { //#pragma omp parallel for reduction(max:maxIdx,currMax) proc_bind(AFFINITY) for (int i = 0; i < length; i++) { if (currMax < dx[i * eleStride]) { currMax = dx[i * eleStride]; maxIdx = i; } result[i] = 0.0; } } else { #pragma omp parallel proc_bind(AFFINITY) default(shared) { int maxIdxLocal = maxIdx; T currMaxLocal = currMax; //#pragma omp parallel for reduction(max:maxIdx,currMax) proc_bind(AFFINITY) for (int i = 0; i < length; i++) { if (currMaxLocal < dx[i * eleStride]) { currMaxLocal = dx[i * eleStride]; maxIdxLocal = i; } result[i] = 0.0; } #pragma omp critical { if (currMax < currMaxLocal) { currMax = currMaxLocal; maxIdx = maxIdxLocal; } } } } result[maxIdx] = 1.0; } } } else { auto dimensionLength = (int) extraParams[0]; auto dimension = new int[dimensionLength]; #pragma omp simd for (int i = 0; i < dimensionLength; i++) { dimension[i] = (int) extraParams[i + 1]; } //decompose in to several sub tads after //moving all dimensions (in sorted order) //to the back. 
//permuted version of the x shape info for setting up the tad problem auto tadShapeShapeInfo = tadShapeInfo; shape::TAD tad (xShapeBuffer, dimension, dimensionLength); if(tadShapeInfo==nullptr) { tad.createTadOnlyShapeInfo(); tad.createOffsets(); tadShapeShapeInfo = tad.tadOnlyShapeInfo; tadOffsets = tad.tadOffsets; } auto tadLength = shape::tadLength(xShapeBuffer, dimension, dimensionLength); auto tads = shape::length(xShapeBuffer) / tadLength; int tadsPerThread = tads / TAD_THRESHOLD; int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread); num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads()); auto tadEWS = shape::elementWiseStride(tadShapeShapeInfo); auto zEWS = tadEWS; int span = (tads / num_threads) + 8; #pragma omp parallel num_threads(num_threads) if (num_threads>1) proc_bind(AFFINITY) { int tid = omp_get_thread_num(); int start = span * tid; int end = span * (tid + 1); if (end > tads) end = tads; for (int r = start; r < end; r++) { if (tadEWS > 0 && zEWS > 0 && dimensionLength == 1) { T *rX = dx + tadOffsets[r]; T *rZ = result + tadOffsets[r]; T maxValue = rX[0]; int maxIdx = 0; if (tadEWS == 1 && zEWS == 1) { //#pragma omp simd reduction(max:maxValue,maxIdx) for (int i = 0; i < tadLength; i++) { if (rX[i] > maxValue) { maxIdx = i; maxValue = rX[i]; } } #pragma omp simd for (int i = 0; i < tadLength; i++) { rZ[i] = maxIdx == i ? (T) 1.0 : (T) 0.0; } } else { //#pragma omp parallel for reduction(max:maxValue,maxIdx) default(shared) for (int i = 0; i < tadLength; i++) { if (rX[i * tadEWS] > maxValue) { maxIdx = i; maxValue = rX[i * tadEWS]; } } #pragma omp simd for (int i = 0; i < tadLength; i++) { rZ[i * zEWS] = maxIdx == i ? 
(T) 1.0 : (T) 0.0; } } } else { int tadsPerThread = tads / TAD_THRESHOLD; int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread); num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads()); auto offset = tadOffsets[r]; Nd4jLong shapeIter[MAX_RANK]; Nd4jLong coord[MAX_RANK]; int dim; Nd4jLong xStridesIter[MAX_RANK]; Nd4jLong resultStridesIter[MAX_RANK]; auto xShape = shape::shapeOf(tadShapeShapeInfo); auto xStride = shape::stride(tadShapeShapeInfo); auto resultStride = shape::stride(tadShapeShapeInfo); int rank = shape::rank(tadShapeShapeInfo); T *xPointer = dx + offset; T *resultPointer = result + offset; T maxValue = xPointer[0]; T *maxCursor = resultPointer; Nd4jPointer maxCursorLong = reinterpret_cast<Nd4jPointer>(maxCursor); if (PrepareTwoRawArrayIter<T>(rank, xShape, xPointer, xStride, resultPointer, resultStride, &rank, shapeIter, &xPointer, xStridesIter, &resultPointer, resultStridesIter) >= 0) { ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); { if (maxValue < xPointer[0]) { maxCursor = resultPointer; maxCursorLong = reinterpret_cast<Nd4jPointer>(resultPointer); maxValue = xPointer[0]; } resultPointer[0] = 0.0; } ND4J_RAW_ITER_TWO_NEXT(dim, rank, coord, shapeIter, xPointer, xStridesIter, resultPointer, resultStridesIter); maxCursor = reinterpret_cast<T *>(maxCursorLong); maxCursor[0] = 1.0; } } } } delete[] dimension; } } op_def static T op(T d1, T *params) { return nd4j::math::softplus<T>(d1); } }; }
GrB_Monoid_wait.c
//------------------------------------------------------------------------------ // GrB_Monoid_wait: wait for a user-defined GrB_Monoid to complete //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // In SuiteSparse:GraphBLAS, a user-defined GrB_Monoid has no pending // operations to wait for. All this method does is verify that the monoid is // properly initialized, and then it does an OpenMP flush. #include "GB.h" GrB_Info GrB_Monoid_wait // no work, just check if the GrB_Monoid is valid ( GrB_Monoid *monoid ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- #pragma omp flush GB_WHERE1 ("GrB_Monoid_wait (&monoid)") ; GB_RETURN_IF_NULL (monoid) ; GB_RETURN_IF_NULL_OR_FAULTY (*monoid) ; //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- #pragma omp flush return (GrB_SUCCESS) ; }
HelloOMP.c
#include <stdio.h>
#include <omp.h>

/*
 * Minimal OpenMP demo: every thread in the parallel region prints a
 * greeting tagged with its own thread id.  The interleaving of the
 * printed fragments is nondeterministic across runs.
 */
int main (void)
{
#pragma omp parallel
    {
        const int tid = omp_get_thread_num();   /* this thread's id in the team */
        printf("(%d:!!!Hello world!!!)", tid);
    }
    return 0;
}
builder.h
// Copyright (c) 2015, The Regents of the University of California (Regents) // See LICENSE.txt for license details #ifndef BUILDER_H_ #define BUILDER_H_ #include <algorithm> #include <parallel/algorithm> #include <cinttypes> #include <fstream> #include <functional> #include <type_traits> #include <utility> #include <set> #include <omp.h> #include <cassert> #include "command_line.h" #include "generator.h" #include "graph.h" #include "platform_atomics.h" #include "pvector.h" #include "reader.h" #include "timer.h" #include "util.h" #include "sliding_queue.h" /* GAP Benchmark Suite Class: BuilderBase Author: Scott Beamer Given arguements from the command line (cli), returns a built graph - MakeGraph() will parse cli and obtain edgelist and call MakeGraphFromEL(edgelist) to perform actual graph construction - edgelist can be from file (reader) or synthetically generated (generator) - Common case: BuilderBase typedef'd (w/ params) to be Builder (benchmark.h) */ template <typename NodeID_, typename DestID_ = NodeID_, typename WeightT_ = NodeID_, bool invert = true> class BuilderBase { typedef EdgePair<NodeID_, DestID_> Edge; typedef pvector<Edge> EdgeList; const CLBase &cli_; bool symmetrize_; bool needs_weights_; int64_t num_nodes_ = -1; public: explicit BuilderBase(const CLBase &cli) : cli_(cli) { symmetrize_ = cli_.symmetrize(); needs_weights_ = !std::is_same<NodeID_, DestID_>::value; } DestID_ GetSource(EdgePair<NodeID_, NodeID_> e) { return e.u; } DestID_ GetSource(EdgePair<NodeID_, NodeWeight<NodeID_, WeightT_>> e) { return NodeWeight<NodeID_, WeightT_>(e.u, e.v.w); } NodeID_ FindMaxNodeID(const EdgeList &el) { NodeID_ max_seen = 0; #pragma omp parallel for reduction(max : max_seen) for (auto it = el.begin(); it < el.end(); it++) { Edge e = *it; max_seen = std::max(max_seen, e.u); max_seen = std::max(max_seen, (NodeID_) e.v); } return max_seen; } pvector<NodeID_> CountDegrees(const EdgeList &el, bool transpose) { pvector<NodeID_> degrees(num_nodes_, 0); #pragma omp 
parallel for for (auto it = el.begin(); it < el.end(); it++) { Edge e = *it; if (symmetrize_ || (!symmetrize_ && !transpose)) fetch_and_add(degrees[e.u], 1); if (symmetrize_ || (!symmetrize_ && transpose)) fetch_and_add(degrees[(NodeID_) e.v], 1); } return degrees; } static pvector<SGOffset> PrefixSum(const pvector<NodeID_> &degrees) { pvector<SGOffset> sums(degrees.size() + 1); SGOffset total = 0; for (size_t n=0; n < degrees.size(); n++) { sums[n] = total; total += degrees[n]; } sums[degrees.size()] = total; return sums; } static pvector<SGOffset> ParallelPrefixSum(const pvector<NodeID_> &degrees) { const size_t block_size = 1<<20; const size_t num_blocks = (degrees.size() + block_size - 1) / block_size; pvector<SGOffset> local_sums(num_blocks); #pragma omp parallel for for (size_t block=0; block < num_blocks; block++) { SGOffset lsum = 0; size_t block_end = std::min((block + 1) * block_size, degrees.size()); for (size_t i=block * block_size; i < block_end; i++) lsum += degrees[i]; local_sums[block] = lsum; } pvector<SGOffset> bulk_prefix(num_blocks+1); SGOffset total = 0; for (size_t block=0; block < num_blocks; block++) { bulk_prefix[block] = total; total += local_sums[block]; } bulk_prefix[num_blocks] = total; pvector<SGOffset> prefix(degrees.size() + 1); #pragma omp parallel for for (size_t block=0; block < num_blocks; block++) { SGOffset local_total = bulk_prefix[block]; size_t block_end = std::min((block + 1) * block_size, degrees.size()); for (size_t i=block * block_size; i < block_end; i++) { prefix[i] = local_total; local_total += degrees[i]; } } prefix[degrees.size()] = bulk_prefix[num_blocks]; return prefix; } // Removes self-loops and redundant edges // Side effect: neighbor IDs will be sorted void SquishCSR(const CSRGraph<NodeID_, DestID_, invert> &g, bool transpose, DestID_*** sq_index, DestID_** sq_neighs) { pvector<NodeID_> diffs(g.num_nodes()); DestID_ *n_start, *n_end; #pragma omp parallel for private(n_start, n_end) for (NodeID_ n=0; n < 
g.num_nodes(); n++) { if (transpose) { n_start = g.in_neigh(n).begin(); n_end = g.in_neigh(n).end(); } else { n_start = g.out_neigh(n).begin(); n_end = g.out_neigh(n).end(); } std::sort(n_start, n_end); DestID_ *new_end = std::unique(n_start, n_end); new_end = std::remove(n_start, new_end, n); diffs[n] = new_end - n_start; } pvector<SGOffset> sq_offsets = ParallelPrefixSum(diffs); *sq_neighs = new DestID_[sq_offsets[g.num_nodes()]]; *sq_index = CSRGraph<NodeID_, DestID_>::GenIndex(sq_offsets, *sq_neighs); #pragma omp parallel for private(n_start) for (NodeID_ n=0; n < g.num_nodes(); n++) { if (transpose) n_start = g.in_neigh(n).begin(); else n_start = g.out_neigh(n).begin(); std::copy(n_start, n_start+diffs[n], (*sq_index)[n]); } } CSRGraph<NodeID_, DestID_, invert> SquishGraph( const CSRGraph<NodeID_, DestID_, invert> &g) { DestID_ **out_index, *out_neighs, **in_index, *in_neighs; SquishCSR(g, false, &out_index, &out_neighs); if (g.directed()) { if (invert) SquishCSR(g, true, &in_index, &in_neighs); return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index, out_neighs, in_index, in_neighs); } else { return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index, out_neighs); } } /* Graph Bulding Steps (for CSR): - Read edgelist once to determine vertex degrees (CountDegrees) - Determine vertex offsets by a prefix sum (ParallelPrefixSum) - Allocate storage and set points according to offsets (GenIndex) - Copy edges into storage */ void MakeCSR(const EdgeList &el, bool transpose, DestID_*** index, DestID_** neighs) { pvector<NodeID_> degrees = CountDegrees(el, transpose); pvector<SGOffset> offsets = ParallelPrefixSum(degrees); *neighs = new DestID_[offsets[num_nodes_]]; *index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, *neighs); #pragma omp parallel for for (auto it = el.begin(); it < el.end(); it++) { Edge e = *it; if (symmetrize_ || (!symmetrize_ && !transpose)) (*neighs)[fetch_and_add(offsets[e.u], 1)] = e.v; if (symmetrize_ || (!symmetrize_ && 
transpose)) (*neighs)[fetch_and_add(offsets[static_cast<NodeID_>(e.v)], 1)] = GetSource(e); } } CSRGraph<NodeID_, DestID_, invert> MakeGraphFromEL(EdgeList &el) { DestID_ **index = nullptr, **inv_index = nullptr; DestID_ *neighs = nullptr, *inv_neighs = nullptr; Timer t; t.Start(); if (num_nodes_ == -1) num_nodes_ = FindMaxNodeID(el)+1; if (needs_weights_) Generator<NodeID_, DestID_, WeightT_>::InsertWeights(el); MakeCSR(el, false, &index, &neighs); if (!symmetrize_ && invert) MakeCSR(el, true, &inv_index, &inv_neighs); t.Stop(); PrintTime("Build Time", t.Seconds()); if (symmetrize_) return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs); else return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs, inv_index, inv_neighs); } CSRGraph<NodeID_, DestID_, invert> MakeGraph() { CSRGraph<NodeID_, DestID_, invert> g; { // extra scope to trigger earlier deletion of el (save memory) EdgeList el; if (cli_.filename() != "") { Reader<NodeID_, DestID_, WeightT_, invert> r(cli_.filename()); if ((r.GetSuffix() == ".sg") || (r.GetSuffix() == ".wsg")) { return r.ReadSerializedGraph(); } else { el = r.ReadFile(needs_weights_); } } else if (cli_.scale() != -1) { Generator<NodeID_, DestID_> gen(cli_.scale(), cli_.degree()); el = gen.GenerateEL(cli_.uniform()); } g = MakeGraphFromEL(el); } return SquishGraph(g); } // Relabels (and rebuilds) graph by order of decreasing degree static CSRGraph<NodeID_, DestID_, invert> RelabelByDegree( const CSRGraph<NodeID_, DestID_, invert> &g) { if (g.directed()) { std::cout << "Cannot relabel directed graph" << std::endl; std::exit(-11); } Timer t; t.Start(); typedef std::pair<int64_t, NodeID_> degree_node_p; pvector<degree_node_p> degree_id_pairs(g.num_nodes()); #pragma omp parallel for for (NodeID_ n=0; n < g.num_nodes(); n++) degree_id_pairs[n] = std::make_pair(g.out_degree(n), n); std::sort(degree_id_pairs.begin(), degree_id_pairs.end(), std::greater<degree_node_p>()); pvector<NodeID_> degrees(g.num_nodes()); 
pvector<NodeID_> new_ids(g.num_nodes()); #pragma omp parallel for for (NodeID_ n=0; n < g.num_nodes(); n++) { degrees[n] = degree_id_pairs[n].first; new_ids[degree_id_pairs[n].second] = n; } pvector<SGOffset> offsets = ParallelPrefixSum(degrees); DestID_* neighs = new DestID_[offsets[g.num_nodes()]]; DestID_** index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, neighs); #pragma omp parallel for schedule (dynamic, 1024) for (NodeID_ u=0; u < g.num_nodes(); u++) { for (NodeID_ v : g.out_neigh(u)) neighs[offsets[new_ids[u]]++] = new_ids[v]; std::sort(index[new_ids[u]], index[new_ids[u]+1]); } t.Stop(); PrintTime("Relabel", t.Seconds()); return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), index, neighs); } static CSRGraph<NodeID_, DestID_, invert> RandOrder ( const CSRGraph<NodeID_, DestID_, invert> &g, pvector<NodeID_> &new_ids, bool createOnlyDegList, bool createBothCSRs) { Timer t; t.Start(); std::srand(0); //so that the random graph generated is the same everytime bool outDegree = true; if (g.directed() == true) { //Step I: create a random permutation - SLOW implementation pvector<NodeID_> claimedVtxs(g.num_nodes(), 0); //#pragma omp parallel for #pragma omp parallel { std::mt19937 rng(kRandSeed + omp_get_thread_num()); std::uniform_int_distribution<NodeID_> udist(0, g.num_nodes()-1); #pragma omp for schedule(dynamic, 8) for (NodeID_ v = 0; v < g.num_nodes(); ++v) { while (true) { //NodeID_ randID = std::rand() % g.num_nodes(); NodeID_ randID = udist(rng); if (claimedVtxs[randID] != 1) { if (compare_and_swap(claimedVtxs[randID], 0, 1) == true) { new_ids[v] = randID; break; } else continue; } } } } #pragma omp parallel for for (NodeID_ v = 0; v < g.num_nodes(); ++v) assert(new_ids[v] != -1); /* Step VI: generate degree to build a new graph */ pvector<NodeID_> degrees(g.num_nodes()); pvector<NodeID_> inv_degrees(g.num_nodes()); if (outDegree == true) { #pragma omp parallel for for (NodeID_ n=0; n < g.num_nodes(); n++) { degrees[new_ids[n]] = g.out_degree(n); 
inv_degrees[new_ids[n]] = g.in_degree(n); } } else { #pragma omp parallel for for (NodeID_ n=0; n < g.num_nodes(); n++) { degrees[new_ids[n]] = g.in_degree(n); inv_degrees[new_ids[n]] = g.out_degree(n); } } /* Graph building phase */ pvector<SGOffset> offsets = ParallelPrefixSum(inv_degrees); DestID_* neighs = new DestID_[offsets[g.num_nodes()]]; DestID_** index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, neighs); #pragma omp parallel for schedule (dynamic, 1024) for (NodeID_ u=0; u < g.num_nodes(); u++) { if (outDegree == true) { for (NodeID_ v : g.in_neigh(u)) neighs[offsets[new_ids[u]]++] = new_ids[v]; } else { for (NodeID_ v : g.out_neigh(u)) neighs[offsets[new_ids[u]]++] = new_ids[v]; } std::sort(index[new_ids[u]], index[new_ids[u]+1]); //sort neighbors of each vertex } DestID_* inv_neighs(nullptr); DestID_** inv_index(nullptr); if (createOnlyDegList == true || createBothCSRs == true) { // making the inverse list (in-degrees in this case) pvector<SGOffset> inv_offsets = ParallelPrefixSum(degrees); inv_neighs = new DestID_[inv_offsets[g.num_nodes()]]; inv_index = CSRGraph<NodeID_, DestID_>::GenIndex(inv_offsets, inv_neighs); if (createBothCSRs == true) { #pragma omp parallel for schedule(dynamic, 1024) for (NodeID_ u=0; u < g.num_nodes(); u++) { if (outDegree == true) { for (NodeID_ v : g.out_neigh(u)) inv_neighs[inv_offsets[new_ids[u]]++] = new_ids[v]; } else { for (NodeID_ v : g.in_neigh(u)) inv_neighs[inv_offsets[new_ids[u]]++] = new_ids[v]; } std::sort(inv_index[new_ids[u]], inv_index[new_ids[u]+1]); //sort neighbors of each vertex } } } t.Stop(); PrintTime("RandOrder time", t.Seconds()); if (outDegree == true) { return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), inv_index, inv_neighs, index, neighs); } else { return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), index, neighs, inv_index, inv_neighs); } } else { /* Undirected graphs - no need to make separate lists for in and out degree */ //Step I: create a random permutation - SLOW 
implementation pvector<NodeID_> claimedVtxs(g.num_nodes(), 0); //#pragma omp parallel for #pragma omp parallel { std::mt19937 rng(kRandSeed + omp_get_thread_num()); std::uniform_int_distribution<NodeID_> udist(0, g.num_nodes()-1); #pragma omp for schedule(dynamic, 8) for (NodeID_ v = 0; v < g.num_nodes(); ++v) { while (true) { //NodeID_ randID = std::rand() % g.num_nodes(); NodeID_ randID = udist(rng); if (claimedVtxs[randID] != 1) { if (compare_and_swap(claimedVtxs[randID], 0, 1) == true) { new_ids[v] = randID; break; } else continue; } } } } #pragma omp parallel for for (NodeID_ v = 0; v < g.num_nodes(); ++v) assert(new_ids[v] != -1); /* Step VI: generate degree to build a new graph */ pvector<NodeID_> degrees(g.num_nodes()); #pragma omp parallel for for (NodeID_ n=0; n < g.num_nodes(); n++) { degrees[new_ids[n]] = g.out_degree(n); } /* Graph building phase */ pvector<SGOffset> offsets = ParallelPrefixSum(degrees); DestID_* neighs = new DestID_[offsets[g.num_nodes()]]; DestID_** index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, neighs); #pragma omp parallel for schedule (dynamic, 1024) for (NodeID_ u=0; u < g.num_nodes(); u++) { for (NodeID_ v : g.out_neigh(u)) neighs[offsets[new_ids[u]]++] = new_ids[v]; std::sort(index[new_ids[u]], index[new_ids[u]+1]); } t.Stop(); PrintTime("RandOrder time", t.Seconds()); return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), index, neighs); } } /* Return a compressed transpose matrix (Rereference Matrix) */ static void makeOffsetMatrix(const CSRGraph<NodeID_, DestID_, invert> &g, pvector<uint8_t> &offsetMatrix, int numVtxPerLine, bool traverseCSR = true) { assert(g.directed() == true); //TODO: Add support for undirected CSR Timer tm; #if 0 /* Step 0: Sanity check that neighborhoods are sorted */ #pragma omp parallel for schedule(dynamic, 64) for (NodeID_ v = 0; v < g.num_nodes(); ++v) { NodeID_ maxNgh {-1}; for (NodeID ngh: g.out_neigh(v)) { assert(ngh > maxNgh); maxNgh = ngh; } } #endif /* Step I: Collect quantized 
edges & Compact vertices into "super vertices" */ tm.Start(); NodeID_ numCacheLines = (g.num_nodes() + numVtxPerLine - 1) / numVtxPerLine; NodeID_ numEpochs = 256; NodeID_ epochSz = (g.num_nodes() + numEpochs - 1) / numEpochs; pvector<NodeID_> lastRef(numCacheLines * numEpochs, -1); #pragma omp parallel for schedule(dynamic, 8) for (NodeID_ c = 0; c < numCacheLines; ++c) { NodeID_ startVtx = c * numVtxPerLine; NodeID_ endVtx = (c+1) * numVtxPerLine; if (c == numCacheLines - 1) endVtx = g.num_nodes(); for (NodeID_ v = startVtx; v < endVtx; ++v) { if (traverseCSR == true) { for (NodeID_ ngh : g.out_neigh(v)) { NodeID_ nghEpoch = ngh / epochSz; lastRef[(c * numEpochs) + nghEpoch] = std::max(ngh, lastRef[(c * numEpochs) + nghEpoch]); } } else { for (NodeID_ ngh : g.in_neigh(v)) { NodeID_ nghEpoch = ngh / epochSz; lastRef[(c * numEpochs) + nghEpoch] = std::max(ngh, lastRef[(c * numEpochs) + nghEpoch]); } } } } tm.Stop(); std::cout << "[CSR-HYBRID-PREPROCESSING] Time to quantize nghs and compact vertices = " << tm.Seconds() << std::endl; /* Step II: Converting adjacency matrix into offsets */ tm.Start(); uint8_t maxReref = 127; //because MSB is reserved for identifying between reref val (1) & switch point (0) NodeID_ subEpochSz = (epochSz + 127) / 128; //Using remaining 7 bits to identify intra-epoch information pvector<uint8_t> compressedOffsets(numCacheLines * numEpochs); uint8_t mask = 1; uint8_t orMask = mask << 7; uint8_t andMask = ~(orMask); assert(orMask == 128 && andMask == 127); #pragma omp parallel for schedule (static) for (NodeID_ c = 0; c < numCacheLines; ++c) { { // first set values for the last epoch NodeID_ e = numEpochs - 1; if (lastRef[(c * numEpochs) + e] != -1) { compressedOffsets[(c * numEpochs) + e] = maxReref; compressedOffsets[(c * numEpochs) + e] &= andMask; } else { compressedOffsets[(c * numEpochs) + e] = maxReref; compressedOffsets[(c * numEpochs) + e] |= orMask; } } //Now back track and set values for all epochs for (NodeID_ e = numEpochs - 
2; e >= 0; --e) { if (lastRef[(c * numEpochs) + e] != -1) { // There was a ref this epoch - store the quantized val of the lastRef NodeID_ subEpochDist = lastRef[(c * numEpochs) + e] - (e * epochSz); compressedOffsets[(c * numEpochs) + e] = static_cast<uint8_t>(subEpochDist / subEpochSz); compressedOffsets[(c * numEpochs) + e] &= andMask; } else { if ((compressedOffsets[(c * numEpochs) + e + 1] & orMask) != 0) { //No access next epoch as well - add inter-epoch distance compressedOffsets[(c * numEpochs) + e] = compressedOffsets[(c * numEpochs) + e + 1]; compressedOffsets[(c * numEpochs) + e] = std::min(compressedOffsets[(c * numEpochs) + e], maxReref); } else { //There is an access next epoch - so inter-epoch distance is set to next epoch compressedOffsets[(c * numEpochs) + e] = 1; } compressedOffsets[(c * numEpochs) + e] |= orMask; } } } tm.Stop(); std::cout << "[CSR-HYBRID-PREPROCESSING] Time to convert to offsets matrix = " << tm.Seconds() << std::endl; /* Step III: Transpose edgePresent*/ tm.Start(); #pragma omp parallel for schedule (static) for (NodeID_ c = 0; c < numCacheLines; ++c) { for (NodeID_ e = 0; e < numEpochs; ++e) { offsetMatrix[(e * numCacheLines) + c] = compressedOffsets[(c * numEpochs) + e]; } } tm.Stop(); std::cout << "[CSR-HYBRID-PREPROCESSING] Time to transpose offsets matrix = " << tm.Seconds() << std::endl; /* // Sanity check - The following conditions will be checked // 1. If the MSB bit in an epoch is zero, then there should be a reference to the cacheline in that epoch // 2. If the MSB bit in an epoch is zero, then the remaining 7 bits should capture the last reference within the epoch // 3. If the MSB bit in an epoch is 1, then there should be no reference to the cacheline in that epoch // 4. 
If the MSB bit in an epoch is 1, then use the information in the remaining 7 bits to ascertain which epoch a line will be accessed next if (traverseCSR == true) { //Step I: compute per-cacheline neighborhoods std::vector<std::set<NodeID_> > lineNghs; lineNghs.resize(numCacheLines); #pragma omp parallel for schedule(dynamic, 8) for (NodeID_ c = 0; c < numCacheLines; ++c) { NodeID_ startVtx = c * numVtxPerLine; NodeID_ endVtx = (c+1) * numVtxPerLine; if (c == numCacheLines - 1) endVtx = g.num_nodes(); for (NodeID_ v = startVtx; v < endVtx; ++v) { for (NodeID_ ngh : g.out_neigh(v)) { lineNghs[c].insert(ngh); } } } //Step II: Check conditions 1 & 3 for epoch data structure #pragma omp parallel for schedule(static) for (NodeID_ c = 0; c < numCacheLines; ++c) { for (NodeID_ e = 0; e < numEpochs; ++e) { if ((compressedOffsets[(c * numEpochs) + e] & orMask) != 0) { // Cond 3: There should be no reference this epoch NodeID_ epochStart = e * epochSz; NodeID_ epochEnd = (e + 1) * epochSz; auto it = std::lower_bound(lineNghs[c].begin(), lineNghs[c].end(), epochStart); if (it != lineNghs[c].end()) { NodeID_ lastRef = *it; assert(lastRef >= epochEnd); } } else { // Cond 1: There must be a reference this epoch NodeID_ epochStart = e * epochSz; NodeID_ epochEnd = (e + 1) * epochSz; auto it = std::lower_bound(lineNghs[c].begin(), lineNghs[c].end(), epochStart); assert(it != lineNghs[c].end()); NodeID_ lastRef = *it; assert(lastRef < epochEnd); } } } //Step III: Check conditions 2 & 4 for epoch data structure #pragma omp parallel for schedule(static) for (NodeID_ c = 0; c < numCacheLines; ++c) { for (NodeID_ e = 0; e < numEpochs; ++e) { if ((compressedOffsets[(c * numEpochs) + e] & orMask) != 0) { // Cond 4: The inter-epoch info points to the next epoch uint8_t interEpochDist = compressedOffsets[(c * numEpochs) + e] & andMask; NodeID_ epochStart = (e + interEpochDist) * epochSz; auto it = std::lower_bound(lineNghs[c].begin(), lineNghs[c].end(), epochStart); if (it != 
lineNghs[c].end()) { NodeID_ lastRef = *it; assert(lastRef >= epochStart); //minimum condition //because of quantization we cant be sure exactly which epoch (or if ever) the line will be accessed } } else { // Cond 2: We track the correct intra-Epoch lastRef NodeID_ epochStart = e * epochSz; NodeID_ epochEnd = (e + 1) * epochSz; auto it = std::lower_bound(lineNghs[c].begin(), lineNghs[c].end(), epochEnd); it--; NodeID_ lastRef = *it; assert(lastRef < epochEnd && lastRef >= epochStart); NodeID_ subEpochDist = (*it) - epochStart; uint8_t subEpochDistQ = subEpochDist / subEpochSz; uint8_t intraEpochDist = compressedOffsets[(c * numEpochs) + e] & andMask; if (e != numEpochs - 1) assert(intraEpochDist == subEpochDistQ); } } } } else { //Step I: compute per-cacheline neighborhoods std::vector<std::set<NodeID_> > lineNghs; lineNghs.resize(numCacheLines); #pragma omp parallel for schedule(dynamic, 8) for (NodeID_ c = 0; c < numCacheLines; ++c) { NodeID_ startVtx = c * numVtxPerLine; NodeID_ endVtx = (c+1) * numVtxPerLine; if (c == numCacheLines - 1) endVtx = g.num_nodes(); for (NodeID_ v = startVtx; v < endVtx; ++v) { for (NodeID_ ngh : g.in_neigh(v)) { lineNghs[c].insert(ngh); } } } //Step II: Check conditions 1 & 3 for epoch data structure #pragma omp parallel for schedule(static) for (NodeID_ c = 0; c < numCacheLines; ++c) { for (NodeID_ e = 0; e < numEpochs; ++e) { if ((compressedOffsets[(c * numEpochs) + e] & orMask) != 0) { // Cond 3: There should be no reference this epoch NodeID_ epochStart = e * epochSz; NodeID_ epochEnd = (e + 1) * epochSz; auto it = std::lower_bound(lineNghs[c].begin(), lineNghs[c].end(), epochStart); if (it != lineNghs[c].end()) { NodeID_ lastRef = *it; assert(lastRef >= epochEnd); } } else { // Cond 1: There must be a reference this epoch NodeID_ epochStart = e * epochSz; NodeID_ epochEnd = (e + 1) * epochSz; auto it = std::lower_bound(lineNghs[c].begin(), lineNghs[c].end(), epochStart); assert(it != lineNghs[c].end()); NodeID_ lastRef = *it; 
assert(lastRef < epochEnd); } } } //Step III: Check conditions 2 & 4 for epoch data structure #pragma omp parallel for schedule(static) for (NodeID_ c = 0; c < numCacheLines; ++c) { for (NodeID_ e = 0; e < numEpochs; ++e) { if ((compressedOffsets[(c * numEpochs) + e] & orMask) != 0) { // Cond 4: The inter-epoch info points to the next epoch uint8_t interEpochDist = compressedOffsets[(c * numEpochs) + e] & andMask; NodeID_ epochStart = (e + interEpochDist) * epochSz; auto it = std::lower_bound(lineNghs[c].begin(), lineNghs[c].end(), epochStart); if (it != lineNghs[c].end()) { NodeID_ lastRef = *it; assert(lastRef >= epochStart); //minimum condition //because of quantization we cant be sure exactly which epoch (or if ever) the line will be accessed } } else { // Cond 2: We track the correct intra-Epoch lastRef NodeID_ epochStart = e * epochSz; NodeID_ epochEnd = (e + 1) * epochSz; auto it = std::lower_bound(lineNghs[c].begin(), lineNghs[c].end(), epochEnd); it--; NodeID_ lastRef = *it; assert(lastRef < epochEnd && lastRef >= epochStart); NodeID_ subEpochDist = (*it) - epochStart; uint8_t subEpochDistQ = subEpochDist / subEpochSz; uint8_t intraEpochDist = compressedOffsets[(c * numEpochs) + e] & andMask; if (e != numEpochs - 1) assert(intraEpochDist == subEpochDistQ); } } } } */ } }; #endif // BUILDER_H_
conv_kernel_x86.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2020, OPEN AI LAB * Author: quanwang@openailab.com */ #include <stdint.h> #include <stdlib.h> #include <math.h> #include "conv_kernel_x86.h" #include "wino_conv_kernel_x86.h" #if __AVX__ #include <immintrin.h> #endif #ifndef _MSC_VER #include <sys/time.h> #define max(a, b) ((a) > (b) ? (a) : (b)) #define min(a, b) ((a) < (b) ? 
(a) : (b))

/* Wall-clock time in milliseconds (POSIX gettimeofday path; not compiled under MSVC). */
static double get_current_time()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);

    return tv.tv_sec * 1000.0 + tv.tv_usec / 1000.0;
}

/* Size in bytes of the private interleave buffer needed for `filter`.
 * For UINT8 the weights are dequantized to fp32 at setup time, so 4x the
 * element storage is reserved. */
static int get_private_mem_size(struct ir_tensor* filter)
{
    if (filter->data_type == TENGINE_DT_UINT8)    // simulator uint8 inference with fp32
        return filter->elem_num * filter->elem_size * 4;
    else
        return filter->elem_num * filter->elem_size;    // caution
}

/* Copy the raw weight data into the private interleave buffer unchanged.
 * NOTE(review): memcpy needs <string.h>; presumably pulled in transitively
 * via conv_kernel_x86.h -- confirm. */
static void interleave(struct ir_tensor* filter, struct conv_priv_info* priv_info)
{
    /* simply copy the data */
    memcpy(priv_info->interleave_buffer, filter->data, filter->elem_num * filter->elem_size);
}

/* Dequantize uint8 weights to fp32 into the interleave buffer:
 * w_fp32[i] = (w_u8[i] - zero_point) * scale. */
static void interleave_uint8(struct ir_tensor* filter, struct conv_priv_info* priv_info)
{
    /* dequant uint8 weight to fp32 for simulator */
    float* weight_fp32 = (float* )priv_info->interleave_buffer;
    uint8_t* weight_uint8 = (uint8_t*)filter->data;
    float scale = filter->scale;
    int zero_point = filter->zero_point;

    for (int i = 0; i < filter->elem_num; i++)
    {
        weight_fp32[i] = ((float)weight_uint8[i] - (float)zero_point) * scale;
    }
}

/* im2col for fp32 input: expand the (inc, inh, inw) image into a
 * (ksize_h*ksize_w*inc) x (outh*outw) column matrix in data_col.
 * For each column channel c = (c_, kh, kw):
 *   - w_low/w_high are the output-column range whose source column
 *     im_col + w*sw lies inside [0, inw); columns outside that range
 *     are zero-padded with memset.
 *   - `in` is initialized one horizontal stride *before* the first valid
 *     sample and pre-incremented inside the loop (in += sw, then read);
 *     NOTE(review): the (w_low - 1) * sw start can form an address before
 *     the row start before the first += sw -- intentional pattern here,
 *     but technically out-of-bounds pointer arithmetic. */
void im2col_fp32(float* data_img, float* data_col, int inh, int inw, int inc, int outh, int outw,
                 int ksize_h, int ksize_w, int sh, int sw, int ph, int pw, int dh, int dw)
{
    const int channels_col = ksize_h * ksize_w * inc;

    for (int c = 0; c < channels_col; ++c)
    {
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;

        const int im_col = kw * dw - pw;
        const int w_low = max(0, -im_col / sw + (-im_col % sw > 0));
        const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0));

        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            float* out = data_col + (c * outh + h) * outw;
            const float* end = out + w_high;

            if (im_row >= 0 && im_row < inh)
            {
                float* in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw;

                memset(out, 0, w_low * sizeof(float));
                out += w_low;
                while (out < end)
                {
                    in += sw;
                    *(out++) = *in;
                }
                memset(out, 0, (outw - w_high) * sizeof(float));
            }
            else
            {
                /* whole output row maps to padded (out-of-image) input rows */
                memset(out, 0, outw * sizeof(float));
            }
        }
    }
}

/* im2col for uint8 input with on-the-fly dequantization to fp32 columns:
 * out = (in_u8 - zero_point) * scale. Geometry parameters are taken from
 * the conv_param / tensor dims; layout logic mirrors im2col_fp32. */
void im2col_uint8(uint8_t* data_img, float* data_col, struct ir_tensor* input_tensor,
                  struct ir_tensor* output_tensor, struct conv_param* param)
{
    int ksize_h = param->kernel_h;
    int ksize_w = param->kernel_w;
    int inc = param->input_channel / param->group;    /* channels per group */
    int sh = param->stride_h;
    int sw = param->stride_w;
    int ph = param->pad_h0;
    int pw = param->pad_w0;
    int dh = param->dilation_h;
    int dw = param->dilation_w;
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    float scale = input_tensor->scale;
    int zero_point = input_tensor->zero_point;

    const int channels_col = ksize_h * ksize_w * inc;

    for (int c = 0; c < channels_col; ++c)
    {
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;

        const int im_col = kw * dw - pw;
        const int w_low = max(0, -im_col / sw + (-im_col % sw > 0));
        const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0));

        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            float* out = data_col + (c * outh + h) * outw;
            const float* end = out + w_high;

            if (im_row >= 0 && im_row < inh)
            {
                uint8_t * in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw;

                memset(out, 0, w_low * sizeof(float));
                out += w_low;
                while (out < end)
                {
                    in += sw;
                    /* dequantize the sampled pixel */
                    float in_fp32 = ((float)in[0] - (float)zero_point) * scale;
                    out[0] = in_fp32;
                    out++;
                }
                memset(out, 0, (outw - w_high) * sizeof(float));
            }
            else
            {
                memset(out, 0, outw * sizeof(float));
            }
        }
    }
}

/* im2col for int8 input: same geometry as im2col_fp32 but copies raw int8
 * values (no dequantization); zero padding via memset of int8 zeros. */
void im2col_int8(int8_t* data_img, int8_t* data_col, struct ir_tensor* input_tensor,
                 struct ir_tensor* output_tensor, struct conv_param* param)
{
    int ksize_h = param->kernel_h;
    int ksize_w = param->kernel_w;
    int inc = param->input_channel / param->group;
    int sh = param->stride_h;
    int sw = param->stride_w;
    int ph = param->pad_h0;
    int pw = param->pad_w0;
    int dh = param->dilation_h;
    int dw = param->dilation_w;
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];

    const int channels_col = ksize_h * ksize_w * inc;

    for (int c = 0; c < channels_col; ++c)
    {
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;

        const int im_col = kw * dw - pw;
        const int w_low = max(0, -im_col / sw + (-im_col % sw > 0));
        const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0));

        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            int8_t * out = data_col + (c * outh + h) * outw;
            const int8_t * end = out + w_high;

            if (im_row >= 0 && im_row < inh)
            {
                int8_t * in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw;

                memset(out, 0, w_low * sizeof(int8_t));
                out += w_low;
                while (out < end)
                {
                    in += sw;
                    out[0] = in[0];
                    out++;
                }
                memset(out, 0, (outw - w_high) * sizeof(int8_t));
            }
            else
            {
                memset(out, 0, outw * sizeof(int8_t));
            }
        }
    }
}

/* Dispatch im2col for batch image `n`, channel-group `group`, based on the
 * input tensor's data type; the expanded columns land in the private
 * im2col buffer. */
static void im2col_ir(struct ir_tensor* input, struct ir_tensor* output, struct conv_priv_info* priv_info,
                      struct conv_param* param, int n, int group)
{
    int input_chan = param->input_channel / param->group;
    int image_size = input->dims[1] * input->dims[2] * input->dims[3];
    int group_size = input_chan * input->dims[2] * input->dims[3];

    /* byte offset to the start of (batch n, group) in the input blob */
    void* input_base = (void*)((uint8_t*)input->data + (n * image_size + group * group_size) * input->elem_size);
    void* im2col_buf = (void*)priv_info->im2col_buffer;

    if (input->data_type == TENGINE_DT_FP32)
    {
        im2col_fp32(input_base, im2col_buf, input->dims[2], input->dims[3], input_chan, output->dims[2],
                    output->dims[3], param->kernel_h, param->kernel_w, param->stride_h, param->stride_w,
                    param->pad_h0, param->pad_w0, param->dilation_h, param->dilation_w);
    }
    else if (input->data_type == TENGINE_DT_UINT8)
    {
        im2col_uint8(input_base, im2col_buf, input, output, param);
    }
    else if (input->data_type == TENGINE_DT_INT8)
    {
        im2col_int8(input_base, im2col_buf, input, output, param);
    }
    else
    {
        /* unsupported dtype: report and leave the im2col buffer untouched */
        printf("Input data type %d not to be supported.\n", input->data_type);
    }
}

/* Pack the K x N im2col matrix pB into 8-column panels pB_t:
 * full panels of 8 columns are stored column-interleaved
 * ([c0..c7 of row 0][c0..c7 of row 1]...), and each leftover column is
 * stored as its own K-long strip at panel index (i/8 + i%8).
 * NOTE(review): `remian_size_start` is a typo for "remain" kept as-is --
 * renaming is out of scope for a comment-only pass. */
void input_pack4_fp32(int K, int N, float* pB, float* pB_t, int num_thread)
{
    int nn_size = N >> 3;
    int remian_size_start = nn_size << 3;

    // [ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33 ....]
#pragma omp parallel for num_threads(num_thread)
    for (int ii = 0; ii < nn_size; ii++)
    {
        int i = ii * 8;

        const float* img = pB + i;
        float* tmp = pB_t + (i / 8) * 8 * K;

        for (int j = 0; j < K; j++)
        {
#if __AVX__
            _mm256_storeu_ps(tmp, _mm256_loadu_ps(img));
#else
            tmp[0] = img[0];
            tmp[1] = img[1];
            tmp[2] = img[2];
            tmp[3] = img[3];
            tmp[4] = img[4];
            tmp[5] = img[5];
            tmp[6] = img[6];
            tmp[7] = img[7];
#endif    // __AVX__
            tmp += 8;
            img += N;
        }
    }

    // [ch00, ch01, ch02, ch03 ....]
#pragma omp parallel for num_threads(num_thread)
    for (int i = remian_size_start; i < N; i++)
    {
        const float* img = pB + i;
        float* tmp = pB_t + (i / 8 + i % 8) * 8 * K;

        for (int j = 0; j < K; j++)
        {
            tmp[0] = img[0];
            tmp += 1;
            img += N;
        }
    }
}

/* C = A * B for packed fp32 operands:
 *   pA_t: M x K kernel matrix packed in 8-row panels (4-row / 1-row strips
 *         for the remainder, at panel index i/8 + (i%8)/4 + i%4),
 *   pB_t: K x N im2col matrix packed by input_pack4_fp32,
 *   pC:   M x N output, row-major.
 * Three tiers: 8-row x 8-col AVX tile (OpenMP across 8-row blocks),
 * then 4-row blocks, then single rows; each tier has an 8-column AVX inner
 * loop plus a scalar/SSE single-column remainder.
 * NOTE(review): _mm256_fmadd_ps/_mm_fmadd_ps are FMA instructions but are
 * guarded only by __AVX__ -- build must also enable FMA; confirm flags. */
static void sgemm_fp(int M, int N, int K, float* pA_t, float* pB_t, float* pC, int num_thread)
{
    int nn_outch = 0;
    int remain_outch_start = 0;

    nn_outch = M >> 3;
    remain_outch_start = nn_outch << 3;

#pragma omp parallel for num_threads(num_thread)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int i = pp * 8;

        float* output0 = pC + ( i )*N;
        float* output1 = pC + (i + 1) * N;
        float* output2 = pC + (i + 2) * N;
        float* output3 = pC + (i + 3) * N;
        float* output4 = pC + (i + 4) * N;
        float* output5 = pC + (i + 5) * N;
        float* output6 = pC + (i + 6) * N;
        float* output7 = pC + (i + 7) * N;

        int j = 0;
        for (; j + 7 < N; j += 8)
        {
            float* va = pA_t + (i / 8) * 8 * K;
            float* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
            __m256 _sum0 = _mm256_set1_ps(0.0);
            __m256 _sum1 = _mm256_set1_ps(0.0);
            __m256 _sum2 = _mm256_set1_ps(0.0);
            __m256 _sum3 = _mm256_set1_ps(0.0);
            __m256 _sum4 = _mm256_set1_ps(0.0);
            __m256 _sum5 = _mm256_set1_ps(0.0);
            __m256 _sum6 = _mm256_set1_ps(0.0);
            __m256 _sum7 = _mm256_set1_ps(0.0);

            int k = 0;
            /* unrolled by 4 over K; each step consumes 8 A values and one
             * 8-wide B row */
            for (; k + 3 < K; k = k + 4)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _vb0 = _mm256_loadu_ps(vb);
                __m256 _vb1 = _mm256_loadu_ps(vb + 8);
                __m256 _vb2 = _mm256_loadu_ps(vb + 16);
                __m256 _vb3 = _mm256_loadu_ps(vb + 24);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0);    // sum0 = (a00-a07) * k00
                _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1);    // sum1 = (a00-a07) * k10
                _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2);    // sum2 = (a00-a07) * k20
                _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3);    // sum3 = (a00-a07) * k30

                _va0 = _mm256_broadcast_ss(va + 4);
                _va1 = _mm256_broadcast_ss(va + 5);
                _va2 = _mm256_broadcast_ss(va + 6);
                _va3 = _mm256_broadcast_ss(va + 7);

                _sum4 = _mm256_fmadd_ps(_vb0, _va0, _sum4);    // sum4 = (a00-a07) * k40
                _sum5 = _mm256_fmadd_ps(_vb0, _va1, _sum5);    // sum5 = (a00-a07) * k50
                _sum6 = _mm256_fmadd_ps(_vb0, _va2, _sum6);    // sum6 = (a00-a07) * k60
                _sum7 = _mm256_fmadd_ps(_vb0, _va3, _sum7);    // sum7 = (a00-a07) * k70

                va += 8;

                // k1
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0);    // sum0 += (a10-a17) * k01
                _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1);    // sum1 += (a10-a17) * k11
                _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2);    // sum2 += (a10-a17) * k21
                _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3);    // sum3 += (a10-a17) * k31

                _va0 = _mm256_broadcast_ss(va + 4);
                _va1 = _mm256_broadcast_ss(va + 5);
                _va2 = _mm256_broadcast_ss(va + 6);
                _va3 = _mm256_broadcast_ss(va + 7);

                _sum4 = _mm256_fmadd_ps(_vb1, _va0, _sum4);    // sum4 += (a10-a17) * k41
                _sum5 = _mm256_fmadd_ps(_vb1, _va1, _sum5);    // sum5 += (a10-a17) * k51
                _sum6 = _mm256_fmadd_ps(_vb1, _va2, _sum6);    // sum6 += (a10-a17) * k61
                _sum7 = _mm256_fmadd_ps(_vb1, _va3, _sum7);    // sum7 += (a10-a17) * k71

                va += 8;

                // k2
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0);    // sum0 += (a20-a27) * k02
                _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1);    // sum1 += (a20-a27) * k12
                _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2);    // sum2 += (a20-a27) * k22
                _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3);    // sum3 += (a20-a27) * k32

                _va0 = _mm256_broadcast_ss(va + 4);
                _va1 = _mm256_broadcast_ss(va + 5);
                _va2 = _mm256_broadcast_ss(va + 6);
                _va3 = _mm256_broadcast_ss(va + 7);

                _sum4 = _mm256_fmadd_ps(_vb2, _va0, _sum4);    // sum4 += (a20-a27) * k42
                _sum5 = _mm256_fmadd_ps(_vb2, _va1, _sum5);    // sum5 += (a20-a27) * k52
                _sum6 = _mm256_fmadd_ps(_vb2, _va2, _sum6);    // sum6 += (a20-a27) * k62
                _sum7 = _mm256_fmadd_ps(_vb2, _va3, _sum7);    // sum7 += (a20-a27) * k72

                va += 8;

                // k3
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0);    // sum0 += (a30-a37) * k03
                _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1);    // sum1 += (a30-a37) * k13
                _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2);    // sum2 += (a30-a37) * k23
                _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3);    // sum3 += (a30-a37) * k33

                _va0 = _mm256_broadcast_ss(va + 4);
                _va1 = _mm256_broadcast_ss(va + 5);
                _va2 = _mm256_broadcast_ss(va + 6);
                _va3 = _mm256_broadcast_ss(va + 7);

                _sum4 = _mm256_fmadd_ps(_vb3, _va0, _sum4);    // sum4 += (a30-a37) * k43
                _sum5 = _mm256_fmadd_ps(_vb3, _va1, _sum5);    // sum5 += (a30-a37) * k53
                _sum6 = _mm256_fmadd_ps(_vb3, _va2, _sum6);    // sum6 += (a30-a37) * k63
                _sum7 = _mm256_fmadd_ps(_vb3, _va3, _sum7);    // sum7 += (a30-a37) * k73

                va += 8;
                vb += 32;
            }

            /* K remainder, one k at a time */
            for (; k < K; k++)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _va4 = _mm256_broadcast_ss(va + 4);
                __m256 _va5 = _mm256_broadcast_ss(va + 5);
                __m256 _va6 = _mm256_broadcast_ss(va + 6);
                __m256 _va7 = _mm256_broadcast_ss(va + 7);
                __m256 _vb0 = _mm256_loadu_ps(vb);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0);    // sum0 = (a00-a07) * k00
                _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1);    // sum1 = (a00-a07) * k10
                _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2);    // sum2 = (a00-a07) * k20
                _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3);    // sum3 = (a00-a07) * k30
                _sum4 = _mm256_fmadd_ps(_vb0, _va4, _sum4);    // sum4 = (a00-a07) * k40
                _sum5 = _mm256_fmadd_ps(_vb0, _va5, _sum5);    // sum5 = (a00-a07) * k50
                _sum6 = _mm256_fmadd_ps(_vb0, _va6, _sum6);    // sum6 = (a00-a07) * k60
                _sum7 = _mm256_fmadd_ps(_vb0, _va7, _sum7);    // sum7 = (a00-a07) * k70

                va += 8;
                vb += 8;
            }

            _mm256_storeu_ps(output0, _sum0);
            _mm256_storeu_ps(output1, _sum1);
            _mm256_storeu_ps(output2, _sum2);
            _mm256_storeu_ps(output3, _sum3);
            _mm256_storeu_ps(output4, _sum4);
            _mm256_storeu_ps(output5, _sum5);
            _mm256_storeu_ps(output6, _sum6);
            _mm256_storeu_ps(output7, _sum7);
#else
            /* scalar fallback: 8x8 accumulator tile */
            float sum0[8] = {0};
            float sum1[8] = {0};
            float sum2[8] = {0};
            float sum3[8] = {0};
            float sum4[8] = {0};
            float sum5[8] = {0};
            float sum6[8] = {0};
            float sum7[8] = {0};

            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 8; n++)
                {
                    sum0[n] += va[0] * vb[n];
                    sum1[n] += va[1] * vb[n];
                    sum2[n] += va[2] * vb[n];
                    sum3[n] += va[3] * vb[n];
                    sum4[n] += va[4] * vb[n];
                    sum5[n] += va[5] * vb[n];
                    sum6[n] += va[6] * vb[n];
                    sum7[n] += va[7] * vb[n];
                }
                va += 8;
                vb += 8;
            }

            for (int n = 0; n < 8; n++)
            {
                output0[n] = sum0[n];
                output1[n] = sum1[n];
                output2[n] = sum2[n];
                output3[n] = sum3[n];
                output4[n] = sum4[n];
                output5[n] = sum5[n];
                output6[n] = sum6[n];
                output7[n] = sum7[n];
            }
#endif    // __AVX__
            output0 += 8;
            output1 += 8;
            output2 += 8;
            output3 += 8;
            output4 += 8;
            output5 += 8;
            output6 += 8;
            output7 += 8;
        }

        /* N remainder: one output column at a time (B strip layout) */
        for (; j < N; j++)
        {
            float* va = pA_t + (i / 8) * 8 * K;
            float* vb = pB_t + (j / 8 + j % 8) * 8 * K;
#if __AVX__
            __m256 _sum0_7 = _mm256_set1_ps(0.0);
            __m256 _sum0 = _mm256_set1_ps(0.0);
            __m256 _sum1 = _mm256_set1_ps(0.0);
            __m256 _sum2 = _mm256_set1_ps(0.0);
            __m256 _sum3 = _mm256_set1_ps(0.0);

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                __m256 _vb0 = _mm256_broadcast_ss(vb);
                __m256 _vb1 = _mm256_broadcast_ss(vb + 1);
                __m256 _vb2 = _mm256_broadcast_ss(vb + 2);
                __m256 _vb3 = _mm256_broadcast_ss(vb + 3);
                __m256 _va0 = _mm256_loadu_ps(va);
                __m256 _va1 = _mm256_loadu_ps(va + 8);
                __m256 _va2 = _mm256_loadu_ps(va + 16);
                __m256 _va3 = _mm256_loadu_ps(va + 24);

                _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);    // sum0 += (k00-k70) * a00
                _sum1 = _mm256_fmadd_ps(_va1, _vb1, _sum1);    // sum1 += (k01-k71) * a10
                _sum2 = _mm256_fmadd_ps(_va2, _vb2, _sum2);    // sum2 += (k02-k72) * a20
                _sum3 = _mm256_fmadd_ps(_va3, _vb3, _sum3);    // sum3 += (k03-k73) * a30

                va += 32;
                vb += 4;
            }

            _sum0 = _mm256_add_ps(_sum0, _sum1);
            _sum2 = _mm256_add_ps(_sum2, _sum3);
            _sum0_7 = _mm256_add_ps(_sum0_7, _sum0);
            _sum0_7 = _mm256_add_ps(_sum0_7, _sum2);

            for (; k < K; k++)
            {
                __m256 _vb0 = _mm256_broadcast_ss(vb);
                __m256 _va = _mm256_loadu_ps(va);

                _sum0_7 = _mm256_fmadd_ps(_va, _vb0, _sum0_7);    // sum0 += (k00-k70) * a00

                va += 8;
                vb += 1;
            }

            float output_sum0_7[8] = {0.f};
            _mm256_storeu_ps(output_sum0_7, _sum0_7);

            output0[0] = output_sum0_7[0];
            output1[0] = output_sum0_7[1];
            output2[0] = output_sum0_7[2];
            output3[0] = output_sum0_7[3];
            output4[0] = output_sum0_7[4];
            output5[0] = output_sum0_7[5];
            output6[0] = output_sum0_7[6];
            output7[0] = output_sum0_7[7];
#else
            float sum0 = 0;
            float sum1 = 0;
            float sum2 = 0;
            float sum3 = 0;
            float sum4 = 0;
            float sum5 = 0;
            float sum6 = 0;
            float sum7 = 0;

            for (int k = 0; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                sum1 += va[1] * vb[0];
                sum2 += va[2] * vb[0];
                sum3 += va[3] * vb[0];
                sum4 += va[4] * vb[0];
                sum5 += va[5] * vb[0];
                sum6 += va[6] * vb[0];
                sum7 += va[7] * vb[0];

                va += 8;
                vb += 1;
            }

            output0[0] = sum0;
            output1[0] = sum1;
            output2[0] = sum2;
            output3[0] = sum3;
            output4[0] = sum4;
            output5[0] = sum5;
            output6[0] = sum6;
            output7[0] = sum7;
#endif    // __AVX__
            output0++;
            output1++;
            output2++;
            output3++;
            output4++;
            output5++;
            output6++;
            output7++;
        }
    }

    /* 4-row remainder blocks (packed at panel index i/8 + (i%8)/4) */
    nn_outch = (M - remain_outch_start) >> 2;

    for (int pp = 0; pp < nn_outch; pp++)
    {
        int i = remain_outch_start + pp * 4;

        float* output0 = pC + ( i )*N;
        float* output1 = pC + (i + 1) * N;
        float* output2 = pC + (i + 2) * N;
        float* output3 = pC + (i + 3) * N;

        int j = 0;
        for (; j + 7 < N; j += 8)
        {
            float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K;
            float* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
            __m256 _sum0 = _mm256_set1_ps(0.0);
            __m256 _sum1 = _mm256_set1_ps(0.0);
            __m256 _sum2 = _mm256_set1_ps(0.0);
            __m256 _sum3 = _mm256_set1_ps(0.0);

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _vb0 = _mm256_loadu_ps(vb);
                __m256 _vb1 = _mm256_loadu_ps(vb + 8);
                __m256 _vb2 = _mm256_loadu_ps(vb + 16);
                __m256 _vb3 = _mm256_loadu_ps(vb + 24);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0);    // sum0 = (a00-a07) * k00
                _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1);    // sum1 = (a00-a07) * k10
                _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2);    // sum2 = (a00-a07) * k20
                _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3);    // sum3 = (a00-a07) * k30

                va += 4;

                // k1
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0);    // sum0 += (a10-a17) * k01
                _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1);    // sum1 += (a10-a17) * k11
                _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2);    // sum2 += (a10-a17) * k21
                _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3);    // sum3 += (a10-a17) * k31

                va += 4;

                // k2
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0);    // sum0 += (a20-a27) * k02
                _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1);    // sum1 += (a20-a27) * k12
                _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2);    // sum2 += (a20-a27) * k22
                _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3);    // sum3 += (a20-a27) * k32

                va += 4;

                // k3
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0);    // sum0 += (a30-a37) * k03
                _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1);    // sum1 += (a30-a37) * k13
                _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2);    // sum2 += (a30-a37) * k23
                _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3);    // sum3 += (a30-a37) * k33

                va += 4;
                vb += 32;
            }

            for (; k < K; k++)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _vb0 = _mm256_loadu_ps(vb);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0);    // sum0 = (a00-a07) * k00
                _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1);    // sum1 = (a00-a07) * k10
                _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2);    // sum2 = (a00-a07) * k20
                _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3);    // sum3 = (a00-a07) * k30

                va += 4;
                vb += 8;
            }

            _mm256_storeu_ps(output0, _sum0);
            _mm256_storeu_ps(output1, _sum1);
            _mm256_storeu_ps(output2, _sum2);
            _mm256_storeu_ps(output3, _sum3);
#else
            float sum0[8] = {0};
            float sum1[8] = {0};
            float sum2[8] = {0};
            float sum3[8] = {0};

            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 8; n++)
                {
                    sum0[n] += va[0] * vb[n];
                    sum1[n] += va[1] * vb[n];
                    sum2[n] += va[2] * vb[n];
                    sum3[n] += va[3] * vb[n];
                }
                va += 4;
                vb += 8;
            }

            for (int n = 0; n < 8; n++)
            {
                output0[n] = sum0[n];
                output1[n] = sum1[n];
                output2[n] = sum2[n];
                output3[n] = sum3[n];
            }
#endif    // __AVX__
            output0 += 8;
            output1 += 8;
            output2 += 8;
            output3 += 8;
        }

        for (; j < N; j++)
        {
            float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K;
            float* vb = pB_t + (j / 8 + j % 8) * 8 * K;
#if __AVX__
            __m128 _sum0_3 = _mm_set1_ps(0.0);
            __m128 _sum0 = _mm_set1_ps(0.0);
            __m128 _sum1 = _mm_set1_ps(0.0);
            __m128 _sum2 = _mm_set1_ps(0.0);
            __m128 _sum3 = _mm_set1_ps(0.0);

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                __m128 _vb0 = _mm_set1_ps(vb[0]);
                __m128 _vb1 = _mm_set1_ps(vb[1]);
                __m128 _vb2 = _mm_set1_ps(vb[2]);
                __m128 _vb3 = _mm_set1_ps(vb[3]);
                __m128 _va0 = _mm_loadu_ps(va);
                __m128 _va1 = _mm_loadu_ps(va + 4);
                __m128 _va2 = _mm_loadu_ps(va + 8);
                __m128 _va3 = _mm_loadu_ps(va + 12);

                _sum0 = _mm_fmadd_ps(_va0, _vb0, _sum0);    // sum0 += (k00-k30) * a00
                _sum1 = _mm_fmadd_ps(_va1, _vb1, _sum1);    // sum1 += (k01-k31) * a10
                _sum2 = _mm_fmadd_ps(_va2, _vb2, _sum2);    // sum2 += (k02-k32) * a20
                _sum3 = _mm_fmadd_ps(_va3, _vb3, _sum3);    // sum3 += (k03-k33) * a30

                va += 16;
                vb += 4;
            }

            _sum0 = _mm_add_ps(_sum0, _sum1);
            _sum2 = _mm_add_ps(_sum2, _sum3);
            _sum0_3 = _mm_add_ps(_sum0_3, _sum0);
            _sum0_3 = _mm_add_ps(_sum0_3, _sum2);

            for (; k < K; k++)
            {
                __m128 _vb0 = _mm_set1_ps(vb[0]);
                __m128 _va = _mm_loadu_ps(va);

                _sum0_3 = _mm_fmadd_ps(_va, _vb0, _sum0_3);    // sum0 += (k00-k30) * a00

                va += 4;
                vb += 1;
            }

            float output_sum0_3[4] = {0.f};
            _mm_storeu_ps(output_sum0_3, _sum0_3);

            output0[0] = output_sum0_3[0];
            output1[0] = output_sum0_3[1];
            output2[0] = output_sum0_3[2];
            output3[0] = output_sum0_3[3];
#else
            float sum0 = 0;
            float sum1 = 0;
            float sum2 = 0;
            float sum3 = 0;

            for (int k = 0; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                sum1 += va[1] * vb[0];
                sum2 += va[2] * vb[0];
                sum3 += va[3] * vb[0];

                va += 4;
                vb += 1;
            }

            output0[0] = sum0;
            output1[0] = sum1;
            output2[0] = sum2;
            output3[0] = sum3;
#endif    // __AVX__
            output0++;
            output1++;
            output2++;
            output3++;
        }
    }

    remain_outch_start += nn_outch << 2;

    // output ch0
    /* single-row remainder (A strips packed at i/8 + (i%8)/4 + i%4) */
    for (int i = remain_outch_start; i < M; i++)
    {
        float* output = pC + i * N;

        int j = 0;
        for (; j + 7 < N; j += 8)
        {
            float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K;
            float* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
            __m256 _sum0 = _mm256_set1_ps(0.0);

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _vb0 = _mm256_loadu_ps(vb);
                __m256 _vb1 = _mm256_loadu_ps(vb + 8);
                __m256 _vb2 = _mm256_loadu_ps(vb + 16);
                __m256 _vb3 = _mm256_loadu_ps(vb + 24);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0);    // sum0 = (a00-a07) * k00
                _sum0 = _mm256_fmadd_ps(_vb1, _va1, _sum0);    // sum0 += (a10-a17) * k01
                _sum0 = _mm256_fmadd_ps(_vb2, _va2, _sum0);    // sum0 += (a20-a27) * k02
                _sum0 = _mm256_fmadd_ps(_vb3, _va3, _sum0);    // sum0 += (a30-a37) * k03

                va += 4;
                vb += 32;
            }

            for (; k < K; k++)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _vb0 = _mm256_loadu_ps(vb);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0);    // sum0 = (a00-a07) * k00

                va += 1;
                vb += 8;
            }

            _mm256_storeu_ps(output, _sum0);
#else
            float sum[8] = {0};

            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 8; n++)
                {
                    sum[n] += va[0] * vb[n];
                }
                va += 1;
                vb += 8;
            }

            for (int n = 0; n < 8; n++)
            {
                output[n] = sum[n];
            }
#endif    // __AVX__
            output += 8;
        }

        for (; j < N; j++)
        {
            float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K;
            float* vb = pB_t + (j / 8 + j % 8) * 8 * K;

            int k = 0;
#if __AVX__
            __m128 _sum0 = _mm_set1_ps(0.f);

            for (; k + 3 < K; k += 4)
            {
                __m128 _p0 = _mm_loadu_ps(vb);
                __m128 _k0 = _mm_loadu_ps(va);
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0));

                va += 4;
                vb += 4;
            }
#ifdef _WIN32
            /* MSVC has no operator[] on __m128; use the union member */
            float sum0 = _sum0.m128_f32[0] + _sum0.m128_f32[1] + _sum0.m128_f32[2] + _sum0.m128_f32[3];
#else
            float sum0 = _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3];
#endif
#else
            float sum0 = 0.f;
#endif    // __AVX__
            for (; k < K; k++)
            {
                sum0 += va[0] * vb[0];

                va += 1;
                vb += 1;
            }

            output[0] = sum0;
            output++;
        }
    }
}

/* int8 variant of input_pack4_fp32: pack the K x N int8 im2col matrix into
 * 8-column interleaved panels plus per-column strips for the remainder. */
void input_pack4_int8(int K, int N, int8_t* pB, int8_t* pB_t, int num_thread)
{
    int nn_size = N >> 3;
    int remian_size_start = nn_size << 3;

    // [ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33 ....]
#pragma omp parallel for num_threads(num_thread) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 8; const int8_t* img = pB + i; int8_t* tmp = pB_t + (i / 8) * 8 * K; for (int j = 0; j < K; j++) { tmp[0] = img[0]; tmp[1] = img[1]; tmp[2] = img[2]; tmp[3] = img[3]; tmp[4] = img[4]; tmp[5] = img[5]; tmp[6] = img[6]; tmp[7] = img[7]; tmp += 8; img += N; } } // [ch00, ch01, ch02, ch03 ....] #pragma omp parallel for num_threads(num_thread) for (int i = remian_size_start; i < N; i++) { const int8_t* img = pB + i; int8_t* tmp = pB_t + (i / 8 + i % 8) * 8 * K; for (int j = 0; j < K; j++) { tmp[0] = img[0]; tmp += 1; img += N; } } } static void sgemm_i8(int M, int N, int K, int8_t* pA_t, int8_t* pB_t, int32_t* pC, int num_thread) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = M >> 3; remain_outch_start = nn_outch << 3; //#pragma omp parallel for num_threads(num_thread) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 8; int32_t* output0 = pC + ( i )*N; int32_t* output1 = pC + (i + 1) * N; int32_t* output2 = pC + (i + 2) * N; int32_t* output3 = pC + (i + 3) * N; int32_t* output4 = pC + (i + 4) * N; int32_t* output5 = pC + (i + 5) * N; int32_t* output6 = pC + (i + 6) * N; int32_t* output7 = pC + (i + 7) * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t* va = pA_t + (i / 8) * 8 * K; int8_t* vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); __m256i _sum4 = _mm256_set1_epi32(0); __m256i _sum5 = _mm256_set1_epi32(0); __m256i _sum6 = _mm256_set1_epi32(0); __m256i _sum7 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { // k0 __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); __m256i _vb1 = 
_mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 8))); __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 16))); __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum7); va += 8; // k1 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum7); va += 8; // k2 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum1); _sum2 = 
_mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum7); va += 8; // k3 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum7); va += 8; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _va4 = _mm256_set1_epi32(*(va + 4)); __m256i _va5 = _mm256_set1_epi32(*(va + 5)); __m256i _va6 = _mm256_set1_epi32(*(va + 6)); __m256i _va7 = _mm256_set1_epi32(*(va + 7)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, 
_va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va4), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va5), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va6), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va7), _sum7); va += 8; vb += 8; } _mm256_storeu_si256((__m256i* )output0, _sum0); _mm256_storeu_si256((__m256i* )output1, _sum1); _mm256_storeu_si256((__m256i* )output2, _sum2); _mm256_storeu_si256((__m256i* )output3, _sum3); _mm256_storeu_si256((__m256i* )output4, _sum4); _mm256_storeu_si256((__m256i* )output5, _sum5); _mm256_storeu_si256((__m256i* )output6, _sum6); _mm256_storeu_si256((__m256i* )output7, _sum7); #else int32_t sum0[8] = {0}; int32_t sum1[8] = {0}; int32_t sum2[8] = {0}; int32_t sum3[8] = {0}; int32_t sum4[8] = {0}; int32_t sum5[8] = {0}; int32_t sum6[8] = {0}; int32_t sum7[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; } va += 8; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; output4[n] = sum4[n]; output5[n] = sum5[n]; output6[n] = sum6[n]; output7[n] = sum7[n]; } #endif output0 += 8; output1 += 8; output2 += 8; output3 += 8; output4 += 8; output5 += 8; output6 += 8; output7 += 8; } for (; j < N; j++) { int8_t* va = pA_t + (i / 8) * 8 * K; int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K; #if __AVX__ __m256i _sum0_7 = _mm256_set1_epi32(0); __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _vb1 = _mm256_set1_epi32(*(vb + 1)); __m256i _vb2 
= _mm256_set1_epi32(*(vb + 2)); __m256i _vb3 = _mm256_set1_epi32(*(vb + 3)); __m256i _va0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); __m256i _va1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 8))); __m256i _va2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 16))); __m256i _va3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_va0, _vb0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_va1, _vb1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_va2, _vb2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_va3, _vb3), _sum3); va += 32; vb += 4; } _sum0 = _mm256_add_epi32(_sum0, _sum1); _sum2 = _mm256_add_epi32(_sum2, _sum3); _sum0_7 = _mm256_add_epi32(_sum0_7, _sum0); _sum0_7 = _mm256_add_epi32(_sum0_7, _sum2); for (; k < K; k++) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _va = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); _sum0_7 = _mm256_add_epi32(_mm256_mullo_epi32(_va, _vb0), _sum0_7); va += 8; vb += 1; } int32_t output_sum0_7[8] = {0}; _mm256_storeu_si256((__m256i* )output_sum0_7, _sum0_7); output0[0] = output_sum0_7[0]; output1[0] = output_sum0_7[1]; output2[0] = output_sum0_7[2]; output3[0] = output_sum0_7[3]; output4[0] = output_sum0_7[4]; output5[0] = output_sum0_7[5]; output6[0] = output_sum0_7[6]; output7[0] = output_sum0_7[7]; #else int32_t sum0 = 0; int32_t sum1 = 0; int32_t sum2 = 0; int32_t sum3 = 0; int32_t sum4 = 0; int32_t sum5 = 0; int32_t sum6 = 0; int32_t sum7 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; sum4 += va[4] * vb[0]; sum5 += va[5] * vb[0]; sum6 += va[6] * vb[0]; sum7 += va[7] * vb[0]; va += 8; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output4[0] = sum4; output5[0] = sum5; output6[0] = sum6; output7[0] = sum7; #endif output0++; output1++; output2++; output3++; output4++; output5++; output6++; output7++; 
} } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int i = remain_outch_start + pp * 4; int32_t* output0 = pC + ( i )*N; int32_t* output1 = pC + (i + 1) * N; int32_t* output2 = pC + (i + 2) * N; int32_t* output3 = pC + (i + 3) * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; int8_t* vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = K + 4) { // k0 __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); __m256i _vb1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 8))); __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 16))); __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); va += 4; // k1 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum3); va += 4; // k2 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum0); _sum1 = 
_mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum3); va += 4; // k3 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum3); va += 4; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); va += 4; vb += 8; } _mm256_storeu_si256((__m256i* )output0, _sum0); _mm256_storeu_si256((__m256i* )output1, _sum1); _mm256_storeu_si256((__m256i* )output2, _sum2); _mm256_storeu_si256((__m256i* )output3, _sum3); #else int32_t sum0[8] = {0}; int32_t sum1[8] = {0}; int32_t sum2[8] = {0}; int32_t sum3[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; } #endif output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j < N; j++) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K; #if __AVX__ __m256i _sum0_3 = _mm256_set1_epi32(0); 
__m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k=0; for (; k + 3 < K; k = k + 4) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _vb1 = _mm256_set1_epi32(*(vb + 1)); __m256i _vb2 = _mm256_set1_epi32(*(vb + 2)); __m256i _vb3 = _mm256_set1_epi32(*(vb + 3)); __m256i _va0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); __m256i _va1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 4))); __m256i _va2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 8))); __m256i _va3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 12))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_va0, _vb0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_va1, _vb1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_va2, _vb2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_va3, _vb3), _sum3); va+=16; vb+=4; } _sum0 = _mm256_add_epi32(_sum0, _sum1); _sum2 = _mm256_add_epi32(_sum2, _sum3); _sum0_3 = _mm256_add_epi32(_sum0_3, _sum0); _sum0_3 = _mm256_add_epi32(_sum0_3, _sum2); for (; k < K; k++) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _va = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); _sum0_3 = _mm256_add_epi32(_mm256_mullo_epi32(_va, _vb0), _sum0_3); va += 4; vb += 1; } //drop last 4 value int32_t output_sum0_3[4] = {0}; _mm256_storeu_si256((__m256i* )output_sum0_3, _sum0_3); output0[0] = output_sum0_3[0]; output1[0] = output_sum0_3[1]; output2[0] = output_sum0_3[2]; output3[0] = output_sum0_3[3]; #else int32_t sum0 = 0; int32_t sum1 = 0; int32_t sum2 = 0; int32_t sum3 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; // output ch0 for (int i = remain_outch_start; i < M; i++) { 
int32_t* output = pC + i * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; int8_t* vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256i _sum0 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); __m256i _vb1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 8))); __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 16))); __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum0); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum0); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum0); va += 4; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); va += 1; vb += 8; } _mm256_storeu_si256((__m256i* )output, _sum0); #else int32_t sum[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 8; } for (int n = 0; n < 8; n++) { output[n] = sum[n]; } #endif output += 8; } for (; j < N; j++) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K; int k = 0; int32_t sum0 = 0.f; for (; k < K; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } static void sgemm_fp32(struct ir_tensor* input, struct ir_tensor* filter, struct ir_tensor* bias, struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group, int num_thread) { int kernel_size = param->kernel_h * param->kernel_w 
* param->input_channel / param->group; int outchan_g = param->output_channel / param->group; int out_h = output->dims[2]; int out_w = output->dims[3]; int out_image_size = output->dims[1] * output->dims[2] * output->dims[3]; float* interleave_fp32 = ( float* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size; float* im2col_pack4_fp32 = priv_info->im2col_buffer_pack4; float* output_fp32 = ( float* )output->data + n * out_image_size + outchan_g * group * out_h * out_w; float* bias_fp32 = NULL; if (bias) bias_fp32 = ( float* )bias->data + outchan_g * group; float* filter_sgemm = interleave_fp32; float* input_sgemm_pack4 = im2col_pack4_fp32; float* output_sgemm = output_fp32; sgemm_fp(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread); // process bias if (bias) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; output_fp32[output_off] += bias_fp32[i]; } } } // process activation relu if (param->activation == 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; } } } // process activation relu6 if (param->activation > 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; if (output_fp32[output_off] > 6) output_fp32[output_off] = 6; } } } } static void sgemm_uint8(struct ir_tensor* input, struct ir_tensor* filter, struct ir_tensor* bias, struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group, int num_thread) { int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group; int outchan_g = param->output_channel / param->group; int out_h = output->dims[2]; int out_w = output->dims[3]; int 
out_image_size = output->dims[1] * output->dims[2] * output->dims[3]; float* interleave_fp32 = ( float* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size; float* im2col_pack4_fp32 = priv_info->im2col_buffer_pack4; uint8_t * output_uint8 = ( uint8_t* )output->data + n * out_image_size + outchan_g * group * out_h * out_w; int* bias_int32 = NULL; float bias_scale = 0.f; if (bias) { bias_int32 = ( int* )bias->data + outchan_g * group; bias_scale = input->scale * filter->scale; } float* filter_sgemm = interleave_fp32; float* input_sgemm_pack4 = im2col_pack4_fp32; float* output_sgemm = (float*)sys_malloc(outchan_g * out_h * out_w * sizeof(float)); sgemm_fp(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread); /* process bias */ if (bias) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; output_sgemm[output_off] += (float )bias_int32[i] * bias_scale; } } } /* process activation relu */ if (param->activation == 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm[output_off] < 0) output_sgemm[output_off] = 0; } } } /* process activation relu6 */ if (param->activation > 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm[output_off] < 0) output_sgemm[output_off] = 0; if (output_sgemm[output_off] > 6) output_sgemm[output_off] = 6; } } } /* quant from fp32 to uint8 */ for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; int udata = ( int )(round(output_sgemm[output_off] / output->scale) + output->zero_point); if (udata > 255) udata = 255; else if (udata < 0) udata = 0; output_uint8[output_off] = udata; } } sys_free(output_sgemm); } static void sgemm_int8(struct ir_tensor* input, struct 
ir_tensor* filter, struct ir_tensor* bias, struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group, int num_thread) { int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group; int outchan_g = param->output_channel / param->group; int out_h = output->dims[2]; int out_w = output->dims[3]; int out_image_size = output->dims[1] * output->dims[2] * output->dims[3]; int8_t* interleave_int8 = ( int8_t* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size; int8_t* im2col_pack4_int8 = priv_info->im2col_buffer_pack4; int8_t * output_int8 = ( int8_t* )output->data + n * out_image_size + outchan_g * group * out_h * out_w; int32_t * bias_int32 = NULL; if (bias) bias_int32 = ( int* )bias->data + outchan_g * group; float input_scale = input->scale; float* kernel_scales = filter->scale_list; float output_scale = output->scale; int8_t* filter_sgemm = interleave_int8; int8_t* input_sgemm_pack4 = im2col_pack4_int8; int32_t* output_sgemm_int32 = (int32_t*)sys_malloc(outchan_g * out_h * out_w * sizeof(int32_t)); float* output_sgemm_fp32 = (float*)sys_malloc(outchan_g * out_h * out_w * sizeof(float)); sgemm_i8(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm_int32, num_thread); /* process bias and dequant output from int32 to fp32 */ for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (bias) output_sgemm_fp32[output_off] = (float )(output_sgemm_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i]; else output_sgemm_fp32[output_off] = (float )output_sgemm_int32[output_off] * input_scale * kernel_scales[i]; } } /* process activation relu */ if (param->activation == 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm_fp32[output_off] < 0) output_sgemm_fp32[output_off] = 0; } } } /* 
process activation relu6 */ if (param->activation > 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm_fp32[output_off] < 0) output_sgemm_fp32[output_off] = 0; if (output_sgemm_fp32[output_off] > 6) output_sgemm_fp32[output_off] = 6; } } } /* quant from fp32 to int8 */ for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; int32_t data_i32 = ( int32_t )(round(output_sgemm_fp32[output_off] / output_scale)); if (data_i32 > 127) data_i32 = 127; else if (data_i32 < -127) data_i32 = -127; output_int8[output_off] = (int8_t)data_i32; } } sys_free(output_sgemm_int32); sys_free(output_sgemm_fp32); } /* check the conv wheather need to be using winograd */ static int winograd_support(struct conv_param* param, int in_h, int in_w) { int kernel_h = param->kernel_h; int kernel_w = param->kernel_w; int stride_h = param->stride_h; int stride_w = param->stride_w; int dilation_h = param->dilation_h; int dilation_w = param->dilation_w; int input_chan = param->input_channel; int output_chan = param->output_channel; int group = param->group; if (in_h <= 10 && in_w <= 10) return 0; if (group != 1 || kernel_h != 3 || kernel_w != 3 || stride_h != 1 || stride_w != 1 || dilation_h != 1 || dilation_w != 1 || input_chan < 16 || output_chan < 16 || output_chan % 16) return 0; return 1; } int conv_hcl_get_shared_mem_size(struct ir_tensor* input, struct ir_tensor* output, struct conv_param* param) { int group = param->group; int input_chan = param->input_channel / group; int kernel_size = input_chan * param->kernel_h * param->kernel_w; int output_xy = output->dims[2] * output->dims[3]; int elem_size = input->elem_size; // simulator uint8 inference with fp32 if (input->data_type == TENGINE_DT_UINT8) elem_size = 4; return elem_size * output_xy * kernel_size; } int conv_hcl_get_shared_pack4_mem_size(struct ir_tensor* filter, struct ir_tensor* 
output, struct conv_param* param) { int K = filter->elem_num / filter->dims[0]; int N = output->dims[2] * output->dims[3]; int elem_size = filter->elem_size; // simulator uint8 inference with fp32 if (filter->data_type == TENGINE_DT_UINT8) elem_size = 4; return (8 * K * (N / 8 + N % 8)) * elem_size; } int conv_hcl_get_interleave_pack4_size(int M, int K, struct ir_tensor* filter) { int elem_size = filter->elem_size; // simulator uint8 inference with fp32 if (filter->data_type == TENGINE_DT_UINT8) elem_size = 4; int size = 8 * K * (M / 8 + (M % 8) / 4 + M % 4) * elem_size; return size; } void conv_hcl_interleave_pack4_fp32(int M, int K, struct conv_priv_info* priv_info) { float* pA = ( float* )priv_info->interleave_buffer; float* pA_t = ( float* )priv_info->interleave_buffer_pack4; int nn_outch = M >> 3; int remain_outch_start = nn_outch << 3; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; const float* k0 = pA + (p + 0) * K; const float* k1 = pA + (p + 1) * K; const float* k2 = pA + (p + 2) * K; const float* k3 = pA + (p + 3) * K; const float* k4 = pA + (p + 4) * K; const float* k5 = pA + (p + 5) * K; const float* k6 = pA + (p + 6) * K; const float* k7 = pA + (p + 7) * K; float* ktmp = pA_t + (p / 8) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp[4] = k4[0]; ktmp[5] = k5[0]; ktmp[6] = k6[0]; ktmp[7] = k7[0]; ktmp += 8; k0 += 1; k1 += 1; k2 += 1; k3 += 1; k4 += 1; k5 += 1; k6 += 1; k7 += 1; } } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; const float* k0 = pA + (p + 0) * K; const float* k1 = pA + (p + 1) * K; const float* k2 = pA + (p + 2) * K; const float* k3 = pA + (p + 3) * K; float* ktmp = pA_t + (p / 8 + (p % 8) / 4) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } remain_outch_start += nn_outch << 2; for (int 
p = remain_outch_start; p < M; p++) { const float* k0 = pA + (p + 0) * K; float* ktmp = pA_t + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } void conv_hcl_interleave_pack4_int8(int M, int K, struct conv_priv_info* priv_info) { int8_t* pA = ( int8_t * )priv_info->interleave_buffer; int8_t* pA_t = ( int8_t* )priv_info->interleave_buffer_pack4; int nn_outch = M >> 3; int remain_outch_start = nn_outch << 3; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; const int8_t* k0 = pA + (p + 0) * K; const int8_t* k1 = pA + (p + 1) * K; const int8_t* k2 = pA + (p + 2) * K; const int8_t* k3 = pA + (p + 3) * K; const int8_t* k4 = pA + (p + 4) * K; const int8_t* k5 = pA + (p + 5) * K; const int8_t* k6 = pA + (p + 6) * K; const int8_t* k7 = pA + (p + 7) * K; int8_t* ktmp = pA_t + (p / 8) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp[4] = k4[0]; ktmp[5] = k5[0]; ktmp[6] = k6[0]; ktmp[7] = k7[0]; ktmp += 8; k0 += 1; k1 += 1; k2 += 1; k3 += 1; k4 += 1; k5 += 1; k6 += 1; k7 += 1; } } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; const int8_t* k0 = pA + (p + 0) * K; const int8_t* k1 = pA + (p + 1) * K; const int8_t* k2 = pA + (p + 2) * K; const int8_t* k3 = pA + (p + 3) * K; int8_t* ktmp = pA_t + (p / 8 + (p % 8) / 4) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } remain_outch_start += nn_outch << 2; for (int p = remain_outch_start; p < M; p++) { const int8_t* k0 = pA + (p + 0) * K; int8_t* ktmp = pA_t + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } int conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* 
param) { int in_h = input_tensor->dims[2]; int in_w = input_tensor->dims[3]; /* check winograd implement, only for conv3x3s1 */ if (input_tensor->data_type == TENGINE_DT_FP32) { priv_info->winograd = winograd_support(param, in_h, in_w); if (priv_info->winograd) { return wino_conv_hcl_prerun(input_tensor, filter_tensor, output_tensor, priv_info, param); } } if (!priv_info->external_im2col_mem) { int mem_size = conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param); void* mem = sys_malloc(mem_size); priv_info->im2col_buffer = mem; priv_info->im2col_buffer_size = mem_size; } if (!priv_info->external_im2col_pack4_mem) { int mem_size = conv_hcl_get_shared_pack4_mem_size(filter_tensor, output_tensor, param); void* mem = sys_malloc(mem_size); priv_info->im2col_buffer_pack4 = mem; priv_info->im2col_buffer_pack4_size = mem_size; } if (!priv_info->external_interleave_mem) { int mem_size = get_private_mem_size(filter_tensor); void* mem = sys_malloc(mem_size); priv_info->interleave_buffer = mem; priv_info->interleave_buffer_size = mem_size; } if (input_tensor->data_type == TENGINE_DT_UINT8) interleave_uint8(filter_tensor, priv_info); else interleave(filter_tensor, priv_info); if (priv_info->external_interleave_pack4_mem) { int M = filter_tensor->dims[0]; int K = filter_tensor->elem_num / filter_tensor->dims[0]; int mem_size = conv_hcl_get_interleave_pack4_size(M, K, filter_tensor); void* mem = sys_malloc(mem_size); priv_info->interleave_buffer_pack4 = mem; priv_info->interleave_buffer_pack4_size = mem_size; if (input_tensor->data_type == TENGINE_DT_FP32 || input_tensor->data_type == TENGINE_DT_UINT8) conv_hcl_interleave_pack4_fp32(M, K, priv_info); else conv_hcl_interleave_pack4_int8(M, K, priv_info); if (!priv_info->external_interleave_mem && priv_info->interleave_buffer) { sys_free(priv_info->interleave_buffer); priv_info->interleave_buffer = NULL; } } else { priv_info->interleave_buffer_pack4 = priv_info->interleave_buffer; priv_info->interleave_buffer_pack4_size 
= priv_info->interleave_buffer_size; } return 0; } int conv_hcl_postrun(struct conv_priv_info* priv_info) { if (priv_info->winograd) { return wino_conv_hcl_postrun(priv_info); } if (priv_info->external_interleave_pack4_mem && !priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL) { sys_free(priv_info->interleave_buffer_pack4); priv_info->interleave_buffer_pack4 = NULL; } if (!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL) { sys_free(priv_info->im2col_buffer); priv_info->im2col_buffer = NULL; } if (!priv_info->external_im2col_pack4_mem && priv_info->im2col_buffer_pack4 != NULL) { sys_free(priv_info->im2col_buffer_pack4); priv_info->im2col_buffer_pack4 = NULL; } if (priv_info->external_interleave_pack4_mem && priv_info->interleave_buffer_pack4 != NULL) { sys_free(priv_info->interleave_buffer_pack4); priv_info->interleave_buffer_pack4 = NULL; } return 0; } int conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor, struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param, int num_thread, int cpu_affinity) { int group = param->group; int type = input_tensor->data_type; if (priv_info->winograd) { return wino_conv_hcl_run(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, num_thread, cpu_affinity); } for (int i = 0; i < input_tensor->dims[0]; i++) // batch size { for (int j = 0; j < group; j++) { im2col_ir(input_tensor, output_tensor, priv_info, param, i, j); int K = filter_tensor->elem_num / filter_tensor->dims[0]; int N = output_tensor->dims[2] * output_tensor->dims[3]; void* im2col_buffer = priv_info->im2col_buffer; if (priv_info->external_interleave_pack4_mem) { if (type == TENGINE_DT_FP32 || type == TENGINE_DT_UINT8) input_pack4_fp32(K, N, im2col_buffer, priv_info->im2col_buffer_pack4, num_thread); else input_pack4_int8(K, N, im2col_buffer, priv_info->im2col_buffer_pack4, num_thread); } else { 
priv_info->im2col_buffer_pack4 = im2col_buffer; } if (type == TENGINE_DT_FP32) sgemm_fp32(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread); else if (type == TENGINE_DT_UINT8) sgemm_uint8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread); else if (type == TENGINE_DT_INT8) sgemm_int8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread); else { printf("Input data type %d not to be supported.\n", input_tensor->data_type); return -1; } } } return 0; } int conv_hcl_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size) { priv_info->external_im2col_mem = 1; priv_info->im2col_buffer = mem; priv_info->im2col_buffer_size = mem_size; return 0; } int conv_hcl_set_shared_pack4_mem(struct conv_priv_info* priv_info, void* mem, int mem_size) { priv_info->external_im2col_pack4_mem = 1; priv_info->im2col_buffer_pack4 = mem; priv_info->im2col_buffer_pack4_size = mem_size; return 0; }
target-critical-1.c
/* { dg-do run } */

/* Verifies that "#pragma omp critical" serializes correctly both inside a
   "declare target" function (foo, named critical "crit1") and directly in a
   target region (main, named critical "crit2").  In each case every thread
   increments every array element exactly once inside the critical section,
   so each element must end up equal to the number of threads.  */

#include <omp.h>
#include <stdlib.h>

#define N 2000

#pragma omp declare target
int
foo ()
{
  int A[N];
  int i, nthreads;
  int res = 0;

  #pragma omp parallel shared (A, nthreads)
  {
    /* Only the master thread records the team size.  */
    #pragma omp master
    nthreads = omp_get_num_threads ();

    /* Zero the array cooperatively (implicit barrier at the end of the
       worksharing loop).  */
    #pragma omp for
    for (i = 0; i < N; i++)
      A[i] = 0;

    /* Each thread walks the whole array once, serialized by the named
       critical section, so increments cannot be lost.  */
    #pragma omp critical (crit1)
    for (i = 0; i < N; i++)
      A[i]++;
  }

  /* After the parallel region every element must equal the thread count.  */
  for (i = 0; i < N; i++)
    if (A[i] != nthreads)
      res = 1;

  return res;
}
#pragma omp end declare target

int
main ()
{
  int res1, res2;

  #pragma omp target map (from: res1, res2)
  {
    int B[N];
    int i, nthreads;

    /* First check: critical inside a declare-target function.  */
    res1 = foo ();

    /* Second check: the same pattern written directly in the target region.  */
    #pragma omp parallel shared (B, nthreads)
    {
      #pragma omp master
      nthreads = omp_get_num_threads ();

      #pragma omp for
      for (i = 0; i < N; i++)
	B[i] = 0;

      #pragma omp critical (crit2)
      for (i = 0; i < N; i++)
	B[i]++;
    }

    res2 = 0;
    for (i = 0; i < N; i++)
      if (B[i] != nthreads)
	res2 = 1;
  }

  if (res1 || res2)
    abort ();

  return 0;
}
ab-totient-omp-4.c
// Distributed and parallel technologies, Andrew Beveridge, 03/03/2014
// To Compile: gcc -Wall -O -o ab-totient-omp -fopenmp ab-totient-omp.c
// To Run / Time: /usr/bin/time -v ./ab-totient-omp range_start range_end

#include <stdio.h>
#include <omp.h>

/*
 Euler's totient: if n is a positive integer, then phi(n) is the number of
 integers k in the range 1 <= k <= n for which gcd(n, k) = 1.
 When the input is prime, the totient is simply the prime number - 1.
 The totient is always even (except for 1).
 Computed by trial-division factorization: for each prime factor p,
 multiply the running result by (1 - 1/p).
*/
long getTotient (long number)
{
    long result = number;

    // Strip the factor 2 first so the main loop can step over odd candidates only.
    if(number % 2 == 0){
        result -= result / 2;
        do
            number /= 2;
        while(number % 2 == 0);
    }

    // Primitive replacement for a list of primes: try every odd divisor up to
    // sqrt(number). Composites never divide because their prime factors were
    // already removed.
    long prime;
    for(prime = 3; prime * prime <= number; prime += 2){
        if(number % prime == 0){
            result -= result / prime;
            do
                number /= prime;
            while(number % prime == 0);
        }
    }

    // Whatever remains (> 1) is a single prime factor above sqrt(original).
    if(number > 1)
        result -= result / number;

    return result;
}

// Sum phi(i) for i in [range_start, range_end], parallelized with an OpenMP
// reduction. Exit code 0 on success, 1 on bad/missing arguments.
int main(int argc, char ** argv)
{
    // Fixed: previously argv[1]/argv[2] were read unchecked (UB when absent).
    if (argc < 3) {
        fprintf(stderr, "Usage: %s range_start range_end\n", argv[0]);
        return 1;
    }

    long lower, upper;
    if (sscanf(argv[1], "%ld", &lower) != 1 || sscanf(argv[2], "%ld", &upper) != 1) {
        fprintf(stderr, "Usage: %s range_start range_end\n", argv[0]);
        return 1;
    }

    // Fixed: the loop index was "int", which overflows when the range exceeds
    // INT_MAX even though the bounds are long; the accumulator was initialized
    // with double literals (0.0 / 1.0) despite being a long.
    long i;
    long result = 0;

    // We know the answer for 1; no need to call the function for it.
    if(lower == 1) {
        result = 1;
        lower = 2;
    }

    #pragma omp parallel for default(shared) private(i) schedule(auto) reduction(+:result) num_threads(4)
    // Sum all totients in the specified range
    for (i = lower; i <= upper; i++)
    {
        result = result + getTotient(i);
    }

    printf("Sum of Totients between [%ld..%ld] is %ld \n", lower, upper, result);
    return 0;
}
filter.h
#ifndef OPENMC_TALLIES_FILTER_H
#define OPENMC_TALLIES_FILTER_H

#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include <gsl/gsl>

#include "openmc/hdf5_interface.h"
#include "openmc/particle.h"
#include "pugixml.hpp"

namespace openmc {

//==============================================================================
//! Stores bins and weights for filtered tally events.
//==============================================================================

class FilterMatch
{
public:
  std::vector<int> bins_;      //!< matching filter bins for the current event
  std::vector<double> weights_; //!< weight associated with each matching bin
  int i_bin_;                  // NOTE(review): presumably the currently
                               // selected entry of bins_ during scoring --
                               // confirm against tally scoring code
  bool bins_present_ {false};  //!< whether bins_/weights_ are already filled
};

} // namespace openmc

// Without an explicit instantiation of vector<FilterMatch>, the Intel compiler
// will complain about the threadprivate directive on filter_matches. Note that
// this has to happen *outside* of the openmc namespace
extern template class std::vector<openmc::FilterMatch>;

namespace openmc {

//==============================================================================
//! Modifies tally score events.
//==============================================================================

class Filter
{
public:
  //----------------------------------------------------------------------------
  // Constructors, destructors, factory functions

  Filter();
  virtual ~Filter();

  //! Create a new tally filter
  //
  //! \param[in] type  Type of the filter
  //! \param[in] id  Unique ID for the filter. If none is passed, an ID is
  //!   automatically assigned
  //! \return Pointer to the new filter object
  static Filter* create(const std::string& type, int32_t id = -1);

  //! Create a new tally filter from an XML node
  //
  //! \param[in] node XML node
  //! \return Pointer to the new filter object
  static Filter* create(pugi::xml_node node);

  //! Uses an XML input to fill the filter's data fields.
  virtual void from_xml(pugi::xml_node node) = 0;

  //----------------------------------------------------------------------------
  // Methods

  //! Short string identifying the concrete filter type (e.g. for statepoints).
  virtual std::string type() const = 0;

  //! Matches a tally event to a set of filter bins and weights.
  //!
  //! \param[out] match will contain the matching bins and corresponding
  //!   weights; note that there may be zero matching bins
  virtual void get_all_bins(const Particle* p, int estimator, FilterMatch& match)
  const = 0;

  //! Writes data describing this filter to an HDF5 statepoint group.
  //! Derived classes extend this; the base writes the type and bin count.
  virtual void to_statepoint(hid_t filter_group) const
  {
    write_dataset(filter_group, "type", type());
    write_dataset(filter_group, "n_bins", n_bins_);
  }

  //! Return a string describing a filter bin for the tallies.out file.
  //
  //! For example, an `EnergyFilter` might return the string
  //! "Incoming Energy [0.625E-6, 20.0)".
  virtual std::string text_label(int bin) const = 0;

  //----------------------------------------------------------------------------
  // Accessors

  //! Get unique ID of filter
  //! \return Unique ID
  int32_t id() const { return id_; }

  //! Assign a unique ID to the filter
  //! \param[in] Unique ID to assign. A value of -1 indicates that an ID should
  //!   be automatically assigned
  void set_id(int32_t id);

  //! Get number of bins
  //! \return Number of bins
  int n_bins() const { return n_bins_; }

  //! Position of this filter in the global tally_filters vector.
  gsl::index index() const { return index_; }

  //----------------------------------------------------------------------------
  // Data members

protected:
  int n_bins_;        //!< number of bins this filter scores into

private:
  int32_t id_ {-1};   //!< user-visible unique ID (-1 = unassigned)
  gsl::index index_;  //!< index into model::tally_filters
};

//==============================================================================
// Global variables
//==============================================================================

namespace simulation {
  // Per-thread scratch for filter matching during tally scoring.
  extern std::vector<FilterMatch> filter_matches;
#pragma omp threadprivate(filter_matches)
}

namespace model {
  extern "C" int32_t n_filters;
  extern std::vector<std::unique_ptr<Filter>> tally_filters;
  // Maps user-assigned filter ID -> index in tally_filters.
  extern std::unordered_map<int, int> filter_map;
}

//==============================================================================
// Non-member functions
//==============================================================================

//! Make sure index corresponds to a valid filter
int verify_filter(int32_t index);

} // namespace openmc
#endif // OPENMC_TALLIES_FILTER_H
Bvh.h
#ifndef GAME_ENGINE_BVH_H #define GAME_ENGINE_BVH_H #include "CoreLib/Basic.h" #include "CoreLib/Graphics/BBox.h" #include "Ray.h" namespace GameEngine { using namespace CoreLib::Basic; const int nBuckets = 16; class BvhNode { public: CoreLib::Graphics::BBox Bounds; unsigned int Axis : 2; unsigned int SkipBBoxTest : 1; int ElementCount : 29; union { int ElementId; int ChildOffset; }; inline bool GetIsLeaf() { return ElementCount != 0; } inline int GetElementCount() { return ElementCount; } }; template<typename T> class BvhNode_Build { public: CoreLib::Graphics::BBox Bounds; int Axis; T** Elements; int ElementCount; BvhNode_Build* Children[2]; void AllocElements(int count) { Elements = (T**)malloc(count * sizeof(T*)); ElementCount = count; } void FreeElements() { if (Elements) { free(Elements); Elements = 0; } } BvhNode_Build() { Children[0] = 0; Children[1] = 0; Axis = 0; ElementCount = 0; Elements = 0; } ~BvhNode_Build() { if (Children[0]) delete Children[0]; if (Children[1]) delete Children[1]; FreeElements(); } }; template<typename T> class Bvh_Build { public: CoreLib::RefPtr<BvhNode_Build<T>> Root; int ElementListSize; int NodeCount = 0; }; template<typename T> class Bvh { private: int FlattenNodes(BvhNode_Build<T> * node) { int id = Nodes.Count(); BvhNode n; n.Axis = node->Axis; n.Bounds = node->Bounds; if (node->Elements == 0) n.ElementCount = 0; else n.ElementCount = node->ElementCount; n.SkipBBoxTest = 0; Nodes.Add(n); if (node->Elements == 0) { FlattenNodes(node->Children[0]); Nodes[id].ChildOffset = FlattenNodes(node->Children[1]) - id; } else { Nodes[id].ElementId = Elements.Count(); for (int i = 0; i < node->ElementCount; i++) Elements.Add(*node->Elements[i]); } return id; } public: CoreLib::List<BvhNode> Nodes; CoreLib::List<T> Elements; void FromBuild(Bvh_Build<T> &bvh) { Nodes.Clear(); Elements.Clear(); Nodes.Reserve((int)bvh.NodeCount); Elements.Reserve((int)bvh.ElementListSize); FlattenNodes(bvh.Root.operator->()); } }; template<typename T> class 
BuildData { public: T * Element; CoreLib::Graphics::BBox Bounds; VectorMath::Vec3 Center; }; inline float SurfaceArea(CoreLib::Graphics::BBox & box) { return ((box.xMax - box.xMin)*(box.yMax - box.yMin) + (box.xMax - box.xMin)*(box.zMax - box.zMax) + (box.yMax - box.yMin)*(box.zMax - box.zMin))*2.0f; } struct BucketInfo { int count; CoreLib::Graphics::BBox bounds; BucketInfo() { count = 0; bounds.Init(); } }; template<typename T, typename CostEvaluator> BvhNode_Build<T> * ConstructBvhNodeNonRec(BuildData<T>* elements, int elementCount, int & elementListSize, int & nodeCount, CostEvaluator & eval) { struct BvhJob { BvhNode_Build<T> ** result; BuildData<T>* elements; int elementCount; BvhJob() {} BvhJob(BvhNode_Build<T> ** result, BuildData<T>* elements, int elementCount) { this->elements = elements; this->elementCount = elementCount; this->result = result; } }; const int stackSize = 256; BvhJob stack[stackSize]; int stackPtr = 0; auto pushJob = [&](BvhNode_Build<T> ** result, BuildData<T>* elements, int elementCount) { BvhJob job(result, elements, elementCount); if (stackPtr < stackSize) stack[stackPtr++] = job; else throw "stack overflow"; }; auto popJob = [&]()->BvhJob { if (stackPtr) return stack[--stackPtr]; else throw "stack empty"; }; BvhNode_Build<T> * rs = 0; nodeCount = 0; elementListSize = 0; BvhJob job(&rs, elements, elementCount); while (true) { BvhNode_Build<T> * node = new BvhNode_Build<T>(); nodeCount++; (*job.result) = node; BuildData<T>* jElements = job.elements; int jElementCount = job.elementCount; if (jElementCount == 0) { printf("elementCount = 0 !"); throw 0; } if (jElementCount == 1 || stackPtr == stackSize) { node->Bounds = jElements->Bounds; node->AllocElements((int)jElementCount); for (int i = 0; i < (int)jElementCount; i++) { node->Elements[i] = jElements[i].Element; } elementListSize += jElementCount; if (!stackPtr) break; else job = popJob(); continue; } else { CoreLib::Graphics::BBox centroidBounds; CoreLib::Graphics::BBox bbox; 
centroidBounds.Init(); bbox.Init(); for (int i = 0; i < jElementCount; i++) { centroidBounds.Union(jElements[i].Center); bbox.Union(jElements[i].Bounds); } node->Bounds = bbox; int dim = centroidBounds.MaxDimension(); if (centroidBounds.Min[dim] == centroidBounds.Max[dim]) { node->Bounds = bbox; node->AllocElements((int)jElementCount); for (int i = 0; i < (int)jElementCount; i++) { node->Elements[i] = jElements[i].Element; } elementListSize += jElementCount; if (!stackPtr) break; else job = popJob(); continue; } BucketInfo buckets[nBuckets]; for (int i = 0; i < jElementCount; i++) { int b = (int)(nBuckets * ((jElements[i].Center[dim] - centroidBounds.Min[dim]) / (centroidBounds.Max[dim] - centroidBounds.Min[dim]))); if (b == nBuckets) b = nBuckets - 1; buckets[b].count++; buckets[b].bounds.Union(jElements[i].Bounds); } float minCost = FLT_MAX; int minCostSplit = 0; CoreLib::Graphics::BBox bounds1[nBuckets - 1]; bounds1[nBuckets - 2] = buckets[nBuckets - 1].bounds; for (int i = nBuckets - 3; i >= 0; i--) { bounds1[i].Init(); bounds1[i].Union(buckets[i + 1].bounds); bounds1[i].Union(bounds1[i + 1]); } CoreLib::Graphics::BBox b0; b0.Init(); int count0 = 0; for (int i = 0; i < nBuckets - 1; i++) { b0.Union(buckets[i].bounds); count0 += buckets[i].count; int count1 = (int)jElementCount - count0; float cost = eval.EvalCost(count0, SurfaceArea(b0), count1, SurfaceArea(bounds1[i]), SurfaceArea(bbox)); if (cost < minCost) { minCost = cost; minCostSplit = i; } } if (jElementCount > CostEvaluator::ElementsPerNode || minCost < jElementCount) { BuildData<T> *pmid = std::partition(jElements, jElements + jElementCount, [&](const BuildData<T> &p) { int b = (int)(nBuckets * ((p.Center[dim] - centroidBounds.Min[dim]) / (centroidBounds.Max[dim] - centroidBounds.Min[dim]))); if (b == nBuckets) b = nBuckets - 1; return b <= minCostSplit; }); node->Axis = dim; job = BvhJob(node->Children, jElements, (int)(pmid - jElements)); pushJob(node->Children + 1, pmid, (int)(jElements + 
jElementCount - pmid)); } else { node->AllocElements((int)jElementCount); node->Bounds = bbox; for (int i = 0; i < (int)jElementCount; i++) { node->Elements[i] = jElements[i].Element; } elementListSize += jElementCount; if (!stackPtr) break; else job = popJob(); continue; } } } return rs; } template<typename T, typename CostEvaluator> BvhNode_Build<T> * ConstructBvhNode(Bvh_Build<T> & tree, BuildData<T>* elements, int elementCount, int & elementListSize, int & nodeCount, CostEvaluator & eval, int depth) { BvhNode_Build<T> * node = new BvhNode_Build<T>(); nodeCount = 1; elementListSize = 0; if (elementCount == 1 || depth == 61) { node->Bounds = elements->Bounds; node->AllocElements(1); node->Elements[0] = elements->Element; elementListSize = 1; return node; } else { CoreLib::Graphics::BBox centroidBounds; CoreLib::Graphics::BBox bbox; centroidBounds.Init(); bbox.Init(); for (int i = 0; i < elementCount; i++) { centroidBounds.Union(elements[i].Center); bbox.Union(elements[i].Bounds); } node->Bounds = bbox; int dim = centroidBounds.MaxDimension(); if (centroidBounds.Min[dim] == centroidBounds.Max[dim]) { node->Bounds = bbox; node->AllocElements((int)elementCount); for (int i = 0; i < (int)elementCount; i++) { node->Elements[i] = elements[i].Element; } elementListSize = elementCount; return node; } BucketInfo buckets[nBuckets]; if (elementCount > (2 << 12)) { const int processorCount = 16; BucketInfo buckets_proc[processorCount][nBuckets]; int blockSize = (int)(elementCount / processorCount); #pragma omp parallel for for (int procId = 0; procId < processorCount; procId++) { int end; if (procId == processorCount - 1) end = (int)elementCount; else end = (procId + 1)*blockSize; for (int i = procId * blockSize; i < end; i++) { int b = (int)(nBuckets * ((elements[i].Center[dim] - centroidBounds.Min[dim]) / (centroidBounds.Max[dim] - centroidBounds.Min[dim]))); if (b == nBuckets) b = nBuckets - 1; buckets_proc[procId][b].count++; 
buckets_proc[procId][b].bounds.Union(elements[i].Bounds); } } for (int i = 0; i < nBuckets; i++) { for (int j = 0; j < processorCount; j++) { buckets[i].count += buckets_proc[j][i].count; buckets[i].bounds.Union(buckets_proc[j][i].bounds); } } } else { for (int i = 0; i < elementCount; i++) { int b = (int)(nBuckets * ((elements[i].Center[dim] - centroidBounds.Min[dim]) / (centroidBounds.Max[dim] - centroidBounds.Min[dim]))); if (b == nBuckets) b = nBuckets - 1; buckets[b].count++; buckets[b].bounds.Union(elements[i].Bounds); } } CoreLib::Graphics::BBox bounds1[nBuckets - 1]; bounds1[nBuckets - 2] = buckets[nBuckets - 1].bounds; for (int i = nBuckets - 3; i >= 0; i--) { bounds1[i].Init(); bounds1[i].Union(buckets[i + 1].bounds); bounds1[i].Union(bounds1[i + 1]); } CoreLib::Graphics::BBox b0; b0.Init(); int count0 = 0; float minCost = FLT_MAX; int minCostSplit = 0; for (int i = 0; i < nBuckets - 1; i++) { b0.Union(buckets[i].bounds); count0 += buckets[i].count; int count1 = (int)elementCount - count0; float cost = eval.EvalCost(count0, SurfaceArea(b0), count1, SurfaceArea(bounds1[i]), SurfaceArea(bbox)); if (cost < minCost) { minCost = cost; minCostSplit = i; } } if (elementCount > CostEvaluator::ElementsPerNode || minCost < elementCount) { BuildData<T> *pmid = std::partition(elements, elements + elementCount, [&](const BuildData<T> &p) { int b = (int)(nBuckets * ((p.Center[dim] - centroidBounds.Min[dim]) / (centroidBounds.Max[dim] - centroidBounds.Min[dim]))); if (b == nBuckets) b = nBuckets - 1; return b <= minCostSplit; }); node->Axis = dim; int listSize1, listSize2; int nodeCount1, nodeCount2; if (depth > 8) { node->Children[0] = ConstructBvhNodeNonRec<T, CostEvaluator>(elements, (int)(pmid - elements), listSize1, nodeCount1, eval); node->Children[1] = ConstructBvhNodeNonRec<T, CostEvaluator>(pmid, (int)(elements + elementCount - pmid), listSize2, nodeCount2, eval); } else { #pragma omp parallel sections { #pragma omp section node->Children[0] = 
ConstructBvhNode<T, CostEvaluator>(tree, elements, (int)(pmid - elements), listSize1, nodeCount1, eval, depth + 1); #pragma omp section node->Children[1] = ConstructBvhNode<T, CostEvaluator>(tree, pmid, (int)(elements + elementCount - pmid), listSize2, nodeCount2, eval, depth + 1); } } node->ElementCount = (int)(elementListSize = listSize1 + listSize2); nodeCount += nodeCount1 + nodeCount2; } else { node->AllocElements((int)elementCount); node->Bounds = bbox; for (int i = 0; i < (int)elementCount; i++) { node->Elements[i] = elements[i].Element; } elementListSize = elementCount; } return node; } } template<typename T, typename CostEvaluator> void ConstructBvh(Bvh_Build<T> & tree, BuildData<T>* elements, int elementCount, CostEvaluator & eval) { tree.Root = ConstructBvhNode<T, CostEvaluator>(tree, elements, elementCount, tree.ElementListSize, tree.NodeCount, eval, 0); } template<typename T, typename Tracer, typename THit, bool pred> bool TraverseBvh(const Tracer & tracer, THit& rs, Bvh<T> & tree, const Ray & ray, VectorMath::Vec3 rcpDir) { bool hit = false; float tmax = ray.tMax; int dirIsNeg[3] = { rcpDir.x < 0, rcpDir.y < 0, rcpDir.z < 0 }; BvhNode* node = tree.Nodes.Buffer(); int todoOffset = 0; BvhNode* todo[256]; auto traceRay = ray; while (true) { float t1, t2; if (RayBBoxIntersection_RcpDir(node->Bounds, ray.Origin, rcpDir, t1, t2) && t1 < traceRay.tMax) { if (node->ElementCount > 0) { THit inter; for (int i = node->ElementId; i < node->ElementId + node->ElementCount; i++) { if (tracer.Trace(inter, tree.Elements[i], traceRay, tmax)) { if (pred) return true; if (tmax <= traceRay.tMax) { rs = inter; traceRay.tMax = tmax; hit = true; } } } if (todoOffset == 0) break; node = todo[--todoOffset]; } else { if (ray.Origin[node->Axis] > (node + 1)->Bounds.Max[node->Axis]) { todo[todoOffset++] = node + 1; node = node + node->ChildOffset; } else { todo[todoOffset++] = node + node->ChildOffset; node = node + 1; } } } else { if (todoOffset == 0) break; node = 
todo[--todoOffset]; } } return hit; } } #endif
IBKMKC_vector_operations.c
/* IBK Math Kernel Library Copyright (c) 2001-today, Institut fuer Bauklimatik, TU Dresden, Germany Written by A. Nicolai, A. Paepcke, H. Fechner, St. Vogelsang All rights reserved. This file is part of the IBKMK Library. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. This library contains derivative work based on other open-source libraries, see LICENSE and OTHER_LICENSES files. 
*/

#include "IBKMKC_vector_operations.h"

#include <memory.h>
#include <IBK_openMP.h>

#ifdef __cplusplus
namespace IBKMK {
#endif

/* All kernels below share one scheme: the first (vectorSize % 8) elements are
   handled by a single thread inside "omp single" (copyprivate broadcasts the
   resulting start index i to the team), then the remaining multiple-of-8 part
   is processed by an 8-way unrolled, work-shared "omp for" loop. The functions
   are therefore meant to be called from inside an active parallel region. */

/* Fill an unsigned-int vector with a constant value. */
void vectorUInt_fill( IBKMK_CONST unsigned int vectorSize, unsigned int * targetVector, IBKMK_CONST unsigned int value){
	int i=0;
	int j;
#pragma omp single copyprivate(i)
	{
		/* remainder elements first, so the unrolled loop below is aligned */
		for (; i<(int)vectorSize % 8; ++i) {
			targetVector[i] = value;
		}
	}
#pragma omp for
	for ( j = i; j<(int)vectorSize; j+=8) {
		targetVector[j  ] = value;
		targetVector[j+1] = value;
		targetVector[j+2] = value;
		targetVector[j+3] = value;
		targetVector[j+4] = value;
		targetVector[j+5] = value;
		targetVector[j+6] = value;
		targetVector[j+7] = value;
	}
}

/* Fill a double vector with a constant value. */
void vector_fill( IBKMK_CONST unsigned int vectorSize, double * targetVector, IBKMK_CONST double value){
	int i=0;
	int j;
#pragma omp single copyprivate(i)
	{
		for (; i<(int)vectorSize % 8; ++i) {
			targetVector[i] = value;
		}
	}
#pragma omp for
	for ( j = i; j<(int)vectorSize; j+=8) {
		targetVector[j  ] = value;
		targetVector[j+1] = value;
		targetVector[j+2] = value;
		targetVector[j+3] = value;
		targetVector[j+4] = value;
		targetVector[j+5] = value;
		targetVector[j+6] = value;
		targetVector[j+7] = value;
	}
}

/* Copy vector x into y (y = x). */
void vector_copy( IBKMK_CONST unsigned int n, IBKMK_CONST double * x, double * y) {
	// openmp doesn't support memcopy since it is not thread safe (static variable use).
	// for serial code, always use memcopy
#if defined(_OPENMP)
	unsigned int i=0;
	int j;
#pragma omp single copyprivate(i)
	{
		for (; i<n % 8; ++i) {
			y[i] = x[i];
		}
	}
#pragma omp for
	for ( j = i; j<(int)n; j+=8) {
		y[j  ] = x[j  ];
		y[j+1] = x[j+1];
		y[j+2] = x[j+2];
		y[j+3] = x[j+3];
		y[j+4] = x[j+4];
		y[j+5] = x[j+5];
		y[j+6] = x[j+6];
		y[j+7] = x[j+7];
	}
#else
	/// \todo Think about checking against x == y --> might indicate programming error in
	/// calling code.
	// nobody beats memcopy as long he uses compiler intrinsics
	memcpy(y, x, n*sizeof(double) );
#endif // defined(_OPENMP)
}

/* Scaled copy: y = a*x. */
void vector_scale(IBKMK_CONST unsigned int n, double a, IBKMK_CONST double * x, double * y) {
	unsigned int i=0;
	int j;
	/* align data */
#pragma omp single copyprivate(i)
	{
		for (; i<n % 8; ++i) {
			y[i] = a*x[i];
		}
	}
	/* use loop unrolling for 8 bytes */
#pragma omp for
	for ( j=i; j<(int)n; j+=8) {
		y[j  ] = a*x[j  ];
		y[j+1] = a*x[j+1];
		y[j+2] = a*x[j+2];
		y[j+3] = a*x[j+3];
		y[j+4] = a*x[j+4];
		y[j+5] = a*x[j+5];
		y[j+6] = a*x[j+6];
		y[j+7] = a*x[j+7];
	}
}

/* In-place scale: x *= a. */
void vector_scale_by(IBKMK_CONST unsigned int n, double a, double * x) {
	unsigned int i=0;
	int j;
	/* align data */
#pragma omp single copyprivate(i)
	{
		for (; i<n % 8; ++i) {
			x[i] *= a;
		}
	}
	/* use loop unrolling for 8 bytes */
#pragma omp for
	for (j=i; j<(int)n; j+=8) {
		x[j  ] *= a;
		x[j+1] *= a;
		x[j+2] *= a;
		x[j+3] *= a;
		x[j+4] *= a;
		x[j+5] *= a;
		x[j+6] *= a;
		x[j+7] *= a;
	}
}

/* AXPY: y += a*x, with a fast path that skips the multiply when a == 1. */
void vector_add(IBKMK_CONST unsigned int n, double a, IBKMK_CONST double * x, double * y) {
	unsigned int i=0;
	int j;
	if (a == IBKMK_ONE) {
		/* align data */
#pragma omp single copyprivate(i)
		{
			for (; i<n % 8; ++i) {
				y[i] += x[i];
			}
		}
		/* use loop unrolling for 8 bytes */
#pragma omp for
		for ( j=i; j<(int)n; j+=8) {
			y[j  ] += x[j  ];
			y[j+1] += x[j+1];
			y[j+2] += x[j+2];
			y[j+3] += x[j+3];
			y[j+4] += x[j+4];
			y[j+5] += x[j+5];
			y[j+6] += x[j+6];
			y[j+7] += x[j+7];
		}
	}
	else {
		/* align data */
#pragma omp single copyprivate(i)
		{
			for (; i<n % 8; ++i) {
				y[i] += a*x[i];
			}
		}
		/* use loop unrolling for 8 bytes */
#pragma omp for
		for ( j=i; j<(int)n; j+=8) {
			y[j  ] += a*x[j  ];
			y[j+1] += a*x[j+1];
			y[j+2] += a*x[j+2];
			y[j+3] += a*x[j+3];
			y[j+4] += a*x[j+4];
			y[j+5] += a*x[j+5];
			y[j+6] += a*x[j+6];
			y[j+7] += a*x[j+7];
		}
	}
}

/* Element-wise subtraction: y -= x. */
void vector_sub(IBKMK_CONST unsigned int n, IBKMK_CONST double * x, double * y) {
	unsigned int i=0;
	int j;
	/* align data */
#pragma omp single copyprivate(i)
	{
		for (; i<n % 8; ++i) {
			y[i] -= x[i];
		}
	}
	/* use loop unrolling for 8 bytes */
#pragma omp for
	for (j=i; j<(int)n; j+=8) {
		y[j  ] -= x[j  ];
		y[j+1] -= x[j+1];
		y[j+2] -= x[j+2];
		y[j+3] -= x[j+3];
		y[j+4] -= x[j+4];
		y[j+5] -= x[j+5];
		y[j+6] -= x[j+6];
		y[j+7] -= x[j+7];
	}
}

/* Linear combination: z = a*x + b*y. */
void vector_linear_sum(IBKMK_CONST unsigned int n, double a, IBKMK_CONST double * x, double b, IBKMK_CONST double * y, double * z) {
	unsigned int i=0;
	int j;
#pragma omp single copyprivate(i)
	{
		/* align data */
		for (; i<n % 8; ++i) {
			z[i] = a*x[i] + b*y[i];
		}
	}
	/* use loop unrolling for 8 bytes */
#pragma omp for
	for ( j=i; j<(int)n; j+=8) {
		z[j  ] = a*x[j  ] + b*y[j  ];
		z[j+1] = a*x[j+1] + b*y[j+1];
		z[j+2] = a*x[j+2] + b*y[j+2];
		z[j+3] = a*x[j+3] + b*y[j+3];
		z[j+4] = a*x[j+4] + b*y[j+4];
		z[j+5] = a*x[j+5] + b*y[j+5];
		z[j+6] = a*x[j+6] + b*y[j+6];
		z[j+7] = a*x[j+7] + b*y[j+7];
	}
}

#ifdef __cplusplus
} // namespace IBKMK
#endif
ConvexLS.h
///////////////////////////////////////////////////////////////////////////////
// Dem Bones - Skinning Decomposition Library
//
// Copyright (c) 2019, Electronic Arts. All rights reserved.
//
///////////////////////////////////////////////////////////////////////////////

#ifndef DEM_BONES_CONVEX_LS
#define DEM_BONES_CONVEX_LS

#include <Eigen/Dense>
#include <Eigen/StdVector>
#include "Indexing.h"

namespace Dem
{

/** @class ConvexLS ConvexLS.h "DemBones/ConvexLS.h"
	@brief Linear least squares solver with non-negativity constraint and optional affinity constraint

	@details Solve:
	@f{eqnarray*}{
		min &||Ax-b||^2 \\
		\mbox{Subject to: } & x(0).. x(n-1) \geq 0, \\
		\mbox{(optional) } & x(0) +.. + x(n-1) = 1
	@f}

	The solver implements active set method to handle non-negativity constraint
	and QR decomposition to handle affinity constraint.

	@b _Scalar is the floating-point data type.
*/
template<class _Scalar>
class ConvexLS {
public:
	EIGEN_MAKE_ALIGNED_OPERATOR_NEW

	using MatrixX=Eigen::Matrix<_Scalar, Eigen::Dynamic, Eigen::Dynamic>;
	using VectorX=Eigen::Matrix<_Scalar, Eigen::Dynamic, 1>;

	/** Constructor, just call init()
		@param[in] maxSize is the maximum size of the unknown @f$ x @f$ if the affinity constraint is imposed.
	*/
	ConvexLS(int maxSize=1) {
		q2.resize(0);
		init(maxSize);
	}

	/** Init matrices @f$ Q @f$ in the QR decomposition used for affinity constraint
		@param[in] maxSize is the maximum size of the unknown @f$ x @f$ if the affinity constraint is imposed.
	*/
	void init(int maxSize) {
		int curN=(int)q2.size()+1;
		if (curN<maxSize) {
			q2.resize(maxSize-1);
			// Precompute the null-space bases in parallel; entry n serves
			// problems of size n+2.
			#pragma omp parallel for
			for (int n=curN-1; n<maxSize-1; n++)
				q2[n]=MatrixX(VectorX::Constant(n+2, _Scalar(1)).householderQr().householderQ()).rightCols(n+1);
		}
	}

	/** Solve the least squares problem
		@param[in] aTa is the cross product matrix @f$ A^TA @f$
		@param[in] aTb is the vector @f$ A^Tb @f$
		@param[in, out] x is the by-reference output and it is also the init solution (if @b warmStart == @c true)
		@param[in] affine=true will impose affinity constraint
		@param[in] warmStart=true will initialize the solution by @b x
	*/
	void solve(const MatrixX& aTa, const VectorX& aTb, VectorX& x, bool affine, bool warmStart=false) {
		int n=int(aTa.cols());
		if (!warmStart) x=VectorX::Constant(n, _Scalar(1)/n);

		// idx[0..np-1] holds the passive (free) set; the rest is the active set.
		Eigen::ArrayXi idx(n);
		int np=0;
		for (int i=0; i<n; i++)
			if (x(i)>0) idx[np++]=i; else idx[n-i+np-1]=i;

		VectorX p;
		for (int rep=0; rep<n; rep++) {
			solveP(aTa, aTb, x, idx, np, affine, p);
			if ((indexing_vector(x, idx.head(np))+indexing_vector(p, idx.head(np))).minCoeff()>=0) {
				// Full step stays feasible: take it, then free the variable
				// with the largest positive gradient.
				x+=p;
				if (np==n) break;
				Eigen::Index iMax;
				(indexing_vector(aTb, idx.tail(n-np))-indexing_row(aTa, idx.tail(n-np))*x).maxCoeff(&iMax);
				std::swap(idx[iMax+np], idx[np]);
				np++;
			} else {
				// Step would violate a bound: move to the nearest bound along p
				// and push the blocking variable(s) into the active set.
				// NOTE(review): alpha is only assigned when some p(idx[i])<0;
				// the feasibility test above appears to guarantee this since
				// x>=0 on the passive set -- confirm the invariant holds.
				_Scalar alpha;
				int iMin=-1;
				for (int i=0; i<np; i++)
					if (p(idx[i])<0) {
						if ((iMin==-1)||(x(idx[i])<-alpha*p(idx[i]))) {
							alpha=-x(idx[i])/p(idx[i]);
							iMin=i;
						}
					}
				x+=alpha*p;
				_Scalar eps=std::abs(x(idx[iMin]));
				x(idx[iMin])=0;
				for (int i=0; i<np; i++)
					if (x(idx[i])<=eps) std::swap(idx[i--], idx[--np]);
			}
			if (affine) x/=x.sum();
		}
	}

private:
	//! Store @f$ Q @f$ matrices in QR decompositions, except the first column.
	//! q2.size()==maxSize-1 (of x), q2[n].size()==(n+2)*(n+1)
	std::vector<MatrixX, Eigen::aligned_allocator<MatrixX>> q2;

	/** Solve the gradient
		@param[in] aTa is the cross product matrix @f$ A^TA @f$
		@param[in] aTb is the vector @f$ A^Tb @f$
		@param[in] x is the current solution
		@param[in] idx indicates the current active set, @p idx(0).. @p idx(np-1) are passive (free) variables
		@param[in] np is the size of the active set
		@param[in] zeroSum=true will impose zero-sum of gradient
		@param[out] p is the by-reference negative gradient output
	*/
	void solveP(const MatrixX& aTa, const VectorX& aTb, const VectorX& x, const Eigen::ArrayXi& idx, int np, bool zeroSum, VectorX& p) {
		VectorX z;
		p.setZero(aTb.size());
		if (!zeroSum) {
			z=	indexing_row_col(aTa, idx.head(np), idx.head(np)).colPivHouseholderQr().solve(	//A
				indexing_vector(aTb, idx.head(np))-indexing_row(aTa, idx.head(np))*x);			//b
			for (int ip=0; ip<np; ip++) p(idx[ip])=z(ip);
		} else if (np>1) {
			// Project onto the zero-sum subspace via the precomputed basis q2.
			z=q2[np-2]*(	//Re-project
				(q2[np-2].transpose()*indexing_row_col(aTa, idx.head(np), idx.head(np))*q2[np-2]).colPivHouseholderQr().solve(	//A
				q2[np-2].transpose()*(indexing_vector(aTb, idx.head(np))-indexing_row(aTa, idx.head(np))*x)	));					//b
			for (int ip=0; ip<np; ip++) p(idx[ip])=z(ip);
		}
	}
};

}

#endif
omp_alloc_null_fb.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <omp.h> int main() { omp_alloctrait_t at[2]; omp_allocator_handle_t a; void *p[2]; at[0].key = omp_atk_pool_size; at[0].value = 2 * 1024 * 1024; at[1].key = omp_atk_fallback; at[1].value = omp_atv_null_fb; a = omp_init_allocator(omp_large_cap_mem_space, 2, at); printf("allocator large created: %p\n", (void *)a); #pragma omp parallel num_threads(2) { int i = omp_get_thread_num(); #pragma omp barrier p[i] = omp_alloc(1024 * 1024, a); #pragma omp barrier printf("th %d, ptr %p\n", i, p[i]); omp_free(p[i], a); } // As an allocator has some small memory overhead // exactly one of the two pointers should be NULL // because of NULL fallback requested if ((p[0] == NULL && p[1] != NULL) || (p[0] != NULL && p[1] == NULL)) { printf("passed\n"); return 0; } else { printf("failed: pointers %p %p\n", p[0], p[1]); return 1; } }
convolution_3x3_pack1to8_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 3x3 stride-1 fp16 convolution: input holds 1 element per pixel (pack1),
// output holds 8 interleaved channels per pixel (pack8).  Per input channel
// the kernel stores 9 taps of 8 output-channel weights (k0 advances by 9*8).
// Assumes bottom_blob has been padded by the caller so rows i..i+2 of each
// input channel are readable for every output row -- TODO confirm at call site.
static void conv3x3s1_pack1to8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const __fp16* bias = _bias;

    // one 8-channel output group per parallel task
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        // seed the whole output channel with the bias (or zero), then
        // accumulate every input channel's contribution on top
        float16x8_t _bias0 = bias ? vld1q_f16(bias + p * 8) : vdupq_n_f16((__fp16)0.f);
        out0.fill(_bias0);

        const __fp16* k0 = kernel.channel(p);

        int q = 0;
        for (; q < inch; q++)
        {
            __fp16* outptr0 = out0;

            const Mat img0 = bottom_blob.channel(q);

            const __fp16* r0 = img0.row<const __fp16>(0);
            const __fp16* r1 = img0.row<const __fp16>(1);
            const __fp16* r2 = img0.row<const __fp16>(2);

            // 9 taps x 8 output-channel weights for this input channel
            float16x8_t _k00 = vld1q_f16(k0);
            float16x8_t _k01 = vld1q_f16(k0 + 8);
            float16x8_t _k02 = vld1q_f16(k0 + 16);
            float16x8_t _k10 = vld1q_f16(k0 + 24);
            float16x8_t _k11 = vld1q_f16(k0 + 32);
            float16x8_t _k12 = vld1q_f16(k0 + 40);
            float16x8_t _k20 = vld1q_f16(k0 + 48);
            float16x8_t _k21 = vld1q_f16(k0 + 56);
            float16x8_t _k22 = vld1q_f16(k0 + 64);

            int i = 0;
            for (; i < outh; i++)
            {
                int j = 0;
                // 8 output pixels per iteration (sums in v24-v31)
                for (; j + 7 < outw; j += 8)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #512] \n"
                        "ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%0], #64 \n" // sum0 sum1 sum2 sum3
                        "prfm pldl1keep, [%0, #512] \n"
                        "ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum4 sum5 sum6 sum7
                        "sub %0, %0, #64 \n"
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v0.8h}, [%1], #16 \n" // r0
                        "ld1 {v1.4h}, [%1] \n"
                        "fmla v24.8h, %8.8h, v0.h[0] \n"
                        "fmla v25.8h, %8.8h, v0.h[1] \n"
                        "fmla v26.8h, %8.8h, v0.h[2] \n"
                        "fmla v27.8h, %8.8h, v0.h[3] \n"
                        "fmla v28.8h, %8.8h, v0.h[4] \n"
                        "fmla v29.8h, %8.8h, v0.h[5] \n"
                        "fmla v30.8h, %8.8h, v0.h[6] \n"
                        "fmla v31.8h, %8.8h, v0.h[7] \n"
                        "fmla v24.8h, %9.8h, v0.h[1] \n"
                        "fmla v25.8h, %9.8h, v0.h[2] \n"
                        "fmla v26.8h, %9.8h, v0.h[3] \n"
                        "fmla v27.8h, %9.8h, v0.h[4] \n"
                        "fmla v28.8h, %9.8h, v0.h[5] \n"
                        "fmla v29.8h, %9.8h, v0.h[6] \n"
                        "fmla v30.8h, %9.8h, v0.h[7] \n"
                        "fmla v31.8h, %9.8h, v1.h[0] \n"
                        "fmla v24.8h, %10.8h, v0.h[2] \n"
                        "fmla v25.8h, %10.8h, v0.h[3] \n"
                        "fmla v26.8h, %10.8h, v0.h[4] \n"
                        "fmla v27.8h, %10.8h, v0.h[5] \n"
                        "fmla v28.8h, %10.8h, v0.h[6] \n"
                        "fmla v29.8h, %10.8h, v0.h[7] \n"
                        "fmla v30.8h, %10.8h, v1.h[0] \n"
                        "fmla v31.8h, %10.8h, v1.h[1] \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v2.8h}, [%2], #16 \n" // r1
                        "ld1 {v3.4h}, [%2] \n"
                        "fmla v24.8h, %11.8h, v2.h[0] \n"
                        "fmla v25.8h, %11.8h, v2.h[1] \n"
                        "fmla v26.8h, %11.8h, v2.h[2] \n"
                        "fmla v27.8h, %11.8h, v2.h[3] \n"
                        "fmla v28.8h, %11.8h, v2.h[4] \n"
                        "fmla v29.8h, %11.8h, v2.h[5] \n"
                        "fmla v30.8h, %11.8h, v2.h[6] \n"
                        "fmla v31.8h, %11.8h, v2.h[7] \n"
                        "fmla v24.8h, %12.8h, v2.h[1] \n"
                        "fmla v25.8h, %12.8h, v2.h[2] \n"
                        "fmla v26.8h, %12.8h, v2.h[3] \n"
                        "fmla v27.8h, %12.8h, v2.h[4] \n"
                        "fmla v28.8h, %12.8h, v2.h[5] \n"
                        "fmla v29.8h, %12.8h, v2.h[6] \n"
                        "fmla v30.8h, %12.8h, v2.h[7] \n"
                        "fmla v31.8h, %12.8h, v3.h[0] \n"
                        "fmla v24.8h, %13.8h, v2.h[2] \n"
                        "fmla v25.8h, %13.8h, v2.h[3] \n"
                        "fmla v26.8h, %13.8h, v2.h[4] \n"
                        "fmla v27.8h, %13.8h, v2.h[5] \n"
                        "fmla v28.8h, %13.8h, v2.h[6] \n"
                        "fmla v29.8h, %13.8h, v2.h[7] \n"
                        "fmla v30.8h, %13.8h, v3.h[0] \n"
                        "fmla v31.8h, %13.8h, v3.h[1] \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v4.8h}, [%3], #16 \n" // r2
                        "ld1 {v5.4h}, [%3] \n"
                        "fmla v24.8h, %14.8h, v4.h[0] \n"
                        "fmla v25.8h, %14.8h, v4.h[1] \n"
                        "fmla v26.8h, %14.8h, v4.h[2] \n"
                        "fmla v27.8h, %14.8h, v4.h[3] \n"
                        "fmla v28.8h, %14.8h, v4.h[4] \n"
                        "fmla v29.8h, %14.8h, v4.h[5] \n"
                        "fmla v30.8h, %14.8h, v4.h[6] \n"
                        "fmla v31.8h, %14.8h, v4.h[7] \n"
                        "fmla v24.8h, %15.8h, v4.h[1] \n"
                        "fmla v25.8h, %15.8h, v4.h[2] \n"
                        "fmla v26.8h, %15.8h, v4.h[3] \n"
                        "fmla v27.8h, %15.8h, v4.h[4] \n"
                        "fmla v28.8h, %15.8h, v4.h[5] \n"
                        "fmla v29.8h, %15.8h, v4.h[6] \n"
                        "fmla v30.8h, %15.8h, v4.h[7] \n"
                        "fmla v31.8h, %15.8h, v5.h[0] \n"
                        "fmla v24.8h, %16.8h, v4.h[2] \n"
                        "fmla v25.8h, %16.8h, v4.h[3] \n"
                        "fmla v26.8h, %16.8h, v4.h[4] \n"
                        "fmla v27.8h, %16.8h, v4.h[5] \n"
                        "fmla v28.8h, %16.8h, v4.h[6] \n"
                        "fmla v29.8h, %16.8h, v4.h[7] \n"
                        "fmla v30.8h, %16.8h, v5.h[0] \n"
                        "fmla v31.8h, %16.8h, v5.h[1] \n"
                        "st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%0], #64 \n"
                        "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),        // %1
                        "=r"(r1),        // %2
                        "=r"(r2)         // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00), // %8
                        "w"(_k01), // %9
                        "w"(_k02), // %10
                        "w"(_k10), // %11
                        "w"(_k11), // %12
                        "w"(_k12), // %13
                        "w"(_k20), // %14
                        "w"(_k21), // %15
                        "w"(_k22)  // %16
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
                }
                // 4 output pixels per iteration (sums in v28-v31)
                for (; j + 3 < outw; j += 4)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #512] \n"
                        "ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum0 sum1 sum2 sum3
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v0.8h}, [%1] \n" // r0
                        "fmla v28.8h, %8.8h, v0.h[0] \n"
                        "fmla v29.8h, %8.8h, v0.h[1] \n"
                        "fmla v30.8h, %8.8h, v0.h[2] \n"
                        "fmla v31.8h, %8.8h, v0.h[3] \n"
                        "fmla v28.8h, %9.8h, v0.h[1] \n"
                        "fmla v29.8h, %9.8h, v0.h[2] \n"
                        "fmla v30.8h, %9.8h, v0.h[3] \n"
                        "fmla v31.8h, %9.8h, v0.h[4] \n"
                        "fmla v28.8h, %10.8h, v0.h[2] \n"
                        "fmla v29.8h, %10.8h, v0.h[3] \n"
                        "fmla v30.8h, %10.8h, v0.h[4] \n"
                        "fmla v31.8h, %10.8h, v0.h[5] \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v1.8h}, [%2] \n" // r1
                        "fmla v28.8h, %11.8h, v1.h[0] \n"
                        "fmla v29.8h, %11.8h, v1.h[1] \n"
                        "fmla v30.8h, %11.8h, v1.h[2] \n"
                        "fmla v31.8h, %11.8h, v1.h[3] \n"
                        "fmla v28.8h, %12.8h, v1.h[1] \n"
                        "fmla v29.8h, %12.8h, v1.h[2] \n"
                        "fmla v30.8h, %12.8h, v1.h[3] \n"
                        "fmla v31.8h, %12.8h, v1.h[4] \n"
                        "fmla v28.8h, %13.8h, v1.h[2] \n"
                        "fmla v29.8h, %13.8h, v1.h[3] \n"
                        "fmla v30.8h, %13.8h, v1.h[4] \n"
                        "fmla v31.8h, %13.8h, v1.h[5] \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v2.8h}, [%3] \n" // r2
                        "fmla v28.8h, %14.8h, v2.h[0] \n"
                        "fmla v29.8h, %14.8h, v2.h[1] \n"
                        "fmla v30.8h, %14.8h, v2.h[2] \n"
                        "fmla v31.8h, %14.8h, v2.h[3] \n"
                        "fmla v28.8h, %15.8h, v2.h[1] \n"
                        "fmla v29.8h, %15.8h, v2.h[2] \n"
                        "fmla v30.8h, %15.8h, v2.h[3] \n"
                        "fmla v31.8h, %15.8h, v2.h[4] \n"
                        "fmla v28.8h, %16.8h, v2.h[2] \n"
                        "fmla v29.8h, %16.8h, v2.h[3] \n"
                        "fmla v30.8h, %16.8h, v2.h[4] \n"
                        "fmla v31.8h, %16.8h, v2.h[5] \n"
                        "add %1, %1, #8 \n"
                        "add %2, %2, #8 \n"
                        "add %3, %3, #8 \n"
                        "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),        // %1
                        "=r"(r1),        // %2
                        "=r"(r2)         // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00), // %8
                        "w"(_k01), // %9
                        "w"(_k02), // %10
                        "w"(_k10), // %11
                        "w"(_k11), // %12
                        "w"(_k12), // %13
                        "w"(_k20), // %14
                        "w"(_k21), // %15
                        "w"(_k22)  // %16
                        : "cc", "memory", "v0", "v1", "v2", "v28", "v29", "v30", "v31");
                }
                // 2 output pixels per iteration (sums in v30-v31)
                for (; j + 1 < outw; j += 2)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #256] \n"
                        "ld1 {v30.8h, v31.8h}, [%0] \n" // sum0 sum1
                        "prfm pldl1keep, [%1, #64] \n"
                        "ld1 {v0.4h}, [%1] \n" // r0
                        "fmla v30.8h, %8.8h, v0.h[0] \n"
                        "fmla v31.8h, %8.8h, v0.h[1] \n"
                        "fmla v30.8h, %9.8h, v0.h[1] \n"
                        "fmla v31.8h, %9.8h, v0.h[2] \n"
                        "fmla v30.8h, %10.8h, v0.h[2] \n"
                        "fmla v31.8h, %10.8h, v0.h[3] \n"
                        "prfm pldl1keep, [%2, #64] \n"
                        "ld1 {v1.4h}, [%2] \n" // r1
                        "fmla v30.8h, %11.8h, v1.h[0] \n"
                        "fmla v31.8h, %11.8h, v1.h[1] \n"
                        "fmla v30.8h, %12.8h, v1.h[1] \n"
                        "fmla v31.8h, %12.8h, v1.h[2] \n"
                        "fmla v30.8h, %13.8h, v1.h[2] \n"
                        "fmla v31.8h, %13.8h, v1.h[3] \n"
                        "prfm pldl1keep, [%3, #64] \n"
                        "ld1 {v2.4h}, [%3] \n" // r2
                        "fmla v30.8h, %14.8h, v2.h[0] \n"
                        "fmla v31.8h, %14.8h, v2.h[1] \n"
                        "fmla v30.8h, %15.8h, v2.h[1] \n"
                        "fmla v31.8h, %15.8h, v2.h[2] \n"
                        "fmla v30.8h, %16.8h, v2.h[2] \n"
                        "fmla v31.8h, %16.8h, v2.h[3] \n"
                        "add %1, %1, #4 \n"
                        "add %2, %2, #4 \n"
                        "add %3, %3, #4 \n"
                        "st1 {v30.8h, v31.8h}, [%0], #32 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),        // %1
                        "=r"(r1),        // %2
                        "=r"(r2)         // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00), // %8
                        "w"(_k01), // %9
                        "w"(_k02), // %10
                        "w"(_k10), // %11
                        "w"(_k11), // %12
                        "w"(_k12), // %13
                        "w"(_k20), // %14
                        "w"(_k21), // %15
                        "w"(_k22)  // %16
                        : "cc", "memory", "v0", "v1", "v2", "v30", "v31");
                }
                // tail: 1 output pixel at a time (sum in v30)
                for (; j < outw; j++)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #128] \n"
                        "ld1 {v30.8h}, [%0] \n" // sum0
                        "prfm pldl1keep, [%1, #64] \n"
                        "ld1 {v0.4h}, [%1] \n" // r0
                        "fmla v30.8h, %8.8h, v0.h[0] \n"
                        "fmla v30.8h, %9.8h, v0.h[1] \n"
                        "fmla v30.8h, %10.8h, v0.h[2] \n"
                        "prfm pldl1keep, [%2, #64] \n"
                        "ld1 {v1.4h}, [%2] \n" // r1
                        "fmla v30.8h, %11.8h, v1.h[0] \n"
                        "fmla v30.8h, %12.8h, v1.h[1] \n"
                        "fmla v30.8h, %13.8h, v1.h[2] \n"
                        "prfm pldl1keep, [%3, #64] \n"
                        "ld1 {v2.4h}, [%3] \n" // r2
                        "fmla v30.8h, %14.8h, v2.h[0] \n"
                        "fmla v30.8h, %15.8h, v2.h[1] \n"
                        "fmla v30.8h, %16.8h, v2.h[2] \n"
                        "add %1, %1, #2 \n"
                        "add %2, %2, #2 \n"
                        "add %3, %3, #2 \n"
                        "st1 {v30.8h}, [%0], #16 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),        // %1
                        "=r"(r1),        // %2
                        "=r"(r2)         // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00), // %8
                        "w"(_k01), // %9
                        "w"(_k02), // %10
                        "w"(_k10), // %11
                        "w"(_k11), // %12
                        "w"(_k12), // %13
                        "w"(_k20), // %14
                        "w"(_k21), // %15
                        "w"(_k22)  // %16
                        : "cc", "memory", "v0", "v1", "v2", "v30");
                }
                // advance to the next input row (the 3x3 window leaves
                // 2 extra pixels consumed at the end of each row)
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
            // next input channel's 9 taps x 8 output channels
            k0 += 9 * 8;
        }
    }
}

// 3x3 stride-2 variant of the kernel above; identical weight layout and
// pack1->pack8 data layout, but consecutive output pixels read input
// elements two apart.
static void conv3x3s2_pack1to8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // after consuming 2*outw elements of a row, finish the row and skip
    // one more row: net advance of two input rows per output row
    const int tailstep = w - 2 * outw + w;

    const __fp16* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        // seed with the bias (or zero) before accumulating input channels
        float16x8_t _bias0 = bias ? vld1q_f16(bias + p * 8) : vdupq_n_f16((__fp16)0.f);
        out0.fill(_bias0);

        const __fp16* k0 = kernel.channel(p);

        int q = 0;
        for (; q < inch; q++)
        {
            __fp16* outptr0 = out0;

            const Mat img0 = bottom_blob.channel(q);

            const __fp16* r0 = img0.row<const __fp16>(0);
            const __fp16* r1 = img0.row<const __fp16>(1);
            const __fp16* r2 = img0.row<const __fp16>(2);

            float16x8_t _k00 = vld1q_f16(k0);
            float16x8_t _k01 = vld1q_f16(k0 + 8);
            float16x8_t _k02 = vld1q_f16(k0 + 16);
            float16x8_t _k10 = vld1q_f16(k0 + 24);
            float16x8_t _k11 = vld1q_f16(k0 + 32);
            float16x8_t _k12 = vld1q_f16(k0 + 40);
            float16x8_t _k20 = vld1q_f16(k0 + 48);
            float16x8_t _k21 = vld1q_f16(k0 + 56);
            float16x8_t _k22 = vld1q_f16(k0 + 64);

            int i = 0;
            for (; i < outh; i++)
            {
                int j = 0;
                // 4 output pixels per iteration; input lanes are read at
                // even/odd offsets because of the stride of 2
                for (; j + 3 < outw; j += 4)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #512] \n"
                        "ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum0 sum1 sum2 sum3
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v0.8h}, [%1], #16 \n" // r0
                        "ld1 {v1.h}[0], [%1] \n"
                        "fmla v28.8h, %8.8h, v0.h[0] \n"
                        "fmla v29.8h, %8.8h, v0.h[2] \n"
                        "fmla v30.8h, %8.8h, v0.h[4] \n"
                        "fmla v31.8h, %8.8h, v0.h[6] \n"
                        "fmla v28.8h, %9.8h, v0.h[1] \n"
                        "fmla v29.8h, %9.8h, v0.h[3] \n"
                        "fmla v30.8h, %9.8h, v0.h[5] \n"
                        "fmla v31.8h, %9.8h, v0.h[7] \n"
                        "fmla v28.8h, %10.8h, v0.h[2] \n"
                        "fmla v29.8h, %10.8h, v0.h[4] \n"
                        "fmla v30.8h, %10.8h, v0.h[6] \n"
                        "fmla v31.8h, %10.8h, v1.h[0] \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v2.8h}, [%2], #16 \n" // r1
                        "ld1 {v3.h}[0], [%2] \n"
                        "fmla v28.8h, %11.8h, v2.h[0] \n"
                        "fmla v29.8h, %11.8h, v2.h[2] \n"
                        "fmla v30.8h, %11.8h, v2.h[4] \n"
                        "fmla v31.8h, %11.8h, v2.h[6] \n"
                        "fmla v28.8h, %12.8h, v2.h[1] \n"
                        "fmla v29.8h, %12.8h, v2.h[3] \n"
                        "fmla v30.8h, %12.8h, v2.h[5] \n"
                        "fmla v31.8h, %12.8h, v2.h[7] \n"
                        "fmla v28.8h, %13.8h, v2.h[2] \n"
                        "fmla v29.8h, %13.8h, v2.h[4] \n"
                        "fmla v30.8h, %13.8h, v2.h[6] \n"
                        "fmla v31.8h, %13.8h, v3.h[0] \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v4.8h}, [%3], #16 \n" // r2
                        "ld1 {v5.h}[0], [%3] \n"
                        "fmla v28.8h, %14.8h, v4.h[0] \n"
                        "fmla v29.8h, %14.8h, v4.h[2] \n"
                        "fmla v30.8h, %14.8h, v4.h[4] \n"
                        "fmla v31.8h, %14.8h, v4.h[6] \n"
                        "fmla v28.8h, %15.8h, v4.h[1] \n"
                        "fmla v29.8h, %15.8h, v4.h[3] \n"
                        "fmla v30.8h, %15.8h, v4.h[5] \n"
                        "fmla v31.8h, %15.8h, v4.h[7] \n"
                        "fmla v28.8h, %16.8h, v4.h[2] \n"
                        "fmla v29.8h, %16.8h, v4.h[4] \n"
                        "fmla v30.8h, %16.8h, v4.h[6] \n"
                        "fmla v31.8h, %16.8h, v5.h[0] \n"
                        "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),        // %1
                        "=r"(r1),        // %2
                        "=r"(r2)         // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00), // %8
                        "w"(_k01), // %9
                        "w"(_k02), // %10
                        "w"(_k10), // %11
                        "w"(_k11), // %12
                        "w"(_k12), // %13
                        "w"(_k20), // %14
                        "w"(_k21), // %15
                        "w"(_k22)  // %16
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v28", "v29", "v30", "v31");
                }
                // 2 output pixels per iteration
                for (; j + 1 < outw; j += 2)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #256] \n"
                        "ld1 {v30.8h, v31.8h}, [%0] \n" // sum0 sum1
                        "prfm pldl1keep, [%1, #64] \n"
                        "ld1 {v0.4h}, [%1], #8 \n" // r0
                        "ld1 {v1.h}[0], [%1] \n"
                        "fmla v30.8h, %8.8h, v0.h[0] \n"
                        "fmla v31.8h, %8.8h, v0.h[2] \n"
                        "fmla v30.8h, %9.8h, v0.h[1] \n"
                        "fmla v31.8h, %9.8h, v0.h[3] \n"
                        "fmla v30.8h, %10.8h, v0.h[2] \n"
                        "fmla v31.8h, %10.8h, v1.h[0] \n"
                        "prfm pldl1keep, [%2, #64] \n"
                        "ld1 {v2.4h}, [%2], #8 \n" // r1
                        "ld1 {v3.h}[0], [%2] \n"
                        "fmla v30.8h, %11.8h, v2.h[0] \n"
                        "fmla v31.8h, %11.8h, v2.h[2] \n"
                        "fmla v30.8h, %12.8h, v2.h[1] \n"
                        "fmla v31.8h, %12.8h, v2.h[3] \n"
                        "fmla v30.8h, %13.8h, v2.h[2] \n"
                        "fmla v31.8h, %13.8h, v3.h[0] \n"
                        "prfm pldl1keep, [%3, #64] \n"
                        "ld1 {v4.4h}, [%3], #8 \n" // r2
                        "ld1 {v5.h}[0], [%3] \n"
                        "fmla v30.8h, %14.8h, v4.h[0] \n"
                        "fmla v31.8h, %14.8h, v4.h[2] \n"
                        "fmla v30.8h, %15.8h, v4.h[1] \n"
                        "fmla v31.8h, %15.8h, v4.h[3] \n"
                        "fmla v30.8h, %16.8h, v4.h[2] \n"
                        "fmla v31.8h, %16.8h, v5.h[0] \n"
                        "st1 {v30.8h, v31.8h}, [%0], #32 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),        // %1
                        "=r"(r1),        // %2
                        "=r"(r2)         // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00), // %8
                        "w"(_k01), // %9
                        "w"(_k02), // %10
                        "w"(_k10), // %11
                        "w"(_k11), // %12
                        "w"(_k12), // %13
                        "w"(_k20), // %14
                        "w"(_k21), // %15
                        "w"(_k22)  // %16
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v30", "v31");
                }
                // tail: 1 output pixel at a time (pointer advances by 2
                // input elements, i.e. #4 bytes of __fp16)
                for (; j < outw; j++)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #128] \n"
                        "ld1 {v30.8h}, [%0] \n" // sum0
                        "prfm pldl1keep, [%1, #64] \n"
                        "ld1 {v0.4h}, [%1] \n" // r0
                        "fmla v30.8h, %8.8h, v0.h[0] \n"
                        "fmla v30.8h, %9.8h, v0.h[1] \n"
                        "fmla v30.8h, %10.8h, v0.h[2] \n"
                        "prfm pldl1keep, [%2, #64] \n"
                        "ld1 {v1.4h}, [%2] \n" // r1
                        "fmla v30.8h, %11.8h, v1.h[0] \n"
                        "fmla v30.8h, %12.8h, v1.h[1] \n"
                        "fmla v30.8h, %13.8h, v1.h[2] \n"
                        "prfm pldl1keep, [%3, #64] \n"
                        "ld1 {v2.4h}, [%3] \n" // r2
                        "fmla v30.8h, %14.8h, v2.h[0] \n"
                        "fmla v30.8h, %15.8h, v2.h[1] \n"
                        "fmla v30.8h, %16.8h, v2.h[2] \n"
                        "add %1, %1, #4 \n"
                        "add %2, %2, #4 \n"
                        "add %3, %3, #4 \n"
                        "st1 {v30.8h}, [%0], #16 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0),        // %1
                        "=r"(r1),        // %2
                        "=r"(r2)         // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00), // %8
                        "w"(_k01), // %9
                        "w"(_k02), // %10
                        "w"(_k10), // %11
                        "w"(_k11), // %12
                        "w"(_k12), // %13
                        "w"(_k20), // %14
                        "w"(_k21), // %15
                        "w"(_k22)  // %16
                        : "cc", "memory", "v0", "v1", "v2", "v30");
                }
                // skip the remainder of this row plus one whole row
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }
            k0 += 9 * 8;
        }
    }
}
pi-v6.c
/* * Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x) * between 0 and 1. * * parallel version using OpenMP */ #include <stdio.h> #include <stdlib.h> #include <omp.h> /* OpenMP */ #if _DEBUG_ #define _DEBUG_ 1 #else #define _DEBUG_ 0 #include "extrae_user_events.h" #define PROGRAM 1000 #define PI_COMPUTATION 1 #define END 0 #endif int main(int argc, char *argv[]) { double x, sum=0.0, pi=0.0; #if _DEBUG_ double start,end; #endif int i; const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n"; if (argc < 2) { fprintf(stderr, Usage); exit(1); } int num_steps = atoi(argv[1]); double step = 1.0/(double) num_steps; #if _DEBUG_ start= omp_get_wtime(); #else Extrae_event (PROGRAM, PI_COMPUTATION); #endif /* do computation -- using all available threads */ // WARNING : correct code #pragma omp parallel private(i, x) reduction(+:sum) { int id = omp_get_thread_num(); int num_threads = omp_get_num_threads(); // interleaved execution of iterations among threads for (i=id; i < num_steps; i=i+num_threads) { x = (i+0.5)*step; sum += 4.0/(1.0+x*x); #if _DEBUG_ printf("thread id:%d it:%d\n",id,i); #endif } } pi = step * sum; #if _DEBUG_ end = omp_get_wtime(); printf("Wall clock execution time = %.9f seconds\n", end-start); #else Extrae_event (PROGRAM, END); #endif /* print results */ printf("Value of pi = %12.10f\n", pi); return EXIT_SUCCESS; }
GB_unop__identity_int8_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__identity_int8_fp32)
// op(A') function: GB (_unop_tran__identity_int8_fp32)

// C type: int8_t
// A type: float
// cast: int8_t cij = GB_cast_to_int8_t ((double) (aij))
// unaryop: cij = aij

#define GB_ATYPE \
    float

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: no change beyond the typecast)
#define GB_OP(z, x) \
    z = x ;

// casting (float -> int8_t, widened through double first)
#define GB_CAST(z, aij) \
    int8_t z = GB_cast_to_int8_t ((double) (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int8_t z = GB_cast_to_int8_t ((double) (aij)) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = identity (cast (Ax [p])) for all anz entries, in parallel.
// Ab, when non-NULL, is the bitmap marking which entries of Ax are present.
GrB_Info GB (_unop_apply__identity_int8_fp32)
(
    int8_t *Cx,                 // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // p is the parallel loop index (privatized by the OpenMP for)
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every entry 0..anz-1 is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            int8_t z = GB_cast_to_int8_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            int8_t z = GB_cast_to_int8_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared template code; the GB_* macros above
// specialize it for this type/operator combination.
GrB_Info GB (_unop_tran__identity_int8_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
c-tree.h
/* Definitions for C parsing and type checking. Copyright (C) 1987-2015 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_C_TREE_H #define GCC_C_TREE_H #include "c-family/c-common.h" #include "diagnostic.h" /* struct lang_identifier is private to c-decl.c, but langhooks.c needs to know how big it is. This is sanity-checked in c-decl.c. */ #define C_SIZEOF_STRUCT_LANG_IDENTIFIER \ (sizeof (struct c_common_identifier) + 3 * sizeof (void *)) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */ #define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */ #define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE) /* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE nonzero if the definition of the type has already started. */ #define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE) /* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable declarations whose type would be completed by completing that type. */ #define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE) /* In an IDENTIFIER_NODE, nonzero if this identifier is actually a keyword. C_RID_CODE (node) is then the RID_* value of the keyword, and C_RID_YYCODE is the token number wanted by Yacc. 
*/ #define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID) /* Record whether a type or decl was written with nonconstant size. Note that TYPE_SIZE may have simplified to a constant. */ #define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE) #define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE) /* Record whether a type is defined inside a struct or union type. This is used for -Wc++-compat. */ #define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE) /* Record whether an "incomplete type" error was given for the type. */ #define C_TYPE_ERROR_REPORTED(TYPE) TYPE_LANG_FLAG_3 (TYPE) /* Record whether a typedef for type `int' was actually `signed int'. */ #define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP) /* For a FUNCTION_DECL, nonzero if it was defined without an explicit return type. */ #define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP) /* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */ #define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP) /* For a PARM_DECL, nonzero if it was declared as an array. */ #define C_ARRAY_PARAMETER(NODE) DECL_LANG_FLAG_0 (NODE) /* For FUNCTION_DECLs, evaluates true if the decl is built-in but has been declared. */ #define C_DECL_DECLARED_BUILTIN(EXP) \ DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP)) /* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a built-in prototype and does not have a non-built-in prototype. */ #define C_DECL_BUILTIN_PROTOTYPE(EXP) \ DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP)) /* Record whether a decl was declared register. This is strictly a front-end flag, whereas DECL_REGISTER is used for code generation; they may differ for structures with volatile fields. */ #define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP) /* Record whether a decl was used in an expression anywhere except an unevaluated operand of sizeof / typeof / alignof. 
This is only used for functions declared static but not defined, though outside sizeof and typeof it is set for other function decls as well. */ #define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP)) /* Record whether a variable has been declared threadprivate by #pragma omp threadprivate. */ #define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL)) /* Nonzero for a decl which either doesn't exist or isn't a prototype. N.B. Could be simplified if all built-in decls had complete prototypes (but this is presently difficult because some of them need FILE*). */ #define C_DECL_ISNT_PROTOTYPE(EXP) \ (EXP == 0 \ || (!prototype_p (TREE_TYPE (EXP)) \ && !DECL_BUILT_IN (EXP))) /* For FUNCTION_TYPE, a hidden list of types of arguments. The same as TYPE_ARG_TYPES for functions with prototypes, but created for functions without prototypes. */ #define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE) /* For a CONSTRUCTOR, whether some initializer contains a subexpression meaning it is not a constant expression. */ #define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR)) /* Record parser information about an expression that is irrelevant for code generation alongside a tree representing its value. */ struct c_expr { /* The value of the expression. */ tree value; /* Record the original unary/binary operator of an expression, which may have been changed by fold, STRING_CST for unparenthesized string constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls (even if parenthesized), for subexpressions, and for non-constant initializers, or ERROR_MARK for other expressions (including parenthesized expressions). */ enum tree_code original_code; /* If not NULL, the original type of an expression. This will differ from the type of the value field for an enum constant. The type of an enum constant is a plain integer type, but this field will be the enum type. */ tree original_type; }; /* Type alias for struct c_expr. 
This allows to use the structure inside the VEC types. */ typedef struct c_expr c_expr_t; /* A kind of type specifier. Note that this information is currently only used to distinguish tag definitions, tag references and typeof uses. */ enum c_typespec_kind { /* No typespec. This appears only in struct c_declspec. */ ctsk_none, /* A reserved keyword type specifier. */ ctsk_resword, /* A reference to a tag, previously declared, such as "struct foo". This includes where the previous declaration was as a different kind of tag, in which case this is only valid if shadowing that tag in an inner scope. */ ctsk_tagref, /* A reference to a tag, not previously declared in a visible scope. */ ctsk_tagfirstref, /* A definition of a tag such as "struct foo { int a; }". */ ctsk_tagdef, /* A typedef name. */ ctsk_typedef, /* An ObjC-specific kind of type specifier. */ ctsk_objc, /* A typeof specifier, or _Atomic ( type-name ). */ ctsk_typeof }; /* A type specifier: this structure is created in the parser and passed to declspecs_add_type only. */ struct c_typespec { /* What kind of type specifier this is. */ enum c_typespec_kind kind; /* Whether the expression has operands suitable for use in constant expressions. */ bool expr_const_operands; /* The specifier itself. */ tree spec; /* An expression to be evaluated before the type specifier, in the case of typeof specifiers, or NULL otherwise or if no such expression is required for a particular typeof specifier. In particular, when typeof is applied to an expression of variably modified type, that expression must be evaluated in order to determine array sizes that form part of the type, but the expression itself (as opposed to the array sizes) forms no part of the type and so needs to be recorded separately. */ tree expr; }; /* A storage class specifier. 
*/ enum c_storage_class { csc_none, csc_auto, csc_extern, csc_register, csc_static, csc_typedef }; /* A type specifier keyword "void", "_Bool", "char", "int", "float", "double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum", or none of these. */ enum c_typespec_keyword { cts_none, cts_void, cts_bool, cts_char, cts_int, cts_float, cts_int_n, cts_double, cts_dfloat32, cts_dfloat64, cts_dfloat128, cts_fract, cts_accum, cts_auto_type }; /* This enum lists all the possible declarator specifiers, storage class or attribute that a user can write. There is at least one enumerator per possible declarator specifier in the struct c_declspecs below. It is used to index the array of declspec locations in struct c_declspecs. */ enum c_declspec_word { cdw_typespec /* A catch-all for a typespec. */, cdw_storage_class /* A catch-all for a storage class */, cdw_attributes, cdw_typedef, cdw_explicit_signed, cdw_deprecated, cdw_default_int, cdw_long, cdw_long_long, cdw_short, cdw_signed, cdw_unsigned, cdw_complex, cdw_inline, cdw_noreturn, cdw_thread, cdw_const, cdw_volatile, cdw_restrict, cdw_saturating, cdw_alignas, cdw_address_space, cdw_number_of_elements /* This one must always be the last enumerator. */ }; /* A sequence of declaration specifiers in C. When a new declaration specifier is added, please update the enum c_declspec_word above accordingly. */ struct c_declspecs { source_location locations[cdw_number_of_elements]; /* The type specified, if a single type specifier such as a struct, union or enum specifier, typedef name or typeof specifies the whole type, or NULL_TREE if none or a keyword such as "void" or "char" is used. Does not include qualifiers. */ tree type; /* Any expression to be evaluated before the type, from a typeof specifier. */ tree expr; /* The attributes from a typedef decl. */ tree decl_attr; /* When parsing, the attributes. Outside the parser, this will be NULL; attributes (possibly from multiple lists) will be passed separately. 
*/ tree attrs; /* The base-2 log of the greatest alignment required by an _Alignas specifier, in bytes, or -1 if no such specifiers with nonzero alignment. */ int align_log; /* For the __intN declspec, this stores the index into the int_n_* arrays. */ int int_n_idx; /* The storage class specifier, or csc_none if none. */ enum c_storage_class storage_class; /* Any type specifier keyword used such as "int", not reflecting modifiers such as "short", or cts_none if none. */ ENUM_BITFIELD (c_typespec_keyword) typespec_word : 8; /* The kind of type specifier if one has been seen, ctsk_none otherwise. */ ENUM_BITFIELD (c_typespec_kind) typespec_kind : 3; /* Whether any expressions in typeof specifiers may appear in constant expressions. */ BOOL_BITFIELD expr_const_operands : 1; /* Whether any declaration specifiers have been seen at all. */ BOOL_BITFIELD declspecs_seen_p : 1; /* Whether something other than a storage class specifier or attribute has been seen. This is used to warn for the obsolescent usage of storage class specifiers other than at the start of the list. (Doing this properly would require function specifiers to be handled separately from storage class specifiers.) */ BOOL_BITFIELD non_sc_seen_p : 1; /* Whether the type is specified by a typedef or typeof name. */ BOOL_BITFIELD typedef_p : 1; /* Whether the type is explicitly "signed" or specified by a typedef whose type is explicitly "signed". */ BOOL_BITFIELD explicit_signed_p : 1; /* Whether the specifiers include a deprecated typedef. */ BOOL_BITFIELD deprecated_p : 1; /* Whether the type defaulted to "int" because there were no type specifiers. */ BOOL_BITFIELD default_int_p : 1; /* Whether "long" was specified. */ BOOL_BITFIELD long_p : 1; /* Whether "long" was specified more than once. */ BOOL_BITFIELD long_long_p : 1; /* Whether "short" was specified. */ BOOL_BITFIELD short_p : 1; /* Whether "signed" was specified. */ BOOL_BITFIELD signed_p : 1; /* Whether "unsigned" was specified. 
*/ BOOL_BITFIELD unsigned_p : 1; /* Whether "complex" was specified. */ BOOL_BITFIELD complex_p : 1; /* Whether "inline" was specified. */ BOOL_BITFIELD inline_p : 1; /* Whether "_Noreturn" was speciied. */ BOOL_BITFIELD noreturn_p : 1; /* Whether "__thread" or "_Thread_local" was specified. */ BOOL_BITFIELD thread_p : 1; /* Whether "__thread" rather than "_Thread_local" was specified. */ BOOL_BITFIELD thread_gnu_p : 1; /* Whether "const" was specified. */ BOOL_BITFIELD const_p : 1; /* Whether "volatile" was specified. */ BOOL_BITFIELD volatile_p : 1; /* Whether "restrict" was specified. */ BOOL_BITFIELD restrict_p : 1; /* Whether "_Atomic" was specified. */ BOOL_BITFIELD atomic_p : 1; /* Whether "_Sat" was specified. */ BOOL_BITFIELD saturating_p : 1; /* Whether any alignment specifier (even with zero alignment) was specified. */ BOOL_BITFIELD alignas_p : 1; /* The address space that the declaration belongs to. */ addr_space_t address_space; }; /* The various kinds of declarators in C. */ enum c_declarator_kind { /* An identifier. */ cdk_id, /* A function. */ cdk_function, /* An array. */ cdk_array, /* A pointer. */ cdk_pointer, /* Parenthesized declarator with nested attributes. */ cdk_attrs }; typedef struct c_arg_tag_d { /* The argument name. */ tree id; /* The type of the argument. */ tree type; } c_arg_tag; /* Information about the parameters in a function declarator. */ struct c_arg_info { /* A list of parameter decls. */ tree parms; /* A list of structure, union and enum tags defined. */ vec<c_arg_tag, va_gc> *tags; /* A list of argument types to go in the FUNCTION_TYPE. */ tree types; /* A list of non-parameter decls (notably enumeration constants) defined with the parameters. */ tree others; /* A compound expression of VLA sizes from the parameters, or NULL. In a function definition, these are used to ensure that side-effects in sizes of arrays converted to pointers (such as a parameter int i[n++]) take place; otherwise, they are ignored. 
*/ tree pending_sizes; /* True when these arguments had [*]. */ BOOL_BITFIELD had_vla_unspec : 1; }; /* A declarator. */ struct c_declarator { /* The kind of declarator. */ enum c_declarator_kind kind; location_t id_loc; /* Currently only set for cdk_id, cdk_array. */ /* Except for cdk_id, the contained declarator. For cdk_id, NULL. */ struct c_declarator *declarator; union { /* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract declarator. */ tree id; /* For functions. */ struct c_arg_info *arg_info; /* For arrays. */ struct { /* The array dimension, or NULL for [] and [*]. */ tree dimen; /* The qualifiers inside []. */ int quals; /* The attributes (currently ignored) inside []. */ tree attrs; /* Whether [static] was used. */ BOOL_BITFIELD static_p : 1; /* Whether [*] was used. */ BOOL_BITFIELD vla_unspec_p : 1; } array; /* For pointers, the qualifiers on the pointer type. */ int pointer_quals; /* For attributes. */ tree attrs; } u; }; /* A type name. */ struct c_type_name { /* The declaration specifiers. */ struct c_declspecs *specs; /* The declarator. */ struct c_declarator *declarator; }; /* A parameter. */ struct c_parm { /* The declaration specifiers, minus any prefix attributes. */ struct c_declspecs *specs; /* The attributes. */ tree attrs; /* The declarator. */ struct c_declarator *declarator; }; /* Used when parsing an enum. Initialized by start_enum. */ struct c_enum_contents { /* While defining an enum type, this is 1 plus the last enumerator constant value. */ tree enum_next_value; /* Nonzero means that there was overflow computing enum_next_value. */ int enum_overflow; }; /* A type of reference to a static identifier in an inline function. */ enum c_inline_static_type { /* Identifier with internal linkage used in function that may be an inline definition (i.e., file-scope static). */ csi_internal, /* Modifiable object with static storage duration defined in function that may be an inline definition (i.e., local static). 
*/ csi_modifiable }; /* in c-parser.c */ extern void c_parse_init (void); /* in c-aux-info.c */ extern void gen_aux_info_record (tree, int, int, int); /* in c-decl.c */ struct c_spot_bindings; struct c_struct_parse_info; extern struct obstack parser_obstack; extern tree c_break_label; extern tree c_cont_label; extern bool global_bindings_p (void); extern void push_scope (void); extern tree pop_scope (void); extern void c_bindings_start_stmt_expr (struct c_spot_bindings *); extern void c_bindings_end_stmt_expr (struct c_spot_bindings *); extern void record_inline_static (location_t, tree, tree, enum c_inline_static_type); extern void c_init_decl_processing (void); extern void c_print_identifier (FILE *, tree, int); extern int quals_from_declspecs (const struct c_declspecs *); extern struct c_declarator *build_array_declarator (location_t, tree, struct c_declspecs *, bool, bool); extern tree build_enumerator (location_t, location_t, struct c_enum_contents *, tree, tree); extern tree check_for_loop_decls (location_t, bool); extern void mark_forward_parm_decls (void); extern void declare_parm_level (void); extern void undeclared_variable (location_t, tree); extern tree lookup_label_for_goto (location_t, tree); extern tree declare_label (tree); extern tree define_label (location_t, tree); extern struct c_spot_bindings *c_get_switch_bindings (void); extern void c_release_switch_bindings (struct c_spot_bindings *); extern bool c_check_switch_jump_warnings (struct c_spot_bindings *, location_t, location_t); extern void finish_decl (tree, location_t, tree, tree, tree); extern tree finish_enum (tree, tree, tree); extern void finish_function (void); extern tree finish_struct (location_t, tree, tree, tree, struct c_struct_parse_info *); extern struct c_arg_info *build_arg_info (void); extern struct c_arg_info *get_parm_info (bool, tree); extern tree grokfield (location_t, struct c_declarator *, struct c_declspecs *, tree, tree *); extern tree groktypename (struct c_type_name 
*, tree *, bool *); extern tree grokparm (const struct c_parm *, tree *); extern tree implicitly_declare (location_t, tree); extern void keep_next_level (void); extern void pending_xref_error (void); extern void c_push_function_context (void); extern void c_pop_function_context (void); extern void push_parm_decl (const struct c_parm *, tree *); extern struct c_declarator *set_array_declarator_inner (struct c_declarator *, struct c_declarator *); extern tree c_builtin_function (tree); extern tree c_builtin_function_ext_scope (tree); extern void shadow_tag (const struct c_declspecs *); extern void shadow_tag_warned (const struct c_declspecs *, int); extern tree start_enum (location_t, struct c_enum_contents *, tree); extern int start_function (struct c_declspecs *, struct c_declarator *, tree); extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool, tree); extern tree start_struct (location_t, enum tree_code, tree, struct c_struct_parse_info **); extern void store_parm_decls (void); extern void store_parm_decls_from (struct c_arg_info *); extern void temp_store_parm_decls (tree, tree); extern void temp_pop_parm_decls (void); extern tree xref_tag (enum tree_code, tree); extern struct c_typespec parser_xref_tag (location_t, enum tree_code, tree); extern struct c_parm *build_c_parm (struct c_declspecs *, tree, struct c_declarator *); extern struct c_declarator *build_attrs_declarator (tree, struct c_declarator *); extern struct c_declarator *build_function_declarator (struct c_arg_info *, struct c_declarator *); extern struct c_declarator *build_id_declarator (tree); extern struct c_declarator *make_pointer_declarator (struct c_declspecs *, struct c_declarator *); extern struct c_declspecs *build_null_declspecs (void); extern struct c_declspecs *declspecs_add_qual (source_location, struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_type (location_t, struct c_declspecs *, struct c_typespec); extern struct c_declspecs 
*declspecs_add_scspec (source_location, struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_attrs (source_location, struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_addrspace (source_location, struct c_declspecs *, addr_space_t); extern struct c_declspecs *declspecs_add_alignas (source_location, struct c_declspecs *, tree); extern struct c_declspecs *finish_declspecs (struct c_declspecs *); /* in c-objc-common.c */ extern bool c_objc_common_init (void); extern bool c_missing_noreturn_ok_p (tree); extern bool c_warn_unused_global_decl (const_tree); extern void c_initialize_diagnostics (diagnostic_context *); extern bool c_vla_unspec_p (tree x, tree fn); /* in c-typeck.c */ extern int in_alignof; extern int in_sizeof; extern int in_typeof; extern tree c_last_sizeof_arg; extern struct c_switch *c_switch_stack; extern tree c_objc_common_truthvalue_conversion (location_t, tree); extern tree require_complete_type (tree); extern int same_translation_unit_p (const_tree, const_tree); extern int comptypes (tree, tree); extern int comptypes_check_different_types (tree, tree, bool *); extern bool c_vla_type_p (const_tree); extern bool c_mark_addressable (tree); extern void c_incomplete_type_error (const_tree, const_tree); extern tree c_type_promotes_to (tree); extern struct c_expr default_function_array_conversion (location_t, struct c_expr); extern struct c_expr default_function_array_read_conversion (location_t, struct c_expr); extern struct c_expr convert_lvalue_to_rvalue (location_t, struct c_expr, bool, bool); extern void mark_exp_read (tree); extern tree composite_type (tree, tree); extern tree build_component_ref (location_t, tree, tree); extern tree build_array_ref (location_t, tree, tree); extern tree build_external_ref (location_t, tree, int, tree *); extern void pop_maybe_used (bool); extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr); extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *); 
extern struct c_expr parser_build_unary_op (location_t, enum tree_code, struct c_expr); extern struct c_expr parser_build_binary_op (location_t, enum tree_code, struct c_expr, struct c_expr); extern tree build_conditional_expr (location_t, tree, bool, tree, tree, tree, tree); extern tree build_compound_expr (location_t, tree, tree); extern tree c_cast_expr (location_t, struct c_type_name *, tree); extern tree build_c_cast (location_t, tree, tree); extern void store_init_value (location_t, tree, tree, tree); extern void maybe_warn_string_init (location_t, tree, struct c_expr); extern void start_init (tree, tree, int); extern void finish_init (void); extern void really_start_incremental_init (tree); extern void push_init_level (location_t, int, struct obstack *); extern struct c_expr pop_init_level (location_t, int, struct obstack *); extern void set_init_index (location_t, tree, tree, struct obstack *); extern void set_init_label (location_t, tree, struct obstack *); extern void process_init_element (location_t, struct c_expr, bool, struct obstack *); extern tree build_compound_literal (location_t, tree, tree, bool); extern void check_compound_literal_type (location_t, struct c_type_name *); extern tree c_start_case (location_t, location_t, tree, bool); extern void c_finish_case (tree, tree); extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool); extern tree build_asm_stmt (tree, tree); extern int c_types_compatible_p (tree, tree); extern tree c_begin_compound_stmt (bool); extern tree c_end_compound_stmt (location_t, tree, bool); extern void c_finish_if_stmt (location_t, tree, tree, tree, bool); extern void c_finish_loop (location_t, tree, tree, tree, tree, tree, bool); extern tree c_begin_stmt_expr (void); extern tree c_finish_stmt_expr (location_t, tree); extern tree c_process_expr_stmt (location_t, tree); extern tree c_finish_expr_stmt (location_t, tree); extern tree c_finish_return (location_t, tree, tree); extern tree c_finish_bc_stmt 
(location_t, tree *, bool); extern tree c_finish_goto_label (location_t, tree); extern tree c_finish_goto_ptr (location_t, tree); extern tree c_expr_to_decl (tree, bool *, bool *); extern tree c_finish_oacc_parallel (location_t, tree, tree); extern tree c_finish_oacc_kernels (location_t, tree, tree); extern tree c_finish_oacc_data (location_t, tree, tree); extern tree c_begin_omp_parallel (void); extern tree c_finish_omp_parallel (location_t, tree, tree); extern tree c_begin_omp_task (void); extern tree c_finish_omp_task (location_t, tree, tree); extern void c_finish_omp_cancel (location_t, tree); extern void c_finish_omp_cancellation_point (location_t, tree); extern tree c_finish_omp_clauses (tree); extern tree c_build_va_arg (location_t, tree, tree); extern tree c_finish_transaction (location_t, tree, int); extern bool c_tree_equal (tree, tree); extern tree c_build_function_call_vec (location_t, vec<location_t>, tree, vec<tree, va_gc> *, vec<tree, va_gc> *); /* Set to 0 at beginning of a function definition, set to 1 if a return statement that specifies a return value is seen. */ extern int current_function_returns_value; /* Set to 0 at beginning of a function definition, set to 1 if a return statement with no argument is seen. */ extern int current_function_returns_null; /* Set to 0 at beginning of a function definition, set to 1 if a call to a noreturn function is seen. */ extern int current_function_returns_abnormally; /* In c-decl.c */ /* Tell the binding oracle what kind of binding we are looking for. */ enum c_oracle_request { C_ORACLE_SYMBOL, C_ORACLE_TAG, C_ORACLE_LABEL }; /* If this is non-NULL, then it is a "binding oracle" which can lazily create bindings when needed by the C compiler. The oracle is told the name and type of the binding to create. It can call pushdecl or the like to ensure the binding is visible; or do nothing, leaving the binding untouched. 
c-decl.c takes note of when the oracle has been called and will not call it again if it fails to create a given binding. */ typedef void c_binding_oracle_function (enum c_oracle_request, tree identifier); extern c_binding_oracle_function *c_binding_oracle; extern void c_finish_incomplete_decl (tree); extern void c_write_global_declarations (void); extern tree c_omp_reduction_id (enum tree_code, tree); extern tree c_omp_reduction_decl (tree); extern tree c_omp_reduction_lookup (tree, tree); extern tree c_check_omp_declare_reduction_r (tree *, int *, void *); extern void c_pushtag (location_t, tree, tree); extern void c_bind (location_t, tree, bool); /* In c-errors.c */ extern void pedwarn_c90 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4); extern bool pedwarn_c99 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4); #endif /* ! GCC_C_TREE_H */
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 8; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4)); ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-3,4)),ceild(4*t2-Nz-4,8));t3<=min(min(min(floord(4*t2+Ny,8),floord(Nt+Ny-4,8)),floord(2*t1+Ny+1,8)),floord(4*t1-4*t2+Nz+Ny-1,8));t3++) { for (t4=max(max(max(0,ceild(t1-15,16)),ceild(4*t2-Nz-28,32)),ceild(8*t3-Ny-28,32));t4<=min(min(min(min(floord(4*t2+Nx,32),floord(Nt+Nx-4,32)),floord(2*t1+Nx+1,32)),floord(8*t3+Nx+4,32)),floord(4*t1-4*t2+Nz+Nx-1,32));t4++) { for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),8*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),8*t3+6),32*t4+30),4*t1-4*t2+Nz+1);t5++) { for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(32*t4,t5+1); ubv=min(32*t4+31,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } } } /* End of CLooG code */ 
gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
gm_map.h
#ifndef GM_MAP_H_ #define GM_MAP_H_ #include <map> #include "gm_internal.h" #include "gm_limits.h" #include "gm_lock.h" using namespace std; //map-interface template<class Key, class Value> class gm_map { public: virtual ~gm_map() { } ; virtual bool hasKey_seq(const Key key) = 0; virtual bool hasKey_par(const Key key) { return hasKey_seq(key); } /** * Return the value that has been set for the key - if none has been specified, the defaultValue is returned */ virtual Value getValue(const Key key) = 0; virtual void setValue_par(const Key key, Value value) = 0; virtual void setValue_seq(const Key key, Value value) = 0; /** * Sets the value mapped to the given key to the default value */ virtual void removeKey_par(const Key key) = 0; virtual void removeKey_seq(const Key key) = 0; /** * Returns true if the key corresponds to the highest value in the map. * If there has no value been set in the map, false is returned */ virtual bool hasMaxValue_seq(const Key key) = 0; virtual bool hasMaxValue_par(const Key key) { return hasMaxValue_seq(key); } /** * Returns true if the key corresponds to the lowest value in the map. * If there has no value been set in the map, false is returned */ virtual bool hasMinValue_seq(const Key key) = 0; virtual bool hasMinValue_par(const Key key) { return hasMinValue_seq(key); } /** * Returns the key that corresponds to the highest value in the map. * If there has no value been set in the map, the behavior is unspecified. */ virtual Key getMaxKey_seq() = 0; virtual Key getMaxKey_par() { return getMaxKey_seq(); } /** * Returns the key that corresponds to the lowest value in the map. * If there has no value been set in the map, the behavior is unspecified. */ virtual Key getMinKey_seq() = 0; virtual Key getMinKey_par() { return getMinKey_seq(); } /** * Returns the highest value in the map. * If there has no value been set in the map, the behavior is unspecified. 
*/ virtual Value getMaxValue_seq() = 0; virtual Value getMaxValue_par() { return getMaxValue_seq(); } /** * Returns the lowest value in the map. * If there has no value been set in the map, the behavior is unspecified. */ virtual Value getMinValue_seq() = 0; virtual Value getMinValue_par() { return getMinValue_seq(); } /** * Adds the value of summand to the value mapped to key and returns the result. * This operation is atomic. If no value if mapped to key, key is mapped to summand * and summand is returned. */ virtual Value changeValueAtomicAdd(const Key key, const Value summand) = 0; /** * This operation is equivalent to 'changeValueAtomicAdd(key, -1 * subtrahend)' */ virtual Value changeValueAtomicSubtract(const Key key, const Value subtrahend) { return changeValueAtomicAdd(key, -1 * subtrahend); } virtual size_t size() = 0; virtual void clear() = 0; protected: virtual Value getDefaultValue() = 0; static bool compare_smaller(Value a, Value b) { return a < b; } static bool compare_greater(Value a, Value b) { return a > b; } static Value max(Value a, Value b) { return std::max<Value>(a, b); } static Value min(Value a, Value b) { return std::min<Value>(a, b); } }; template<class Key, class Value> class gm_map_small : public gm_map<Key, Value> { private: map<Key, Value> data; const Value defaultValue; gm_spinlock_t lock; typedef typename map<Key, Value>::iterator Iterator; template<class Function> Value getValue_generic(Function compare) { assert(size() > 0); Iterator iter = data.begin(); Value value = iter->second; for (iter++; iter != data.end(); iter++) { if (compare(iter->second, value)) { value = iter->second; } } return value; } template<class Function> Key getKey_generic(Function compare) { assert(size() > 0); Iterator iter = data.begin(); Key key = iter->first; Value value = iter->second; for (iter++; iter != data.end(); iter++) { if (compare(iter->second, value)) { key = iter->first; value = iter->second; } } return key; } template<class Function> bool 
hasValue_generic(Function compare, const Key key) { if (size() == 0 || !hasKey_seq(key)) return false; Value value = data[key]; bool result = true; for (Iterator iter = data.begin(); iter != data.end(); iter++) { if (compare(iter->second, value)) result = false; } return result; } protected: Value getDefaultValue() { return defaultValue; } public: gm_map_small(Value defaultValue) : lock(0), defaultValue(defaultValue) { } ~gm_map_small() { } bool hasKey_seq(const Key key) { return data.find(key) != data.end(); } Value getValue(const Key key) { if (hasKey_seq(key)) { return data[key]; } else { return defaultValue; } } void setValue_par(const Key key, Value value) { gm_spinlock_acquire(&lock); setValue_seq(key, value); gm_spinlock_release(&lock); } void setValue_seq(const Key key, Value value) { data[key] = value; } void removeKey_par(const Key key) { gm_spinlock_acquire(&lock); removeKey_seq(key); gm_spinlock_release(&lock); } void removeKey_seq(const Key key) { data.erase(key); } bool hasMaxValue_seq(const Key key) { return hasValue_generic(&gm_map<Key, Value>::compare_greater, key); } bool hasMinValue_seq(const Key key) { return hasValue_generic(&gm_map<Key, Value>::compare_smaller, key); } Key getMaxKey_seq() { return getKey_generic(&gm_map<Key, Value>::compare_greater); } Key getMinKey_seq() { return getKey_generic(&gm_map<Key, Value>::compare_smaller); } Value getMaxValue_seq() { return getValue_generic(&gm_map<Key, Value>::compare_greater); } Value getMinValue_seq() { return getValue_generic(&gm_map<Key, Value>::compare_smaller); } Value changeValueAtomicAdd(const Key key, const Value summand) { Value newValue = summand; gm_spinlock_acquire(&lock); if(hasKey_seq(key)) newValue += data[key]; data[key] = summand; gm_spinlock_release(&lock); return newValue; } size_t size() { return data.size(); } void clear() { data.clear(); } }; template<class Key, class Value> class gm_map_large : public gm_map<Key, Value> { private: const size_t size_; const Value 
defaultValue; Value* const data; bool * const valid; template<class Function> Value getValue_generic_seq(Function func, const Value initialValue) { Value value = initialValue; #pragma omp parallel { Value value_private = value; #pragma omp for nowait for (Key i = 0; i < size_; i++) { if (valid[i]) value_private = func(value_private, data[i]); } // reduction { Value value_old; Value value_new; do { value_old = value; value_new = func(value_old, value_private); if (value_old == value_new) break; } while (_gm_atomic_compare_and_swap(&(value), value_old, value_new) == false); } } return value; } template<class Function> Value getValue_generic_par(Function func, const Value initialValue) { Value value = initialValue; for (Key i = 0; i < size_; i++) { if (valid[i]) value = func(value, data[i]); } return value; } template<class Function> Key getKey_generic_par(Function compare, const Value initialValue) { Value value = initialValue; Key key = 0; for (Key i = 0; i < size_; i++) { if (valid[i] && compare(data[i], value)) { value = data[i]; key = i; } } return key; } template<class Function> Key getKey_generic_seq(Function compare, const Value initialValue) { Value value = initialValue; Key key = 0; #pragma omp parallel { Value value_private = value; Key key_private = key; #pragma omp for nowait for (Key i = 0; i < size_; i++) { if (valid[i] && compare(data[i], value_private)) { value_private = data[i]; key_private = i; } } // reduction if(compare(value_private, value)) { #pragma omp critical { if(compare(value_private, value)) { value = value_private; key = key_private; } } } } return key; } template<class Function> bool hasValue_generic_seq(Function compare, const Key key) { if (size() == 0 || !hasKey_seq(key)) return false; Value value = data[key]; bool result = true; #pragma omp parallel for for (int i = 0; i < size_; i++) if (valid[i] && compare(data[i], value)) result = false; return result; } template<class Function> bool hasValue_generic_par(Function compare, const 
Key key) { if (size() == 0 || !hasKey_seq(key)) return false; Value value = data[key]; for (int i = 0; i < size_; i++) if (valid[i] && compare(data[i], value)) return false; } protected: Value getDefaultValue() { return defaultValue; } public: gm_map_large(size_t size, Value defaultValue) : size_(size), data(new Value[size]), valid(new bool[size]), defaultValue(defaultValue) { #pragma omp parallel for for (int i = 0; i < size; i++) { valid[i] = false; } } ~gm_map_large() { delete[] data; delete[] valid; } bool hasKey_seq(const Key key) { return key < size_ && valid[key]; } Value getValue(const Key key) { if (hasKey_seq(key)) return data[key]; else return defaultValue; } void setValue_par(const Key key, Value value) { setValue_seq(key, value); } void setValue_seq(const Key key, Value value) { data[key] = value; valid[key] = true; } void removeKey_par(const Key key) { removeKey_seq(key); } void removeKey_seq(const Key key) { valid[key] = false; } bool hasMaxValue_seq(const Key key) { return hasValue_generic_seq(&gm_map<Key, Value>::compare_greater, key); } bool hasMaxValue_par(const Key key) { return hasValue_generic_par(&gm_map<Key, Value>::compare_greater, key); } bool hasMinValue_seq(const Key key) { return hasValue_generic_seq(&gm_map<Key, Value>::compare_smaller, key); } bool hasMinValue_par(const Key key) { return hasValue_generic_par(&gm_map<Key, Value>::compare_smaller, key); } Key getMaxKey_seq() { return getKey_generic_seq(&gm_map<Key, Value>::compare_greater, gm_get_min<Value>()); } Key getMaxKey_par() { return getKey_generic_par(&gm_map<Key, Value>::compare_greater, gm_get_min<Value>()); } Key getMinKey_seq() { return getKey_generic_seq(&gm_map<Key, Value>::compare_smaller, gm_get_max<Value>()); } Key getMinKey_par() { return getKey_generic_par(&gm_map<Key, Value>::compare_smaller, gm_get_max<Value>()); } Value getMaxValue_seq() { return getValue_generic_seq(&gm_map<Key, Value>::max, gm_get_min<Value>()); } Value getMaxValue_par() { return 
getValue_generic_par(&gm_map<Key, Value>::max, gm_get_min<Value>()); } Value getMinValue_seq() { return getValue_generic_seq(&gm_map<Key, Value>::min, gm_get_max<Value>()); } Value getMinValue_par() { return getValue_generic_par(&gm_map<Key, Value>::min, gm_get_max<Value>()); } Value changeValueAtomicAdd(const Key key, const Value summand) { Value oldValue; Value newValue; do { oldValue = data[key]; newValue = valid[key] ? (oldValue + summand) : summand; } while (_gm_atomic_compare_and_swap(data + key, oldValue, newValue) == false); valid[key] = true; return newValue; } size_t size() { size_t result = 0; for(int i = 0; i < size_; i++) result += valid[i]; return result; } void clear() { #pragma omp parallel for for (Key key = 0; key < size(); key++) { valid[key] = false; } } }; // Map is implemnted with set of inner-maps template<class Key, class Value> class gm_map_medium : public gm_map<Key, Value> { private: const int innerSize; const Value defaultValue; map<Key, Value>* innerMaps; gm_spinlock_t* locks; typedef typename map<Key, Value>::iterator Iterator; const uint32_t bitmask; inline uint32_t getPositionFromKey(const Key key) { uint32_t P = 0; if (sizeof(Key) == 1) { const uint8_t* c = (const uint8_t*) &key; P = *c; } else if (sizeof(Key) == 2) { const uint16_t* c = (const uint16_t*) &key; P = *c; } else if (sizeof(Key) >= 4) { const uint32_t* c = (const uint32_t*) &key; P = *c; } return P & bitmask; } template<class FunctionCompare, class FunctionMinMax> Value getValue_generic_par(FunctionCompare compare, FunctionMinMax func, const Value initialValue) { Value value = initialValue; for (int i = 0; i < innerSize; i++) { if (innerMaps[i].size() > 0) { for (Iterator iter = innerMaps[i].begin(); iter != innerMaps[i].end(); iter++) { if (compare(iter->second, value)) { value = iter->second; } } } } return value; } template<class FunctionCompare, class FunctionMinMax> Value getValue_generic_seq(FunctionCompare compare, FunctionMinMax func, const Value initialValue) { 
Value value = initialValue; #pragma omp parallel { Value value_private = initialValue; #pragma omp for nowait for (int i = 0; i < innerSize; i++) { if (innerMaps[i].size() > 0) value_private = func(value_private, getValueAtPosition_generic(i, compare)); } // reduction { Value value_old; Value value_new; do { value_old = value; value_new = func(value_old, value_private); if (value_old == value_new) break; } while (_gm_atomic_compare_and_swap(&(value), value_old, value_new) == false); } } return value; } template<class Function> Value getValueAtPosition_generic(int position, Function compare) { Iterator iter = innerMaps[position].begin(); Value value = iter->second; for (iter++; iter != innerMaps[position].end(); iter++) { if (compare(iter->second, value)) { value = iter->second; } } return value; } template<class Function> Key getKey_generic_seq(Function compare, const Value initialValue) { Key key = 0; Value value = initialValue; #pragma omp parallel for for(int i = 0; i < innerSize; i++) { if(innerMaps[i].size() > 0) { Iterator iter = getKeyAtPosition_generic(i, compare); Key privateKey = iter->first; Value privateValue = iter->second; if(compare(privateValue, value)) { #pragma omp critical if(compare(privateValue, value)) { value = privateValue; key = privateKey; } } } } return key; } template<class Function> Key getKey_generic_par(Function compare, const Value initialValue) { Key key = 0; Value value = initialValue; for(int i = 0; i < innerSize; i++) { if(innerMaps[i].size() > 0) { for (Iterator iter = innerMaps[i].begin(); iter != innerMaps[i].end(); iter++) { if(compare(iter->second, value)) { value = iter->second; key = iter->first; } } } } return key; } template<class Function> Iterator getKeyAtPosition_generic(int position, Function compare) { Iterator iter = innerMaps[position].begin(); Iterator currentBest = iter; for (iter++; iter != innerMaps[position].end(); iter++) { if (compare(iter->second, currentBest->second)) { currentBest = iter; } } return 
currentBest; } template<class Function> bool hasValue_generic_par(Function compare, const Key key) { uint32_t position = getPositionFromKey(key); Value reference = getValueFromPosition(position, key); for(int i = 0; i < innerSize; i++) { if (innerMaps[i].size() > 0) { for (Iterator iter = innerMaps[i].begin(); iter != innerMaps[i].end(); iter++) { if (compare(iter->second, reference)) return false; } } } return true; } template<class Function> bool hasValue_generic_seq(Function compare, const Key key) { bool result = true; uint32_t position = getPositionFromKey(key); Value reference = getValueFromPosition(position, key); #pragma omp parallel for for(int i = 0; i < innerSize; i++) { bool tmp = hasValueAtPosition_generic(i, compare, reference); if(!tmp) result = false; } return result; } template<class Function> bool hasValueAtPosition_generic(int position, Function compare, const Value reference) { map<Key, Value>& currentMap = innerMaps[position]; if (currentMap.size() == 0) return false; for (Iterator iter = currentMap.begin(); iter != currentMap.end(); iter++) { if (compare(iter->second, reference)) return false; } } bool positionHasKey(int position, const Key key) { return innerMaps[position].find(key) != innerMaps[position].end(); } Value getValueFromPosition(int position, const Key key) { Iterator iter = innerMaps[position].find(key); if(iter != innerMaps[position].end()) return iter->second; else return defaultValue; } void setValueAtPosition(int position, const Key key, Value value) { innerMaps[position][key] = value; } void removeKeyAtPosition(int position, const Key key) { innerMaps[position].erase(key); } static unsigned getBitMask(int innerSize) { unsigned tmpMask = innerSize - 1; return tmpMask; } static int getSize(int threadCount) { int tmpSize = 32; while(tmpSize < threadCount) { tmpSize *= 2; } // we will use only up to 4B for positioninig assert(tmpSize <= 1024*1024*1024); return tmpSize; } protected: Value getDefaultValue() { return defaultValue; 
} public: gm_map_medium(int threadCount, Value defaultValue) : innerSize(getSize(threadCount)), bitmask(getBitMask(innerSize)), defaultValue(defaultValue) { locks = new gm_spinlock_t[innerSize]; innerMaps = new map<Key, Value>[innerSize]; #pragma omp parallel for for(int i = 0; i < innerSize; i++) { locks[i] = 0; } } ~gm_map_medium() { delete[] innerMaps; delete[] locks; } bool hasKey_seq(const Key key) { uint32_t position = getPositionFromKey(key); return positionHasKey(position, key); } Value getValue(const Key key) { uint32_t position = getPositionFromKey(key); return getValueFromPosition(position, key); } void setValue_par(const Key key, Value value) { uint32_t position = getPositionFromKey(key); gm_spinlock_acquire(locks + position); setValueAtPosition(position, key, value); gm_spinlock_release(locks + position); } void setValue_seq(const Key key, Value value) { uint32_t position = getPositionFromKey(key); setValueAtPosition(position, key, value); } void removeKey_par(const Key key) { uint32_t position = getPositionFromKey(key); gm_spinlock_acquire(locks + position); removeKeyAtPosition(position, key); gm_spinlock_release(locks + position); } void removeKey_seq(const Key key) { uint32_t position = getPositionFromKey(key); removeKeyAtPosition(position, key); } bool hasMaxValue_seq(const Key key) { return hasValue_generic_par(&gm_map<Key, Value>::compare_greater, key); } bool hasMinValue_seq(const Key key) { return hasValue_generic_par(&gm_map<Key, Value>::compare_smaller, key); } Key getMaxKey_seq() { return getKey_generic_seq(&gm_map<Key, Value>::compare_greater, gm_get_min<Value>()); } Key getMaxKey_par() { return getKey_generic_par(&gm_map<Key, Value>::compare_greater, gm_get_min<Value>()); } Key getMinKey_seq() { return getKey_generic_seq(&gm_map<Key, Value>::compare_smaller, gm_get_max<Value>()); } Key getMinKey_par() { return getKey_generic_par(&gm_map<Key, Value>::compare_smaller, gm_get_max<Value>()); } Value getMaxValue_seq() { return 
getValue_generic_seq(&gm_map<Key, Value>::compare_greater, &gm_map<Key, Value>::max, gm_get_min<Value>()); } Value getMaxValue_par() { return getValue_generic_par(&gm_map<Key, Value>::compare_greater, &gm_map<Key, Value>::max, gm_get_min<Value>()); } Value getMinValue_seq() { return getValue_generic_seq(&gm_map<Key, Value>::compare_smaller, &gm_map<Key, Value>::min, gm_get_max<Value>()); } Value getMinValue_par() { return getValue_generic_par(&gm_map<Key, Value>::compare_smaller, &gm_map<Key, Value>::min, gm_get_max<Value>()); } Value changeValueAtomicAdd(const Key key, const Value summand) { uint32_t position = getPositionFromKey(key); gm_spinlock_acquire(locks + position); Value newValue = summand; if(positionHasKey(position, key)) newValue += getValueFromPosition(position, key); setValueAtPosition(position, key, newValue); gm_spinlock_release(locks + position); return newValue; } size_t size() { size_t size = 0; #pragma omp parallel for reduction(+ : size) for(int i = 0; i < innerSize; i++) { size += innerMaps[i].size(); } return size; } void clear() { #pragma omp parallel for for(int i = 0; i < innerSize; i++) { innerMaps[i].clear(); } } }; #endif /* GM_MAP_H_ */
main.c
//
//  main.c
//  omp_sections
//
//  Created by Vicente Cubells Nonell on 06/11/14.
//  Copyright (c) 2014 Vicente Cubells Nonell. All rights reserved.
//

#include <stdio.h>
#include <stdlib.h>   /* rand() -- was missing; rand() was implicitly declared */
#include <omp.h>

#define N 100000

/*
 * Demo of nested OpenMP parallelism: an outer team of two threads splits
 * the work with "sections" (one section computes the element-wise sum,
 * the other the element-wise difference), and each section spawns an
 * inner team of four threads for its loop.
 */
int main(int argc, const char * argv[])
{
    int suma[N], resta[N], A[N], B[N];
    int i;

    /* allow the parallel regions nested inside the sections to get threads */
    omp_set_nested(1);

    /* Initialize the input arrays.
     * FIX: rand() is not required to be thread-safe, so calling it from a
     * parallel loop was a data race; initialize sequentially instead. */
    for (i = 0; i < N; ++i)
    {
        A[i] = rand() % 100;
        B[i] = rand() % 100;
    }

    #pragma omp parallel num_threads(2)
    {
        #pragma omp master
        printf("No. hilos externos = %d\n", omp_get_num_threads());

        #pragma omp sections
        {
            #pragma omp section
            {
                /* first section: element-wise sum, inner team of 4 threads */
                #pragma omp parallel for private(i) num_threads(4)
                for (i = 0; i < N; ++i)
                {
                    suma[i] = A[i] + B[i];
                }
            }

            #pragma omp section
            {
                /* second section: element-wise difference, inner team of 4 */
                #pragma omp parallel for private(i) num_threads(4)
                for (i = 0; i < N; ++i)
                {
                    resta[i] = A[i] - B[i];
                }
            }
        }
    }

    return 0;
}
GB_bitmap_assign_M_col_template.c
//------------------------------------------------------------------------------
// GB_bitmap_assign_M_col_template: traverse M for GB_COL_ASSIGN
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// M is a (C->vlen)-by-1 hypersparse or sparse matrix, for
// GrB_Row_assign (if C is CSR) or GrB_Col_assign (if C is CSC).
// C is bitmap/full.  M is sparse/hyper, and can be jumbled.

{
    // M_ek_slicing packs three arrays of length M_ntasks back to back:
    // [kfirst | klast | pstart] -- NOTE(review): layout inferred from the
    // three offsets below; confirm against GB_ek_slice.
    const int64_t *restrict kfirst_Mslice = M_ek_slicing ;
    const int64_t *restrict klast_Mslice  = M_ek_slicing + M_ntasks ;
    const int64_t *restrict pstart_Mslice = M_ek_slicing + M_ntasks * 2 ;

    // the single column (or row, if C is CSR) of C being assigned
    int64_t jC = J [0] ;

    int tid ;
    #pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1) \
        reduction(+:cnvals)
    for (tid = 0 ; tid < M_ntasks ; tid++)
    {
        int64_t kfirst = kfirst_Mslice [tid] ;
        int64_t klast  = klast_Mslice  [tid] ;
        // per-task tally, folded into the cnvals reduction after the loop
        int64_t task_cnvals = 0 ;

        //----------------------------------------------------------------------
        // traverse over M (:,kfirst:klast)
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // find the part of M(:,k) for this task
            //------------------------------------------------------------------

            // M is a single vector, so k can only be the first (only) vector
            ASSERT (k == 0) ;
            ASSERT (GBH (Mh, k) == 0) ;
            int64_t pM_start, pM_end ;
            GB_get_pA (&pM_start, &pM_end, tid, k,
                kfirst, klast, pstart_Mslice, Mp, mvlen) ;

            //------------------------------------------------------------------
            // traverse over M(:,0), the kth vector of M
            //------------------------------------------------------------------

            // for col_assign: M is a single vector, jC = J [0]
            for (int64_t pM = pM_start ; pM < pM_end ; pM++)
            {
                // mij = M(iC,0), typecast to boolean
                bool mij = GB_mcast (Mx, pM, msize) ;
                if (mij)
                {
                    int64_t iC = Mi [pM] ;
                    // pC = linear position of C(iC,jC) in the bitmap/full C
                    int64_t pC = iC + jC * cvlen ;
                    // GB_MASK_WORK is supplied by the including file; it
                    // presumably updates C(iC,jC) and adjusts task_cnvals.
                    GB_MASK_WORK (pC) ;
                }
            }
        }
        cnvals += task_cnvals ;
    }
}
cg_aux.c
//#include <vector>
//#include "exblas/exdot.hpp"
#include "cg_aux.h"

// Blocked BLAS-1 helpers for a task-parallel CG solver.  Each bblas_* routine
// walks the length-m vector(s) in chunks of at most bm elements and spawns one
// task per chunk, with data dependences on exactly the chunk it touches.
//
// NOTE(review): the dependence clauses are written X[i : i+c-1].  Under the
// standard OpenMP array-section syntax [lower : length] that length would be
// wrong (it should be c); it reads as the inclusive range [lower : upper]
// used by OmpSs.  Presumably this file is built with an OmpSs/Nanos-style
// compiler -- confirm before switching toolchains.

//static inline void __attribute__((always_inline)) bblas_dcopy(int bm, int m, double *X, double *Y)
// Y := X, one task per block of at most bm elements.
void bblas_dcopy(int bm, int m, double *X, double *Y)
{
    int i;
    for ( i=0; i<m; i+=bm ) {
        int cs = m - i;
        int c = cs < bm ? cs : bm;   // final block may be shorter than bm
        #pragma omp task depend(in:X[i:i+c-1]) depend(out:Y[i:i+c-1]) firstprivate(i,c,m)
        __t_copy(c, m, X, Y, i, i);
    }
}

////static inline void __attribute__((always_inline)) bblas_ddot(int bm, int m, double *X, double *Y, double *result)
//void bblas_ddot(int bm, int m, double *X, double *Y, double *result)
//{
//    int i;
//    for ( i=0; i<m; i+=bm ) {
//        int cs = m - i;
//        int c = cs < bm ? cs : bm;
//        //#pragma omp parallel reduction(redFPE:result)
//        #pragma omp task depend(in:X[i:i+c-1], Y[i:i+c-1]) depend(out:result) firstprivate(i,c,m) //private(result)
//        __t_dot(c, m, X, Y, i, i, result);
//    }
//}

//static inline void __attribute__((always_inline)) bblas_daxpy(int bm, int m, double f, double *X, double *Y)
// Y := f*X + Y, one task per block.
void bblas_daxpy(int bm, int m, double f, double *X, double *Y)
{
    int i;
    for ( i=0; i<m; i+=bm ) {
        int cs = m - i;
        int c = cs < bm ? cs : bm;
        #pragma omp task depend(in:X[i:i+c-1], f) depend(inout:Y[i:i+c-1]) firstprivate(i, c, m, f) //depend(in:f)
        __t_axpy(c, m, f, &X[i], &Y[i]);
    }
}

//static inline void __attribute__((always_inline)) bblas_dscal(int bm, int m, double f, double *X)
// X := f*X, one task per block.
void bblas_dscal(int bm, int m, double f, double *X)
{
    int i;
    for ( i=0; i<m; i+=bm ) {
        int cs = m - i;
        int c = cs < bm ? cs : bm;
        #pragma omp task depend(inout:X[i:i+c-1]) depend(in:f) firstprivate(i, c, m, f)
        __t_scal(c, m, f, &X[i]);
    }
}

// dst := src1 .* src2 (element-wise product), one inline task body per block.
void VvecDoublesTasks (int bm, int m, double *src1, double *src2, double *dst)
{
    int i;
    for ( i=0; i<m; i+=bm ) {
        int cs = m - i;
        int c = cs < bm ? cs : bm;
        #pragma omp task depend(in:src1[i:i+c-1], src2[i:i+c-1]) depend(out:dst[i:i+c-1]) firstprivate(i, c, m)
        {
            double *D = &dst[i];
            double *S1 = &src1[i];
            double *S2 = &src2[i];
            for(int ii=0; ii < c; ii++){
                D[ii] = S1[ii] * S2[ii];
            }
        }
    }
}

/*
 * BLAS/LAPACK task wrappers
 *
 */

//#pragma omp task depend(in:x[initx:initx+bm-1]) depend(out:y[inity:inity+bm-1])
// Copy bm elements of x starting at initx into y starting at inity
// (unit stride; BLAS_cp is a project macro, presumably dcopy).
void __t_copy(int bm, int m, double *x, double *y, int initx, int inity)
{
    double *X = &x[initx];
    double *Y = &y[inity];
    int i_one = 1;
    BLAS_cp(bm, X, i_one, Y, i_one);
}

////#pragma omp task depend(in:x[initx:initx+bm-1], y[inity:inity+bm-1]) depend(out:result)//concurrent(result[0:bn-1])
//void __t_dot(int bm, int m, double *x, double *y, int initx, int inity, double *result)
//{
//    double *X = &x[initx];
//    double *Y = &y[inity];
//
//    std::vector<double> local_result(NBFPE, 0.0);
//
//    exblas::cpu::exdot<double*, double*, NBFPE> (bm, X, Y, &local_result[0]);
//
//    #pragma omp critical
//    fpeSum_omp(&local_result[0], &result[0]);
//}
//

//#pragma omp task depend(in:X[0:bm-1], f) depend(out:Y[0:bm-1])
// Y := f*X + Y over bm elements (unit stride).
void __t_axpy(int bm, int m, double f, double *X, double *Y)
{
    int i_one = 1;
    BLAS_axpy(bm, f, X, i_one, Y, i_one);
}

//#pragma omp task depend(inout:X[0:bm-1],f)
// X := f*X over bm elements (unit stride).
void __t_scal(int bm, int m, double f, double *X)
{
    int i_one = 1;
    BLAS_scal(bm, f, X, i_one);
}
stream.c
/*-----------------------------------------------------------------------*/ /* Program: STREAM */ /* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */ /* Original code developed by John D. McCalpin */ /* Programmers: John D. McCalpin */ /* Joe R. Zagar */ /* */ /* This program measures memory transfer rates in MB/s for simple */ /* computational kernels coded in C. */ /*-----------------------------------------------------------------------*/ /* Copyright 1991-2013: John D. McCalpin */ /*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear, and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. 
Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ # include <stdio.h> # include <unistd.h> # include <math.h> # include <float.h> # include <limits.h> # include <sys/time.h> /*----------------------------------------------------------------------- * INSTRUCTIONS: * * 1) STREAM requires different amounts of memory to run on different * systems, depending on both the system cache size(s) and the * granularity of the system timer. * You should adjust the value of 'STREAM_ARRAY_SIZE' (below) * to meet *both* of the following criteria: * (a) Each array must be at least 4 times the size of the * available cache memory. I don't worry about the difference * between 10^6 and 2^20, so in practice the minimum array size * is about 3.8 times the cache size. * Example 1: One Xeon E3 with 8 MB L3 cache * STREAM_ARRAY_SIZE should be >= 4 million, giving * an array size of 30.5 MB and a total memory requirement * of 91.5 MB. * Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP) * STREAM_ARRAY_SIZE should be >= 20 million, giving * an array size of 153 MB and a total memory requirement * of 458 MB. * (b) The size should be large enough so that the 'timing calibration' * output by the program is at least 20 clock-ticks. * Example: most versions of Windows have a 10 millisecond timer * granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds. * If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec. * This means the each array must be at least 1 GB, or 128M elements. * * Version 5.10 increases the default array size from 2 million * elements to 10 million elements in response to the increasing * size of L3 caches. The new default size is large enough for caches * up to 20 MB. 
* Version 5.10 changes the loop index variables from "register int" * to "ssize_t", which allows array indices >2^32 (4 billion) * on properly configured 64-bit systems. Additional compiler options * (such as "-mcmodel=medium") may be required for large memory runs. * * Array size can be set at compile time without modifying the source * code for the (many) compilers that support preprocessor definitions * on the compile line. E.g., * gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M * will override the default size of 10M with a new size of 100M elements * per array. */ #ifndef STREAM_ARRAY_SIZE # define STREAM_ARRAY_SIZE 10000000 #endif /* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result * for any iteration after the first, therefore the minimum value * for NTIMES is 2. * There are no rules on maximum allowable values for NTIMES, but * values larger than the default are unlikely to noticeably * increase the reported performance. * NTIMES can also be set on the compile line without changing the source * code using, for example, "-DNTIMES=7". */ #ifdef NTIMES #if NTIMES<=1 # define NTIMES 10 #endif #endif #ifndef NTIMES # define NTIMES 10 #endif /* Users are allowed to modify the "OFFSET" variable, which *may* change the * relative alignment of the arrays (though compilers may change the * effective offset by making the arrays non-contiguous on some systems). * Use of non-zero values for OFFSET can be especially helpful if the * STREAM_ARRAY_SIZE is set to a value close to a large power of 2. * OFFSET can also be set on the compile line without changing the source * code using, for example, "-DOFFSET=56". */ #ifndef OFFSET # define OFFSET 0 #endif /* * 3) Compile the code with optimization. Many compilers generate * unreasonably bad code before the optimizer tightens things up. * If the results are unreasonably good, on the other hand, the * optimizer might be too smart for me! 
* * For a simple single-core version, try compiling with: * cc -O stream.c -o stream * This is known to work on many, many systems.... * * To use multiple cores, you need to tell the compiler to obey the OpenMP * directives in the code. This varies by compiler, but a common example is * gcc -O -fopenmp stream.c -o stream_omp * The environment variable OMP_NUM_THREADS allows runtime control of the * number of threads/cores used when the resulting "stream_omp" program * is executed. * * To run with single-precision variables and arithmetic, simply add * -DSTREAM_TYPE=float * to the compile line. * Note that this changes the minimum array sizes required --- see (1) above. * * The preprocessor directive "TUNED" does not do much -- it simply causes the * code to call separate functions to execute each kernel. Trivial versions * of these functions are provided, but they are *not* tuned -- they just * provide predefined interfaces to be replaced with tuned code. * * * 4) Optional: Mail the results to mccalpin@cs.virginia.edu * Be sure to include info that will help me understand: * a) the computer hardware configuration (e.g., processor model, memory type) * b) the compiler name/version and compilation flags * c) any run-time information (such as OMP_NUM_THREADS) * d) all of the output from the test case. * * Thanks! 
 *
 *-----------------------------------------------------------------------*/

# define HLINE "-------------------------------------------------------------\n"

# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif

#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif

/* the three benchmark arrays; OFFSET can shift their relative alignment */
static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET],
    b[STREAM_ARRAY_SIZE+OFFSET],
    c[STREAM_ARRAY_SIZE+OFFSET];

/* per-kernel timing statistics, indexed [Copy, Scale, Add, Triad] */
static double avgtime[4] = {0}, maxtime[4] = {0},
    mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};

static char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "};

/* bytes moved per kernel pass: 2 arrays for Copy/Scale, 3 for Add/Triad */
static double bytes[4] = {
    2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE
    };

extern double mysecond();
extern void checkSTREAMresults();
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(STREAM_TYPE scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(STREAM_TYPE scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif

int main()
    {
    int			quantum, checktick();
    int			BytesPerWord;
    int			k;
    ssize_t		j;
    STREAM_TYPE		scalar;
    double		t, times[4][NTIMES];

    /* --- SETUP --- determine precision and check timing --- */

    printf(HLINE);
    printf("STREAM version $Revision: 5.10 $\n");
    printf(HLINE);
    BytesPerWord = sizeof(STREAM_TYPE);
    printf("This system uses %d bytes per array element.\n", BytesPerWord);

    printf(HLINE);
#ifdef N
    printf("***** WARNING: ******\n");
    printf(" It appears that you set the preprocessor variable N when compiling this code.\n");
    printf(" This version of the code uses the preprocessor variable STREAM_ARRAY_SIZE to control the array size\n");
    printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE);
    printf("***** WARNING: ******\n");
#endif

    printf("Array size = %llu (elements), Offset = %d (elements)\n" ,
        (unsigned long long) STREAM_ARRAY_SIZE, OFFSET);
    printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
        BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0),
        BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0));
    printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
        (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.),
        (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.));
    printf("Each kernel will be executed %d times.\n", NTIMES);
    printf(" The *best* time for each kernel (excluding the first iteration)\n");
    printf(" will be used to compute the reported bandwidth.\n");

#ifdef _OPENMP
    printf(HLINE);
    /* report the thread count the OpenMP runtime grants */
#pragma omp parallel
    {
#pragma omp master
	{
	    k = omp_get_num_threads();
	    printf ("Number of Threads requested = %i\n",k);
	}
    }
#endif

#ifdef _OPENMP
    /* count the threads actually running, one atomic increment each */
    k = 0;
#pragma omp parallel
#pragma omp atomic
	k++;
    printf ("Number of Threads counted = %i\n",k);
#endif

    /* Get initial value for system clock. */
#pragma omp parallel for
    for (j=0; j<STREAM_ARRAY_SIZE; j++) {
	a[j] = 1.0;
	b[j] = 2.0;
	c[j] = 0.0;
    }

    printf(HLINE);

    if ( (quantum = checktick()) >= 1)
	printf("Your clock granularity/precision appears to be "
	    "%d microseconds.\n", quantum);
    else {
	printf("Your clock granularity appears to be "
	    "less than one microsecond.\n");
	quantum = 1;
    }

    /* time one full sweep to estimate how long each kernel pass takes */
    t = mysecond();
#pragma omp parallel for
    for (j = 0; j < STREAM_ARRAY_SIZE; j++)
	a[j] = 2.0E0 * a[j];
    t = 1.0E6 * (mysecond() - t);

    printf("Each test below will take on the order"
	" of %d microseconds.\n", (int) t );
    printf(" (= %d clock ticks)\n", (int) (t/quantum) );
    printf("Increase the size of the arrays if this shows that\n");
    printf("you are not getting at least 20 clock ticks per test.\n");

    printf(HLINE);

    printf("WARNING -- The above is only a rough guideline.\n");
    printf("For best results, please be sure you know the\n");
    printf("precision of your system timer.\n");
    printf(HLINE);

    /* --- MAIN LOOP --- repeat test cases NTIMES times --- */

    scalar = 3.0;
    for (k=0; k<NTIMES; k++)
	{
	/* kernel 0: Copy  c = a */
	times[0][k] = mysecond();
#ifdef TUNED
	tuned_STREAM_Copy();
#else
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    c[j] = a[j];
#endif
	times[0][k] = mysecond() - times[0][k];

	/* kernel 1: Scale  b = scalar*c */
	times[1][k] = mysecond();
#ifdef TUNED
	tuned_STREAM_Scale(scalar);
#else
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    b[j] = scalar*c[j];
#endif
	times[1][k] = mysecond() - times[1][k];

	/* kernel 2: Add  c = a + b */
	times[2][k] = mysecond();
#ifdef TUNED
	tuned_STREAM_Add();
#else
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    c[j] = a[j]+b[j];
#endif
	times[2][k] = mysecond() - times[2][k];

	/* kernel 3: Triad  a = b + scalar*c */
	times[3][k] = mysecond();
#ifdef TUNED
	tuned_STREAM_Triad(scalar);
#else
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    a[j] = b[j]+scalar*c[j];
#endif
	times[3][k] = mysecond() - times[3][k];
	}

    /* --- SUMMARY --- */

    for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
	{
	for (j=0; j<4; j++)
	    {
	    avgtime[j] = avgtime[j] + times[j][k];
	    mintime[j] = MIN(mintime[j], times[j][k]);
	    maxtime[j] = MAX(maxtime[j], times[j][k]);
	    }
	}

    printf("Function Best Rate MB/s Avg time Min time Max time\n");
    for (j=0; j<4; j++) {
	avgtime[j] = avgtime[j]/(double)(NTIMES-1);

	/* best (min-time) bandwidth is the headline number */
	printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j],
	    1.0E-06 * bytes[j]/mintime[j],
	    avgtime[j],
	    mintime[j],
	    maxtime[j]);
    }
    printf(HLINE);

    /* --- Check Results --- */
    checkSTREAMresults();
    printf(HLINE);

    return 0;
}

# define M 20

/*
 * Estimate the timer granularity (microseconds): collect M consecutive
 * distinct timer readings and return the minimum positive gap.
 */
int checktick()
    {
    int		i, minDelta, Delta;
    double	t1, t2, timesfound[M];

/* Collect a sequence of M unique time values from the system. */

    for (i = 0; i < M; i++) {
	t1 = mysecond();
	while( ((t2=mysecond()) - t1) < 1.0E-6 )
	    ;
	timesfound[i] = t1 = t2;
	}

/*
 * Determine the minimum difference between these M values.
 * This result will be our estimate (in microseconds) for the
 * clock granularity.
 */

    minDelta = 1000000;
    for (i = 1; i < M; i++) {
	Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1]));
	minDelta = MIN(minDelta, MAX(Delta,0));
	}

    return(minDelta);
    }

/* A gettimeofday routine to give access to the wall
   clock timer on most UNIX-like systems. */

#include <sys/time.h>

/* wall-clock time in seconds (microsecond resolution via gettimeofday) */
double mysecond()
{
	struct timeval tp;
	struct timezone tzp;
	int i;

	i = gettimeofday(&tp,&tzp);
	return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}

#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif

/*
 * Validate the arrays: replay the kernel sequence on scalars to get the
 * expected per-element values, then compare the average absolute error
 * against an epsilon chosen by the element size.
 */
void checkSTREAMresults ()
{
	STREAM_TYPE aj,bj,cj,scalar;
	STREAM_TYPE aSumErr,bSumErr,cSumErr;
	STREAM_TYPE aAvgErr,bAvgErr,cAvgErr;
	double epsilon;
	ssize_t	j;
	int	k,ierr,err;

    /* reproduce initialization */
	aj = 1.0;
	bj = 2.0;
	cj = 0.0;
    /* a[] is modified during timing check */
	aj = 2.0E0 * aj;
    /* now execute timing loop */
	scalar = 3.0;
	for (k=0; k<NTIMES; k++)
	{
	    cj = aj;
	    bj = scalar*cj;
	    cj = aj+bj;
	    aj = bj+scalar*cj;
	}

    /* accumulate deltas between observed and expected results */
	aSumErr = 0.0;
	bSumErr = 0.0;
	cSumErr = 0.0;
	for (j=0; j<STREAM_ARRAY_SIZE; j++) {
		aSumErr += abs(a[j] - aj);
		bSumErr += abs(b[j] - bj);
		cSumErr += abs(c[j] - cj);
		// if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN
	}
	aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
	bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
	cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;

	/* epsilon scales with the precision of STREAM_TYPE */
	if (sizeof(STREAM_TYPE) == 4) {
		epsilon = 1.e-6;
	}
	else if (sizeof(STREAM_TYPE) == 8) {
		epsilon = 1.e-13;
	}
	else {
		printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE));
		epsilon = 1.e-6;
	}

	err = 0;
	if (abs(aAvgErr/aj) > epsilon) {
		err++;
		printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
		printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj);
		ierr = 0;
		for (j=0; j<STREAM_ARRAY_SIZE; j++) {
			if (abs(a[j]/aj-1.0) > epsilon) {
				ierr++;
#ifdef VERBOSE
				if (ierr < 10) {
					printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n",
						j,aj,a[j],abs((aj-a[j])/aAvgErr));
				}
#endif
			}
		}
		printf(" For array a[], %d errors were found.\n",ierr);
	}
	if (abs(bAvgErr/bj) > epsilon) {
		err++;
		printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
		printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj);
		printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
		ierr = 0;
		for (j=0; j<STREAM_ARRAY_SIZE; j++) {
			if (abs(b[j]/bj-1.0) > epsilon) {
				ierr++;
#ifdef VERBOSE
				if (ierr < 10) {
					printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n",
						j,bj,b[j],abs((bj-b[j])/bAvgErr));
				}
#endif
			}
		}
		printf(" For array b[], %d errors were found.\n",ierr);
	}
	if (abs(cAvgErr/cj) > epsilon) {
		err++;
		printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
		printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj);
		printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
		ierr = 0;
		for (j=0; j<STREAM_ARRAY_SIZE; j++) {
			if (abs(c[j]/cj-1.0) > epsilon) {
				ierr++;
#ifdef VERBOSE
				if (ierr < 10) {
					printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n",
						j,cj,c[j],abs((cj-c[j])/cAvgErr));
				}
#endif
			}
		}
		printf(" For array c[], %d errors were found.\n",ierr);
	}
	if (err == 0) {
		printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon);
	}
#ifdef VERBOSE
	printf ("Results Validation Verbose Results: \n");
	printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj);
	printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]);
	printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj));
#endif
}

#ifdef TUNED
/* stubs for "tuned" versions of the kernels */
void tuned_STREAM_Copy()
{
	ssize_t j;
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    c[j] = a[j];
}

void tuned_STREAM_Scale(STREAM_TYPE scalar)
{
	ssize_t j;
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    b[j] = scalar*c[j];
}

void tuned_STREAM_Add()
{
	ssize_t j;
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    c[j] = a[j]+b[j];
}

void tuned_STREAM_Triad(STREAM_TYPE scalar)
{
	ssize_t j;
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    a[j] = b[j]+scalar*c[j];
}
/* end of stubs for the "tuned" versions of the kernels */
#endif
TemporalRowConvolution.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/TemporalRowConvolution.c"
#else

// Validates the arguments shared by the forward and backward entry points of
// TemporalRowConvolution and raises a TH error on any mismatch.
// gradOutput may be NULL (forward pass); bias may be NULL (no-bias layer).
static inline void THNN_(TemporalRowConvolution_shapeCheck)(
	THNNState *state,
	THTensor *input,
	THTensor *gradOutput,
	THTensor *weight,
	THTensor *bias,
	int kW,
	int dW,
	int padW) {

	THArgCheck(kW > 0, 5,
	           "kernel size should be greater than zero, but got kW: %d", kW);
	THArgCheck(dW > 0, 6,
	           "stride should be greater than zero, but got dW: %d", dW);
	THNN_ARGCHECK(!weight->is_empty() && weight->dim() == 3, 3, weight,
	              "non-empty 3D weight tensor expected, but got: %s");
	THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous");
	THArgCheck(!bias || THTensor_(isContiguous)(bias), 5,
	           "bias must be contiguous");

	if (bias != NULL) {
		// bias must have one entry per feature row
		THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size(0));
	}

	// we're always looking at (possibly batch) x feats x seq
	int ndim = input->dim();
	int dimF = 0; // feature dimension index
	int dimS = 1; // sequence dimension index
	if (ndim == 3) {
		// batched input: both dimensions shift right by one
		++dimS;
		++dimF;
	}

	THNN_ARGCHECK(!input->is_empty() && (ndim == 2 || ndim == 3), 1, input,
	              "non-empty 2D or 3D (batch mode) input tensor expected, but got :%s");

	int64_t inputFrameSize = THTensor_sizeLegacyNoScalars(weight, 0);
	int64_t nInputFrame = input->size(dimS);
	// standard 1D convolution output-length formula
	int64_t nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1;

	if (nOutputFrame < 1) {
		THError("Given input size: (%d x %d). "
		        "Calculated output size: (%d x %d). Output size is too small",
		        inputFrameSize, nInputFrame, inputFrameSize, nOutputFrame);
	}

	THNN_CHECK_DIM_SIZE(input, ndim, dimF, inputFrameSize);

	if (gradOutput != NULL) {
		THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimF, inputFrameSize);
		THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimS, nOutputFrame);
	}
}

// Fold step (col2im-like): scatter-adds the unfolded buffer `finput`
// (inputFrameSize x kW x nOutputFrame) back into `input`
// (inputFrameSize x nInputFrame). Used by the backward pass.
// NOTE(review): padW is accepted but unused in this helper.
static void THNN_(unfolded_acc_row)(
	THTensor *finput,
	THTensor *input,
	int kW,
	int dW,
	int padW,
	int64_t inputFrameSize,
	int64_t nInputFrame,
	int64_t nOutputFrame) {

	int64_t c;
	scalar_t *input_data = input->data<scalar_t>();
	scalar_t *finput_data = finput->data<scalar_t>();

// #pragma omp parallel for private(c)
	for (c = 0; c < inputFrameSize; c++) {
		int64_t kw, x;
		int64_t ix = 0;

		for (kw = 0; kw < kW; kw++) {
			scalar_t *src = finput_data
			                + c * (kW * nOutputFrame)
			                + kw * (nOutputFrame);
			scalar_t *dst = input_data + c * (nInputFrame);

			ix = (size_t)(kw);
			if (dW == 1) {
				// unit stride: the destination run is contiguous, one vector add
				scalar_t *dst_slice = dst + (size_t)(ix);
				THVector_(cadd)(dst_slice, dst_slice, src, 1, nOutputFrame);
			} else {
				// strided destination: accumulate element by element
				for (x = 0; x < nOutputFrame; x++) {
					scalar_t *dst_slice = dst + (size_t)(ix + x * dW);
					THVector_(cadd)(dst_slice, dst_slice, src + (size_t)(x), 1, 1);
				}
			}
		}
	}
}

// Unfold step (im2col-like): copies each sliding window of `input` into the
// `finput` buffer so the convolution becomes a batched matrix multiply.
// NOTE(review): padW is accepted but unused in this helper.
static void THNN_(unfolded_copy_row)(
	THTensor *finput,
	THTensor *input,
	int kW,
	int dW,
	int padW,
	int64_t inputFrameSize,
	int64_t nInputFrame,
	int64_t nOutputFrame) {

	int64_t k;
	scalar_t *input_data = input->data<scalar_t>();
	scalar_t *finput_data = finput->data<scalar_t>();

// #pragma omp parallel for private(k)
	for (k = 0; k < inputFrameSize * kW; k++) {
		// decompose the flat index into (feature row c, kernel offset kw)
		int64_t c = k / kW;
		int64_t rest = k % kW;
		int64_t kw = rest % kW;
		int64_t x;
		int64_t ix;
		scalar_t *dst = finput_data + c * (kW * nOutputFrame) + kw * (nOutputFrame);
		scalar_t *src = input_data + c * (nInputFrame);

		ix = (size_t)(kw);
		if (dW == 1) {
			// unit stride: the whole row can be copied in one memcpy
			memcpy(dst, src+(size_t)(ix), sizeof(scalar_t) * (nOutputFrame));
		} else {
			for (x = 0; x < nOutputFrame; x++) {
				memcpy(dst + (size_t)(x), src + (size_t)(ix + x * dW),
				       sizeof(scalar_t) * 1);
			}
		}
	}
}

static void
THNN_(TemporalRowConvolution_updateOutput_frame)(
	// Forward pass for one sample: unfold the input, add the bias, then
	// compute the row convolution as a batched matrix multiply.
	THTensor *input,
	THTensor *output,
	THTensor *weight,
	THTensor *bias,
	THTensor *finput,
	int kW,
	int dW,
	int padW,
	int64_t inputFrameSize,
	int64_t nInputFrame,
	int64_t nOutputFrame) {

	int64_t i;
	// View output as (inputFrameSize x 1 x nOutputFrame) sharing storage.
	THTensor *output3d = THTensor_(newWithStorage3d)(
		THTensor_getStoragePtr(output), output->storage_offset(),
		inputFrameSize, -1,
		1, -1,
		nOutputFrame, -1);

	THNN_(unfolded_copy_row)(finput, input, kW, dW, padW,
	                         inputFrameSize, nInputFrame, nOutputFrame);

	THTensor_(zero)(output);

	if (bias != NULL) {
		// Pre-fill each output row with its bias value before accumulating.
		for (i = 0; i < inputFrameSize; i++)
			THVector_(fill)
				(THStorage_(data)(THTensor_getStoragePtr(output)) + output->storage_offset()
				 + output->stride(0) * i,
				 THTensor_(get1d)(bias, i), nOutputFrame);
	}

	// output3d += weight * finput (batched over feature rows)
	THTensor_(baddbmm)(output3d, 1, output3d, 1, weight, finput);

	c10::raw::intrusive_ptr::decref(output3d);
}

// Forward entry point. Handles optional (seq x feat) -> (feat x seq)
// transposition, batch vs. non-batch inputs, and buffer (re)sizing.
void THNN_(TemporalRowConvolution_updateOutput)(
	THNNState *state,
	THTensor *input,
	THTensor *output,
	THTensor *weight,
	THTensor *bias,
	THTensor *finput,
	THTensor *fgradInput, // unused here but needed for Cuda
	int kW,
	int dW,
	int padW,
	bool featFirst) {

	int ndim = input->dim();

	THTensor *tinput = NULL;
	if (!featFirst) {
		// bring the feature dimension in front, then make it contiguous
		tinput = THTensor_(newTranspose)(input, ndim - 1, ndim - 2);
		input = THTensor_(newContiguous)(tinput);
	} else {
		input = THTensor_(newContiguous)(input);
	}

	THNN_(TemporalRowConvolution_shapeCheck)(
		state, input, NULL, weight, bias, kW, dW, padW);

	int64_t inputFrameSize = THTensor_sizeLegacyNoScalars(weight, 0);
	int64_t nInputFrame = input->size(ndim - 1);
	int64_t nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1;

	if (ndim == 2) { /* non-batch mode */
		THTensor_(resize3d)(finput, inputFrameSize, kW, nOutputFrame);
		THTensor_(resize2d)(output, inputFrameSize, nOutputFrame);
		THTensor_(zero)(finput);
		THTensor_(zero)(output);

		THNN_(TemporalRowConvolution_updateOutput_frame)
			(input, output, weight, bias, finput,
			 kW, dW, padW,
			 inputFrameSize, nInputFrame, nOutputFrame);
	} else {
		int64_t T = input->size(0); // batch size
		int64_t t;

		THTensor_(resize4d)(finput, T, inputFrameSize, kW, nOutputFrame);
		THTensor_(resize3d)(output, T, inputFrameSize, nOutputFrame);
		THTensor_(zero)(finput);
		THTensor_(zero)(output);

		// Samples are independent, so the batch loop parallelizes safely.
#pragma omp parallel for private(t)
		for (t = 0; t < T; t++) {
			THTensor *input_t = THTensor_(newSelect)(input, 0, t);
			THTensor *output_t = THTensor_(newSelect)(output, 0, t);
			THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

			THNN_(TemporalRowConvolution_updateOutput_frame)
				(input_t, output_t, weight, bias, finput_t,
				 kW, dW, padW,
				 inputFrameSize, nInputFrame, nOutputFrame);

			c10::raw::intrusive_ptr::decref(input_t);
			c10::raw::intrusive_ptr::decref(output_t);
			c10::raw::intrusive_ptr::decref(finput_t);
		}
	}

	if (!featFirst) { // NOTE: output will NOT be contiguous in this case
		THTensor_(transpose)(output, output, ndim - 1, ndim - 2);
		c10::raw::intrusive_ptr::decref(tinput);
	}

	c10::raw::intrusive_ptr::decref(input);
}

// Backward-input pass for one sample: multiply the transposed weight by the
// gradient, then fold the unfolded gradient buffer back into gradInput.
static void THNN_(TemporalRowConvolution_updateGradInput_frame)(
	THTensor *gradInput,
	THTensor *gradOutput,
	THTensor *weight,
	THTensor *fgradInput,
	int kW,
	int dW,
	int padW,
	int64_t inputFrameSize,
	int64_t nInputFrame,
	int64_t nOutputFrame) {

	THTensor *gradOutput3d = THTensor_(newWithStorage3d)(
		THTensor_getStoragePtr(gradOutput), gradOutput->storage_offset(),
		inputFrameSize, -1,
		1, -1,
		nOutputFrame, -1);

	// weight:       inputFrameSize x kW x 1
	// gradOutput3d: inputFrameSize x 1 x nOutputFrame
	THTensor_(baddbmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput3d);
	// fgradInput:   inputFrameSize x kW x nOutputFrame

	c10::raw::intrusive_ptr::decref(gradOutput3d);

	THTensor_(zero)(gradInput);

	THNN_(unfolded_acc_row)(fgradInput, gradInput, kW, dW, padW,
	                        inputFrameSize, nInputFrame, nOutputFrame);
}

// Backward entry point: computes gradInput from gradOutput.
void THNN_(TemporalRowConvolution_updateGradInput)(
	THNNState *state,
	THTensor *input,
	THTensor *gradOutput,
	THTensor *gradInput,
	THTensor *weight,
	THTensor *finput,
	THTensor *fgradInput,
	int kW,
	int dW,
	int padW,
	bool featFirst) {

	int ndim = input->dim();

	THTensor *tinput, *tgradOutput;

	if
(!featFirst) {
		// bring the feature dimension in front for both tensors
		tinput = THTensor_(newTranspose)(input, ndim - 1, ndim - 2);
		tgradOutput = THTensor_(newTranspose)(gradOutput, ndim - 1, ndim - 2);

		input = THTensor_(newContiguous)(tinput);
		gradOutput = THTensor_(newContiguous)(tgradOutput);

	} else {
		input = THTensor_(newContiguous)(input);
		gradOutput = THTensor_(newContiguous)(gradOutput);
	}

	THNN_(TemporalRowConvolution_shapeCheck)(state, input, gradOutput, weight,
	                                         NULL, kW, dW, padW);

	int64_t inputFrameSize = THTensor_sizeLegacyNoScalars(weight, 0);
	int64_t nInputFrame = input->size(ndim - 1);
	int64_t nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1;

	THTensor_(resizeAs)(fgradInput, finput);
	THTensor_(resizeAs)(gradInput, input);

	THTensor_(zero)(fgradInput);
	THTensor_(zero)(gradInput);

	// The frame helper multiplies by weight^T, so transpose once up front.
	THTensor *tweight = THTensor_(new)();
	THTensor_(transpose)(tweight, weight, 1, 2);

	if (ndim == 2) {
		THNN_(TemporalRowConvolution_updateGradInput_frame)
			(gradInput, gradOutput, tweight, fgradInput,
			 kW, dW, padW,
			 inputFrameSize, nInputFrame, nOutputFrame);
	} else {
		int64_t T = input->size(0); // batch size
		int64_t t;

		// Per-sample gradients are independent; parallelize over the batch.
#pragma omp parallel for private(t)
		for (t = 0; t < T; t++) {

			THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
			THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
			THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);

			THNN_(TemporalRowConvolution_updateGradInput_frame)
				(gradInput_t, gradOutput_t, tweight, fgradInput_t,
				 kW, dW, padW,
				 inputFrameSize, nInputFrame, nOutputFrame);

			c10::raw::intrusive_ptr::decref(gradInput_t);
			c10::raw::intrusive_ptr::decref(gradOutput_t);
			c10::raw::intrusive_ptr::decref(fgradInput_t);
		}
	}

	c10::raw::intrusive_ptr::decref(tweight);

	if (!featFirst) { // NOTE: gradInput will NOT be contiguous in this case
		c10::raw::intrusive_ptr::decref(tinput);
		c10::raw::intrusive_ptr::decref(tgradOutput);

		THTensor_(transpose)(gradInput, gradInput, ndim - 1, ndim - 2);
	}

	c10::raw::intrusive_ptr::decref(input);
	c10::raw::intrusive_ptr::decref(gradOutput);

}

static void
THNN_(TemporalRowConvolution_accGradParameters_frame)(
	// Parameter-gradient pass for one sample: accumulates (scaled) weight
	// gradients via a batched matmul and bias gradients by summing rows.
	THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias,
	THTensor *finput, scalar_t scale) {

	int64_t i;
	// View gradOutput as (rows x 1 x nOutputFrame) sharing storage.
	THTensor *gradOutput3d = THTensor_(newWithStorage3d)(
		THTensor_getStoragePtr(gradOutput), gradOutput->storage_offset(),
		gradOutput->size(0), -1,
		1, -1,
		gradOutput->size(1), -1);

	THTensor *tfinput = THTensor_(new)();
	THTensor_(transpose)(tfinput, finput, 1, 2);
	// gradOutput3d: inputFrameSize x 1 x nOutputFrame
	// finput:       inputFrameSize x nOutputFrame x kW
	THTensor_(baddbmm)(gradWeight, 1, gradWeight, scale, gradOutput3d, tfinput);
	// gradWeight:   inputFrameSize x 1 x kW
	c10::raw::intrusive_ptr::decref(tfinput);

	if (gradBias != NULL) {
		// gradBias[i] += scale * sum over output frames of row i
		for (i = 0; i < THTensor_sizeLegacyNoScalars(gradBias, 0); i++) {
			int64_t k;
			scalar_t sum = 0;
			scalar_t *data = THStorage_(data)(THTensor_getStoragePtr(gradOutput3d))
			                 + gradOutput3d->storage_offset()
			                 + i * gradOutput3d->stride(0);
			for (k = 0; k < gradOutput3d->size(2); k++) {
				sum += data[k];
			}
			(THStorage_(data)(THTensor_getStoragePtr(gradBias))
			 + gradBias->storage_offset())[i] += scale * sum;
		}
	}

	c10::raw::intrusive_ptr::decref(gradOutput3d);
}

// Parameter-gradient entry point: accumulates gradWeight/gradBias over the
// (possibly batched) gradOutput using the finput buffer saved by forward.
void THNN_(TemporalRowConvolution_accGradParameters)(
	THNNState *state,
	THTensor *input,
	THTensor *gradOutput,
	THTensor *gradWeight,
	THTensor *gradBias,
	THTensor *finput,
	THTensor *fgradInput,
	int kW,
	int dW,
	int padW,
	bool featFirst,
	accreal scale_) {

	scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);

	int ndim = input->dim();

	THTensor *tinput = NULL;
	THTensor *tgradOutput = NULL;

	if (!featFirst) {
		// bring the feature dimension in front for both tensors
		tinput = THTensor_(newTranspose)(input, ndim - 1, ndim - 2);
		tgradOutput = THTensor_(newTranspose)(gradOutput, ndim - 1, ndim - 2);

		input = THTensor_(newContiguous)(tinput);
		gradOutput = THTensor_(newContiguous)(tgradOutput);

	} else {
		input = THTensor_(newContiguous)(input);
		gradOutput = THTensor_(newContiguous)(gradOutput);
	}

	THNN_(TemporalRowConvolution_shapeCheck)
		(state, input, gradOutput, gradWeight, gradBias, kW, dW, padW);

	if (ndim == 2) {
		THNN_(TemporalRowConvolution_accGradParameters_frame)(
			gradOutput, gradWeight, gradBias, finput, scale);
	} else {
		int64_t T = input->size(0); // batch size
		int64_t t;

		// Sequential loop: every iteration accumulates into the shared
		// gradWeight/gradBias tensors.
		for (t = 0; t < T; t++) {
			THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
			THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

			THNN_(TemporalRowConvolution_accGradParameters_frame)(
				gradOutput_t, gradWeight, gradBias, finput_t, scale);

			c10::raw::intrusive_ptr::decref(gradOutput_t);
			c10::raw::intrusive_ptr::decref(finput_t);
		}
	}

	if (!featFirst) {
		c10::raw::intrusive_ptr::decref(tinput);
		c10::raw::intrusive_ptr::decref(tgradOutput);
	}

	c10::raw::intrusive_ptr::decref(input);
	c10::raw::intrusive_ptr::decref(gradOutput);
}

#endif
main.c
#include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif

/* Each member of the parallel team announces its id. Without OpenMP
 * support this compiles to an empty body. */
static void report_thread_id(void)
{
#if defined(_OPENMP)
    printf("Thread ID: %d\n", omp_get_thread_num());
#endif
}

/* Tiny OpenMP demo: request a 4-thread team, let each thread print its
 * id inside a parallel region, then report completion. Builds and runs
 * as a plain serial program when OpenMP is disabled. */
int main()
{
#if defined(_OPENMP)
    omp_set_num_threads(4);
#endif

#pragma omp parallel
    {
        report_thread_id();
    }

    printf("Done!\n");
    return 0;
}
clean.h
/****************************************************************************
* VCGLib                                                            o o     *
* Visual and Computer Graphics Library                            o     o   *
*                                                                _   O  _   *
* Copyright(C) 2004                                                \/)\/    *
* Visual Computing Lab                                            /\/|      *
* ISTI - Italian National Research Council                           |      *
*                                                                    \      *
* All rights reserved.                                                      *
*                                                                           *
* This program is free software; you can redistribute it and/or modify      *
* it under the terms of the GNU General Public License as published by      *
* the Free Software Foundation; either version 2 of the License, or         *
* (at your option) any later version.                                       *
*                                                                           *
* This program is distributed in the hope that it will be useful,           *
* but WITHOUT ANY WARRANTY; without even the implied warranty of            *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the             *
* GNU General Public License (http://www.gnu.org/licenses/gpl.txt)          *
* for more details.                                                         *
*                                                                           *
****************************************************************************/

#ifndef __VCGLIB_CLEAN
#define __VCGLIB_CLEAN

// VCG headers
#include <vcg/complex/complex.h>
#include <vcg/simplex/face/pos.h>
#include <vcg/simplex/face/topology.h>
#include <vcg/simplex/edge/topology.h>
#include <vcg/complex/algorithms/closest.h>
#include <vcg/space/index/grid_static_ptr.h>
#include <vcg/space/index/spatial_hashing.h>
#include <vcg/complex/algorithms/update/selection.h>
#include <vcg/complex/algorithms/update/flag.h>
#include <vcg/complex/algorithms/update/normal.h>
#include <vcg/complex/algorithms/update/topology.h>
#include <vcg/space/triangle3.h>

namespace vcg {
namespace tri{

// Flood-fill iterator over the faces of one connected component, driven by
// FF (face-face) adjacency and the mesh's per-face mark. Call start() with a
// seed face, then advance with ++ until completed() is true.
template <class ConnectedMeshType>
class ConnectedComponentIterator
{
public:
	typedef ConnectedMeshType MeshType;
	typedef typename MeshType::VertexType     VertexType;
	typedef typename MeshType::VertexPointer  VertexPointer;
	typedef typename MeshType::VertexIterator VertexIterator;
	typedef typename MeshType::ScalarType     ScalarType;
	typedef typename MeshType::FaceType       FaceType;
	typedef typename MeshType::FacePointer    FacePointer;
	typedef typename MeshType::FaceIterator   FaceIterator;
	typedef typename MeshType::ConstFaceIterator ConstFaceIterator;
	typedef typename MeshType::FaceContainer  FaceContainer;

public:
	// Pop the current face and push its unvisited FF neighbours.
	void operator ++()
	{
		FacePointer fpt=sf.top();
		sf.pop();
		for(int j=0;j<3;++j)
			if( !face::IsBorder(*fpt,j) )
			{
				FacePointer l=fpt->FFp(j);
				if( !tri::IsMarked(*mp,l) )
				{
					tri::Mark(*mp,l);
					sf.push(l);
				}
			}
	}

	// Initialise the traversal from seed face p (must be valid and not deleted).
	void start(MeshType &m, FacePointer p)
	{
		tri::RequirePerFaceMark(m);
		mp=&m;
		while(!sf.empty()) sf.pop();
		UnMarkAll(m);
		assert(p);
		assert(!p->IsD());
		tri::Mark(m,p);
		sf.push(p);
	}

	// True when every face of the component has been visited.
	bool completed() {
		return sf.empty();
	}

	// Current face of the traversal.
	FacePointer operator *()
	{
		return sf.top();
	}
private:
	std::stack<FacePointer> sf; // frontier of faces still to expand
	MeshType *mp;               // mesh being traversed
};

///
/** \addtogroup trimesh */
/*@{*/
/// Class of static functions to clean/restore meshes.
template <class CleanMeshType>
class Clean
{

public:
	typedef CleanMeshType MeshType;
	typedef typename MeshType::VertexType         VertexType;
	typedef typename MeshType::VertexPointer      VertexPointer;
	typedef typename MeshType::VertexIterator     VertexIterator;
	typedef typename MeshType::ConstVertexIterator ConstVertexIterator;
	typedef typename MeshType::EdgeIterator       EdgeIterator;
	typedef typename MeshType::EdgePointer        EdgePointer;
	typedef typename MeshType::CoordType          CoordType;
	typedef typename MeshType::ScalarType         ScalarType;
	typedef typename MeshType::FaceType           FaceType;
	typedef typename MeshType::FacePointer        FacePointer;
	typedef typename MeshType::FaceIterator       FaceIterator;
	typedef typename MeshType::ConstFaceIterator  ConstFaceIterator;
	typedef typename MeshType::FaceContainer      FaceContainer;
	typedef typename vcg::Box3<ScalarType>        Box3Type;

	typedef GridStaticPtr<FaceType, ScalarType > TriMeshGrid;

	/* comparison class for the duplicate-vertex removal algorithm */
	class RemoveDuplicateVert_Compare{
	public:
		inline bool operator()(VertexPointer const &a, VertexPointer const &b)
		{
			// equal positions fall back to pointer order so the ordering is strict-weak
			return ((*a).cP() == (*b).cP()) ?
(a<b): ((*a).cP() < (*b).cP());
		}
	};

	/** This function removes all duplicate vertices of the mesh by looking only at their spatial positions.
	 * Note that it does not update any topology relation that could be affected by this like the VT or TT relation.
	 * the reason this function is usually performed BEFORE building any topology information.
	 */
	static int RemoveDuplicateVertex( MeshType & m, bool RemoveDegenerateFlag=true)    // V1.0
	{
		if(m.vert.size()==0 || m.vn==0) return 0;

		// maps each duplicate vertex to its surviving representative
		std::map<VertexPointer, VertexPointer> mp;
		size_t i,j;
		VertexIterator vi;
		int deleted=0;
		int k=0;
		size_t num_vert = m.vert.size();
		std::vector<VertexPointer> perm(num_vert);
		for(vi=m.vert.begin(); vi!=m.vert.end(); ++vi, ++k)
			perm[k] = &(*vi);

		// sort pointers by position so duplicates become adjacent
		RemoveDuplicateVert_Compare c_obj;
		std::sort(perm.begin(),perm.end(),c_obj);

		j = 0;
		i = j;
		mp[perm[i]] = perm[j];
		++i;
		// scan the sorted array: runs of equal positions collapse onto perm[j]
		for(;i!=num_vert;)
		{
			if( (! (*perm[i]).IsD()) &&
			    (! (*perm[j]).IsD()) &&
			    (*perm[i]).P() == (*perm[j]).cP() )
			{
				VertexPointer t = perm[i];
				mp[perm[i]] = perm[j];
				++i;
				Allocator<MeshType>::DeleteVertex(m,*t);
				deleted++;
			}
			else
			{
				j = i;
				++i;
			}
		}

		// redirect face and edge vertex references to the survivors
		for(FaceIterator fi = m.face.begin(); fi!=m.face.end(); ++fi)
			if( !(*fi).IsD() )
				for(k = 0; k < (*fi).VN(); ++k)
					if( mp.find( (typename MeshType::VertexPointer)(*fi).V(k) ) != mp.end() )
					{
						(*fi).V(k) = &*mp[ (*fi).V(k) ];
					}

		for(EdgeIterator ei = m.edge.begin(); ei!=m.edge.end(); ++ei)
			if( !(*ei).IsD() )
				for(k = 0; k < 2; ++k)
					if( mp.find( (typename MeshType::VertexPointer)(*ei).V(k) ) != mp.end() )
					{
						(*ei).V(k) = &*mp[ (*ei).V(k) ];
					}

		// collapsing duplicates can create degenerate elements; optionally purge them
		if(RemoveDegenerateFlag) RemoveDegenerateFace(m);
		if(RemoveDegenerateFlag && m.en>0) {
			RemoveDegenerateEdge(m);
			RemoveDuplicateEdge(m);
		}
		return deleted;
	}

	// An edge as an order-independent pair of vertex indices, remembering the
	// edge it came from. Sorting groups identical edges together.
	class SortedPair
	{
	public:
		SortedPair() {}
		SortedPair(unsigned int v0, unsigned int v1, EdgePointer _fp)
		{
			v[0]=v0;v[1]=v1;
			fp=_fp;
			if(v[0]>v[1]) std::swap(v[0],v[1]);
		}
		bool operator < (const SortedPair &p) const
		{
			return (v[1]!=p.v[1])?(v[1]<p.v[1]):
			       (v[0]<p.v[0]);
		}

		bool operator == (const SortedPair &s) const
		{
			if( (v[0]==s.v[0]) && (v[1]==s.v[1]) ) return true;
			return false;
		}

		unsigned int v[2];
		EdgePointer fp;
	};

	// A face as a sorted triple of vertex indices, remembering the face it
	// came from. Sorting groups identical faces together.
	class SortedTriple
	{
	public:
		SortedTriple() {}
		SortedTriple(unsigned int v0, unsigned int v1, unsigned int v2,FacePointer _fp)
		{
			v[0]=v0;v[1]=v1;v[2]=v2;
			fp=_fp;
			std::sort(v,v+3);
		}
		bool operator < (const SortedTriple &p) const
		{
			return (v[2]!=p.v[2])?(v[2]<p.v[2]):
			       (v[1]!=p.v[1])?(v[1]<p.v[1]):
			       (v[0]<p.v[0]);
		}

		bool operator == (const SortedTriple &s) const
		{
			if( (v[0]==s.v[0]) && (v[1]==s.v[1]) && (v[2]==s.v[2]) ) return true;
			return false;
		}

		unsigned int v[3];
		FacePointer fp;
	};

	/** This function removes all duplicate faces of the mesh by looking only at their vertex reference.
	    So it should be called after unification of vertices.
	    Note that it does not update any topology relation that could be affected by this like the VT or TT relation.
	    the reason this function is usually performed BEFORE building any topology information.
	*/
	static int RemoveDuplicateFace( MeshType & m)    // V1.0
	{
		std::vector<SortedTriple> fvec;
		for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi)
			if(!(*fi).IsD())
			{
				fvec.push_back(SortedTriple(	tri::Index(m,(*fi).V(0)),
				                                tri::Index(m,(*fi).V(1)),
				                                tri::Index(m,(*fi).V(2)),
				                                &*fi));
			}
		assert (size_t(m.fn) == fvec.size());
		std::sort(fvec.begin(),fvec.end());
		int total=0;
		// identical triples are adjacent after sorting; delete the duplicates
		for(int i=0;i<int(fvec.size())-1;++i)
		{
			if(fvec[i]==fvec[i+1])
			{
				total++;
				tri::Allocator<MeshType>::DeleteFace(m, *(fvec[i].fp) );
			}
		}
		return total;
	}

	/** This function removes all duplicate edges of the mesh by looking only at their vertex reference.
	    So it should be called after unification of vertices.
	    (NOTE(review): original comment was a copy-paste of the RemoveDuplicateFace one.)
	*/
	static int RemoveDuplicateEdge( MeshType & m)    // V1.0
	{
		if (m.en==0) return 0;
		std::vector<SortedPair> eVec;
		for(EdgeIterator ei=m.edge.begin();ei!=m.edge.end();++ei)
			if(!(*ei).IsD())
			{
				eVec.push_back(SortedPair(	tri::Index(m,(*ei).V(0)), tri::Index(m,(*ei).V(1)), &*ei));
			}
		assert (size_t(m.en) == eVec.size());
		//for(int i=0;i<fvec.size();++i) qDebug("fvec[%i] = (%i %i %i)(%i)",i,fvec[i].v[0],fvec[i].v[1],fvec[i].v[2],tri::Index(m,fvec[i].fp));
		std::sort(eVec.begin(),eVec.end());
		int total=0;
		for(int i=0;i<int(eVec.size())-1;++i)
		{
			if(eVec[i]==eVec[i+1])
			{
				total++;
				tri::Allocator<MeshType>::DeleteEdge(m, *(eVec[i].fp) );
				//qDebug("deleting face %i (pos in fvec %i)",tri::Index(m,fvec[i].fp) ,i);
			}
		}
		return total;
	}

	// Counts unreferenced vertices without deleting them.
	static int CountUnreferencedVertex( MeshType& m)
	{
		return RemoveUnreferencedVertex(m,false);
	}

	/** This function removes that are not referenced by any face.
	    The function updates the vn counter.
	    @param m The mesh
	    @return The number of removed vertices
	*/
	static int RemoveUnreferencedVertex( MeshType& m, bool DeleteVertexFlag=true)   // V1.0
	{
		FaceIterator fi;
		EdgeIterator ei;
		VertexIterator vi;
		int referredBit = VertexType::NewBitFlag(); // temporary per-vertex flag
		int j;
		int deleted = 0;

		for(vi=m.vert.begin();vi!=m.vert.end();++vi)
			(*vi).ClearUserBit(referredBit);

		// mark every vertex referenced by a live face or edge
		for(fi=m.face.begin();fi!=m.face.end();++fi)
			if( !(*fi).IsD() )
				for(j=0;j<(*fi).VN();++j)
					(*fi).V(j)->SetUserBit(referredBit);

		for(ei=m.edge.begin();ei!=m.edge.end();++ei)
			if( !(*ei).IsD() ){
				(*ei).V(0)->SetUserBit(referredBit);
				(*ei).V(1)->SetUserBit(referredBit);
			}

		for(vi=m.vert.begin();vi!=m.vert.end();++vi)
			if( (!(*vi).IsD()) && (!(*vi).IsUserBit(referredBit)))
			{
				if(DeleteVertexFlag) Allocator<MeshType>::DeleteVertex(m,*vi);
				++deleted;
			}
		VertexType::DeleteBitFlag(referredBit);
		return deleted;
	}

	/** Degenerate vertices are vertices that have coords with invalid floating point values,
	    All the faces incident on deleted vertices are also deleted */
	static int RemoveDegenerateVertex(MeshType& m)
	{
		VertexIterator vi;
		int
count_vd = 0;
		// delete every vertex with a NaN coordinate
		for(vi=m.vert.begin(); vi!=m.vert.end();++vi)
			if(math::IsNAN( (*vi).P()[0]) ||
			   math::IsNAN( (*vi).P()[1]) ||
			   math::IsNAN( (*vi).P()[2]) )
			{
				count_vd++;
				Allocator<MeshType>::DeleteVertex(m,*vi);
			}

		// cascade: delete faces that reference a just-deleted vertex
		FaceIterator fi;
		int count_fd = 0;
		for(fi=m.face.begin(); fi!=m.face.end();++fi)
			if(!(*fi).IsD())
				if( (*fi).V(0)->IsD() ||
				    (*fi).V(1)->IsD() ||
				    (*fi).V(2)->IsD() )
				{
					count_fd++;
					Allocator<MeshType>::DeleteFace(m,*fi);
				}
		return count_vd;
	}

	/** Degenerate faces are faces that are Topologically degenerate,
	    i.e. have two or more vertex reference that link the same vertex
	    (and not only two vertexes with the same coordinates).
	    All Degenerate faces are zero area faces BUT not all zero area faces are degenerate.
	    We do not take care of topology because when we have degenerate faces the
	    topology calculation functions crash.
	*/
	static int RemoveDegenerateFace(MeshType& m)
	{
		int count_fd = 0;

		for(FaceIterator fi=m.face.begin(); fi!=m.face.end();++fi)
			if(!(*fi).IsD())
			{
				if((*fi).V(0) == (*fi).V(1) ||
				   (*fi).V(0) == (*fi).V(2) ||
				   (*fi).V(1) == (*fi).V(2) )
				{
					count_fd++;
					Allocator<MeshType>::DeleteFace(m,*fi);
				}
			}
		return count_fd;
	}

	// An edge is degenerate when its two endpoints are the same vertex.
	static int RemoveDegenerateEdge(MeshType& m)
	{
		int count_ed = 0;

		for(EdgeIterator ei=m.edge.begin(); ei!=m.edge.end();++ei)
			if(!(*ei).IsD())
			{
				if((*ei).V(0) == (*ei).V(1) )
				{
					count_ed++;
					Allocator<MeshType>::DeleteEdge(m,*ei);
				}
			}
		return count_ed;
	}

	// Deletes non manifold vertices (as selected by CountNonManifoldVertexFF)
	// together with all their incident faces. Returns the number of removed vertices.
	static int RemoveNonManifoldVertex(MeshType& m)
	{
		CountNonManifoldVertexFF(m,true); // leaves non manifold vertices selected
		tri::UpdateSelection<MeshType>::FaceFromVertexLoose(m);
		int count_removed = 0;
		FaceIterator fi;
		for(fi=m.face.begin(); fi!=m.face.end();++fi)
			if(!(*fi).IsD() && (*fi).IsS())
				Allocator<MeshType>::DeleteFace(m,*fi);
		VertexIterator vi;
		for(vi=m.vert.begin(); vi!=m.vert.end();++vi)
			if(!(*vi).IsD() && (*vi).IsS()) {
				++count_removed;
				Allocator<MeshType>::DeleteVertex(m,*vi);
			}
		return count_removed;
	}

	// For each selected vertex of an edge mesh, give each incident edge its
	// own copy of that vertex. Returns the number of vertices added.
	static int SplitSelectedVertexOnEdgeMesh(MeshType& m)
	{
		tri::RequireCompactness(m);
		tri::UpdateFlags<MeshType>::VertexClearV(m);
		int count_split = 0;
		for(size_t i=0;i<m.edge.size();++i)
		{
			for(int j=0;j<2;++j)
			{
				VertexPointer vp = m.edge[i].V(j);
				if(vp->IsS())
				{
					// NOTE(review): this branch splits on the FIRST encounter and the
					// else-arm SetV() is only reachable once V is already set (never, as
					// written) — the polarity looks inverted relative to the usual
					// visited-flag idiom; verify against upstream before relying on it.
					if(!vp->IsV())
					{
						m.edge[i].V(j) = &*(tri::Allocator<MeshType>::AddVertex(m,vp->P()));
						++count_split;
					}
					else
					{
						vp->SetV();
					}
				}
			}
		}
		return count_split;
	}

	// Selects vertices of an edge mesh with more than two incident edges.
	static void SelectNonManifoldVertexOnEdgeMesh(MeshType &m)
	{
		tri::RequireCompactness(m);
		tri::UpdateSelection<MeshType>::VertexClear(m);
		std::vector<int> cnt(m.vn,0); // per-vertex incident-edge count

		for(size_t i=0;i<m.edge.size();++i)
		{
			cnt[tri::Index(m,m.edge[i].V(0))]++;
			cnt[tri::Index(m,m.edge[i].V(1))]++;
		}
		for(size_t i=0;i<m.vert.size();++i)
			if(cnt[i]>2) m.vert[i].SetS();
	}

	// Selects two-valence vertices of an edge mesh where the two incident
	// edges bend more than AngleRadThr (radians).
	static void SelectCreaseVertexOnEdgeMesh(MeshType &m, ScalarType AngleRadThr)
	{
		tri::RequireCompactness(m);
		tri::RequireVEAdjacency(m);
		tri::UpdateTopology<MeshType>::VertexEdge(m);
		for(size_t i=0;i<m.vert.size();++i)
		{
			std::vector<VertexPointer> VVStarVec;
			edge::VVStarVE(&(m.vert[i]),VVStarVec);
			if(VVStarVec.size()==2)
			{
				CoordType v0 = m.vert[i].P() - VVStarVec[0]->P();
				CoordType v1 = m.vert[i].P() - VVStarVec[1]->P();
				// deviation from a straight line through the vertex
				float angle = M_PI-vcg::Angle(v0,v1);
				if(angle > AngleRadThr) m.vert[i].SetS();
			}
		}
	}

	// Given a mesh with FF adjacency, searches for non manifold vertices and
	// duplicates them. Duplicated vertices are moved apart according to the move
	// threshold param, which is a percentage of the average vector from the non
	// manifold vertex to the barycenter of the incident faces.
	// (NOTE(review): the original leading "/// Removal of faces..." line looked
	// copy-pasted from RemoveNonManifoldFace.)
static int SplitNonManifoldVertex(MeshType& m, ScalarType moveThreshold) { RequireFFAdjacency(m); typedef std::pair<FacePointer,int> FaceInt; // a face and the index of the vertex that we have to change // std::vector<std::pair<VertexPointer, std::vector<FaceInt> > >ToSplitVec; SelectionStack<MeshType> ss(m); ss.push(); CountNonManifoldVertexFF(m,true); UpdateFlags<MeshType>::VertexClearV(m); for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD()) { for(int i=0;i<3;i++) if((*fi).V(i)->IsS() && !(*fi).V(i)->IsV()) { (*fi).V(i)->SetV(); face::Pos<FaceType> startPos(&*fi,i); face::Pos<FaceType> curPos = startPos; std::set<FaceInt> faceSet; do { faceSet.insert(std::make_pair(curPos.F(),curPos.VInd())); curPos.NextE(); } while (curPos != startPos); ToSplitVec.push_back(make_pair((*fi).V(i),std::vector<FaceInt>())); typename std::set<FaceInt>::const_iterator iii; for(iii=faceSet.begin();iii!=faceSet.end();++iii) ToSplitVec.back().second.push_back(*iii); } } ss.pop(); // Second step actually add new vertices and split them. typename tri::Allocator<MeshType>::template PointerUpdater<VertexPointer> pu; VertexIterator firstVp = tri::Allocator<MeshType>::AddVertices(m,ToSplitVec.size(),pu); for(size_t i =0;i<ToSplitVec.size();++i) { // qDebug("Splitting Vertex %i",ToSplitVec[i].first-&*m.vert.begin()); VertexPointer np=ToSplitVec[i].first; pu.Update(np); firstVp->ImportData(*np); // loop on the face to be changed, and also compute the movement vector; CoordType delta(0,0,0); for(size_t j=0;j<ToSplitVec[i].second.size();++j) { FaceInt ff=ToSplitVec[i].second[j]; ff.first->V(ff.second)=&*firstVp; delta+=Barycenter(*(ff.first))-np->cP(); } delta /= ToSplitVec[i].second.size(); firstVp->P() = firstVp->P() + delta * moveThreshold; firstVp++; } return ToSplitVec.size(); } // Auxiliary function for sorting the non manifold faces according to their area. 
Used in RemoveNonManifoldFace struct CompareAreaFP { bool operator ()(FacePointer const& f1, FacePointer const& f2) const { return DoubleArea(*f1) < DoubleArea(*f2); } }; /// Removal of faces that were incident on a non manifold edge. static int RemoveNonManifoldFace(MeshType& m) { FaceIterator fi; int count_fd = 0; std::vector<FacePointer> ToDelVec; for(fi=m.face.begin(); fi!=m.face.end();++fi) if (!fi->IsD()) { if ((!IsManifold(*fi,0))|| (!IsManifold(*fi,1))|| (!IsManifold(*fi,2))) ToDelVec.push_back(&*fi); } std::sort(ToDelVec.begin(),ToDelVec.end(),CompareAreaFP()); for(size_t i=0;i<ToDelVec.size();++i) { if(!ToDelVec[i]->IsD()) { FaceType &ff= *ToDelVec[i]; if ((!IsManifold(ff,0))|| (!IsManifold(ff,1))|| (!IsManifold(ff,2))) { for(int j=0;j<3;++j) if(!face::IsBorder<FaceType>(ff,j)) vcg::face::FFDetach<FaceType>(ff,j); Allocator<MeshType>::DeleteFace(m,ff); count_fd++; } } } return count_fd; } /* The following functions remove faces that are geometrically "bad" according to edges and area criteria. They remove the faces that are out of a given range of area or edges (e.g. faces too large or too small, or with edges too short or too long) but that could be topologically correct. These functions can optionally take into account only the selected faces. */ template<bool Selected> static int RemoveFaceOutOfRangeAreaSel(MeshType& m, ScalarType MinAreaThr=0, ScalarType MaxAreaThr=(std::numeric_limits<ScalarType>::max)()) { FaceIterator fi; int count_fd = 0; MinAreaThr*=2; MaxAreaThr*=2; for(fi=m.face.begin(); fi!=m.face.end();++fi) if(!(*fi).IsD()) if(!Selected || (*fi).IsS()) { const ScalarType doubleArea=DoubleArea<FaceType>(*fi); if((doubleArea<=MinAreaThr) || (doubleArea>=MaxAreaThr) ) { Allocator<MeshType>::DeleteFace(m,*fi); count_fd++; } } return count_fd; } // alias for the old style. 
Kept for backward compatibility static int RemoveZeroAreaFace(MeshType& m) { return RemoveFaceOutOfRangeArea(m);} // Aliases for the functions that do not look at selection static int RemoveFaceOutOfRangeArea(MeshType& m, ScalarType MinAreaThr=0, ScalarType MaxAreaThr=(std::numeric_limits<ScalarType>::max)()) { return RemoveFaceOutOfRangeAreaSel<false>(m,MinAreaThr,MaxAreaThr); } /** * Is the mesh only composed by quadrilaterals? */ static bool IsBitQuadOnly(const MeshType &m) { typedef typename MeshType::FaceType F; tri::RequirePerFaceFlags(m); for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD()) { unsigned int tmp = fi->Flags()&(F::FAUX0|F::FAUX1|F::FAUX2); if ( tmp != F::FAUX0 && tmp != F::FAUX1 && tmp != F::FAUX2) return false; } return true; } static bool IsFaceFauxConsistent(MeshType &m) { RequirePerFaceFlags(m); RequireFFAdjacency(m); for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi) if(!(*fi).IsD()) { for(int z=0;z<(*fi).VN();++z) { FacePointer fp = fi->FFp(z); int zp = fi->FFi(z); if(fi->IsF(z) != fp->IsF(zp)) return false; } } return true; } /** * Is the mesh only composed by triangles? (non polygonal faces) */ static bool IsBitTriOnly(const MeshType &m) { tri::RequirePerFaceFlags(m); for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) { if ( !fi->IsD() && fi->IsAnyF() ) return false; } return true; } static bool IsBitPolygonal(const MeshType &m){ return !IsBitTriOnly(m); } /** * Is the mesh only composed by quadrilaterals and triangles? (no pentas, etc) * It assumes that the bits are consistent. In that case there can be only a single faux edge. 
*/
static bool IsBitTriQuadOnly(const MeshType &m)
{
  tri::RequirePerFaceFlags(m);
  typedef typename MeshType::FaceType F;
  for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD()) {
    // zero faux edges -> triangle, exactly one -> half of a quad;
    // anything else means a polygon with more than four sides.
    unsigned int tmp = fi->cFlags()&(F::FAUX0|F::FAUX1|F::FAUX2);
    if ( tmp!=F::FAUX0 && tmp!=F::FAUX1 && tmp!=F::FAUX2 && tmp!=0 ) return false;
  }
  return true;
}

/**
 * How many quadrilaterals?
 * It assumes that the bits are consistent. In that case we count the tris with a single faux edge and divide by two.
 */
static int CountBitQuads(const MeshType &m)
{
  tri::RequirePerFaceFlags(m);
  typedef typename MeshType::FaceType F;
  int count=0;
  for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD()) {
    unsigned int tmp = fi->cFlags()&(F::FAUX0|F::FAUX1|F::FAUX2);
    if ( tmp==F::FAUX0 || tmp==F::FAUX1 || tmp==F::FAUX2) count++;
  }
  // each quad is stored as two triangles sharing one faux edge
  return count / 2;
}

/**
 * How many triangles? (non polygonal faces)
 */
static int CountBitTris(const MeshType &m)
{
  tri::RequirePerFaceFlags(m);
  int count=0;
  for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD()) {
    if (!(fi->IsAnyF())) count++;
  }
  return count;
}

/**
 * How many polygons of any kind? (including triangles)
 * it assumes that there are no faux vertexes (e.g vertices completely surrounded by faux edges)
 */
static int CountBitPolygons(const MeshType &m)
{
  tri::RequirePerFaceFlags(m);
  int count = 0;
  for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD()) {
    if (fi->IsF(0)) count++;
    if (fi->IsF(1)) count++;
    if (fi->IsF(2)) count++;
  }
  // every faux edge is counted twice (once per adjacent triangle) and hides
  // exactly one triangular face
  return m.fn - count/2;
}

/**
 * The number of polygonal faces is
 * FN - EN_f (each faux edge hides exactly one triangular face or in other words a polygon of n edges has n-3 faux edges.)
 * In the general case where a The number of polygonal faces is
 * FN - EN_f + VN_f
 * where:
 * EN_f is the number of faux edges.
 * VN_f is the number of faux vertices (e.g vertices completely surrounded by faux edges)
 * as a intuitive proof think to a internal vertex that is collapsed onto a border of a polygon:
 * it deletes 2 faces, 1 faux edges and 1 vertex so to keep the balance you have to add back the removed vertex.
 */
static int CountBitLargePolygons(MeshType &m)
{
  tri::RequirePerFaceFlags(m);
  UpdateFlags<MeshType>::VertexSetV(m);
  // First loop Clear all referenced vertices
  // (unreferenced vertices keep the V flag so they are NOT counted as faux)
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
    if (!fi->IsD())  for(int i=0;i<3;++i) fi->V(i)->ClearV();

  // Second Loop, count (twice) faux edges and mark all vertices touched by non faux edges
  // (e.g vertexes on the boundary of a polygon)
  int countE = 0;
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
    if (!fi->IsD())
    {
      for(int i=0;i<3;++i)
      {
        if (fi->IsF(i))
          countE++;
        else
        {
          fi->V0(i)->SetV();
          fi->V1(i)->SetV();
        }
      }
    }
  // Third Loop, count the number of referenced vertexes that are completely surrounded by faux edges.
  int countV = 0;
  for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi)
    if (!vi->IsD() && !vi->IsV()) countV++;

  return m.fn - countE/2 + countV ;
}

/**
 * Checks that the mesh has consistent per-face faux edges
 * (the ones that merges triangles into larger polygons).
 * A border edge should never be faux, and faux edges should always be
 * reciprocated by another faux edges.
 * It requires FF adjacency.
 */
static bool HasConsistentPerFaceFauxFlag(const MeshType &m)
{
  RequireFFAdjacency(m);
  RequirePerFaceFlags(m);

  for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
    if(!(*fi).IsD())
      for (int k=0; k<3; k++)
        if( ( fi->IsF(k) != fi->cFFp(k)->IsF(fi->cFFi(k)) ) ||
            ( fi->IsF(k) && face::IsBorder(*fi,k)) )
        {
          return false;
        }
  return true;
}

/**
 * Count the number of non manifold edges in a polylinemesh, e.g. the edges where there are more than 2 incident faces.
* */
static int CountNonManifoldEdgeEE( MeshType & m, bool SelectFlag=false)
{
  assert(m.fn == 0 && m.en >0); // just to be sure we are using an edge mesh...
  RequireEEAdjacency(m);
  tri::UpdateTopology<MeshType>::EdgeEdge(m);

  if(SelectFlag) UpdateSelection<MeshType>::VertexClear(m);

  int nonManifoldCnt=0;
  SimpleTempData<typename MeshType::VertContainer, int > TD(m.vert,0);

  // First Loop, just count how many faces are incident on a vertex and store it in the TemporaryData Counter.
  EdgeIterator ei;
  for (ei = m.edge.begin(); ei != m.edge.end(); ++ei) if (!ei->IsD())
  {
    TD[(*ei).V(0)]++;
    TD[(*ei).V(1)]++;
  }

  tri::UpdateFlags<MeshType>::VertexClearV(m);
  // Second Loop, Check that each vertex have been seen 1 or 2 times.
  for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi) if (!vi->IsD())
  {
    if( TD[vi] >2 )
    {
      if(SelectFlag) (*vi).SetS();
      nonManifoldCnt++;
    }
  }
  return nonManifoldCnt;
}

/**
 * Count the number of non manifold edges in a mesh, e.g. the edges where there are more than 2 incident faces.
 *
 * Note that this test is not enough to say that a mesh is two manifold,
 * you have to count also the non manifold vertexes.
 */
static int CountNonManifoldEdgeFF( MeshType & m, bool SelectFlag=false)
{
  RequireFFAdjacency(m);
  // one user bit per face-edge index, used to visit each non-manifold edge
  // ring only once.
  // NOTE(review): these three user bits are never released with
  // DeleteBitFlag — confirm against the FaceType bit-allocation convention.
  int nmfBit[3];
  nmfBit[0]= FaceType::NewBitFlag();
  nmfBit[1]= FaceType::NewBitFlag();
  nmfBit[2]= FaceType::NewBitFlag();

  UpdateFlags<MeshType>::FaceClear(m,nmfBit[0]+nmfBit[1]+nmfBit[2]);

  if(SelectFlag){
    UpdateSelection<MeshType>::VertexClear(m);
    UpdateSelection<MeshType>::FaceClear(m);
  }

  int edgeCnt = 0;
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
  {
    if (!fi->IsD())
    {
      for(int i=0;i<3;++i)
        if(!IsManifold(*fi,i))
        {
          if(!(*fi).IsUserBit(nmfBit[i]))
          {
            ++edgeCnt;
            if(SelectFlag)
            {
              (*fi).V0(i)->SetS();
              (*fi).V1(i)->SetS();
            }
            // follow the ring of faces incident on edge i;
            face::Pos<FaceType> nmf(&*fi,i);
            do
            {
              if(SelectFlag) nmf.F()->SetS();
              nmf.F()->SetUserBit(nmfBit[nmf.E()]);
              nmf.NextF();
            }
            while(nmf.f != &*fi);
          }
        }
    }
  }
  return edgeCnt;
}

/** Count (and eventually select) non 2-Manifold vertexes of a mesh
 * e.g. the vertices with a non 2-manif. neighbourhood but that do not belong to not 2-manif edges.
 * typical situation two cones connected by one vertex.
 */
static int CountNonManifoldVertexFF( MeshType & m, bool selectVert = true )
{
  RequireFFAdjacency(m);
  if(selectVert) UpdateSelection<MeshType>::VertexClear(m);

  int nonManifoldCnt=0;
  SimpleTempData<typename MeshType::VertContainer, int > TD(m.vert,0);

  // First Loop, just count how many faces are incident on a vertex and store it in the TemporaryData Counter.
  FaceIterator fi;
  for (fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
  {
    TD[(*fi).V(0)]++;
    TD[(*fi).V(1)]++;
    TD[(*fi).V(2)]++;
  }

  tri::UpdateFlags<MeshType>::VertexClearV(m);
  // Second Loop.
  // mark out of the game the vertexes that are incident on non manifold edges.
  for (fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
  {
    for(int i=0;i<3;++i)
      if (!IsManifold(*fi,i))
      {
        (*fi).V0(i)->SetV();
        (*fi).V1(i)->SetV();
      }
  }
  // Third Loop, for safe vertexes, check that the number of faces that you can reach starting
  // from it and using FF is the same of the previously counted.
  for (fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
  {
    for(int i=0;i<3;i++) if(!(*fi).V(i)->IsV()){
      (*fi).V(i)->SetV();
      face::Pos<FaceType> pos(&(*fi),i);
      int starSizeFF = pos.NumberOfIncidentFaces();
      if (starSizeFF != TD[(*fi).V(i)])
      {
        if(selectVert) (*fi).V(i)->SetS();
        nonManifoldCnt++;
      }
    }
  }
  return nonManifoldCnt;
}

/// Very simple test of water tightness. No boundary and no non manifold edges.
/// Assume that it is orientable.
/// It could be debated if a closed non orientable surface is watertight or not.
///
/// The rationale of not testing orientability here is that
/// it requires FFAdj while this test do not require any adjacency.
///
static bool IsWaterTight(MeshType & m)
{
  int edgeNum=0,edgeBorderNum=0,edgeNonManifNum=0;
  CountEdgeNum(m, edgeNum, edgeBorderNum,edgeNonManifNum);
  return (edgeBorderNum==0) && (edgeNonManifNum==0);
}

// Count the total / border / non-manifold edges of the mesh by sorting a
// flat vector of all (vertex-pair) edges and counting how many faces share
// each one.
static void CountEdgeNum( MeshType & m, int &total_e, int &boundary_e, int &non_manif_e )
{
  std::vector< typename tri::UpdateTopology<MeshType>::PEdge > edgeVec;
  tri::UpdateTopology<MeshType>::FillEdgeVector(m,edgeVec,true);
  sort(edgeVec.begin(), edgeVec.end()); // sort them by vertex pair so copies of the same edge are adjacent
  total_e=0;
  boundary_e=0;
  non_manif_e=0;

  size_t f_on_cur_edge =1;
  for(size_t i=0;i<edgeVec.size();++i)
  {
    // a run of equal entries ends here: classify the edge by its face count
    if(( (i+1) == edgeVec.size()) || !(edgeVec[i] == edgeVec[i+1]))
    {
      ++total_e;
      if(f_on_cur_edge==1)
        ++boundary_e;
      if(f_on_cur_edge>2)
        ++non_manif_e;
      f_on_cur_edge=1;
    }
    else
    {
      ++f_on_cur_edge;
    }
  } // end for
}

// Count the boundary loops ("holes") of the mesh by walking each border
// once; returns the number of loops found.
static int CountHoles( MeshType & m)
{
  int numholev=0;
  FaceIterator fi;
  FaceIterator gi;
  vcg::face::Pos<FaceType> he;
  vcg::face::Pos<FaceType> hei;
  std::vector< std::vector<CoordType> > holes; //indices of vertices
vcg::tri::UpdateFlags<MeshType>::VertexClearS(m);

gi=m.face.begin(); fi=gi;

for(fi=m.face.begin();fi!=m.face.end();fi++)//for all faces do
{
  for(int j=0;j<3;j++)//for all edges
  {
    if(fi->V(j)->IsS()) continue;

    if(face::IsBorder(*fi,j))//found an unvisited border edge
    {
      he.Set(&(*fi),j,fi->V(j)); //set the face-face iterator to the current face, edge and vertex
      std::vector<CoordType> hole; //start of a new hole
      hole.push_back(fi->P(j)); // including the first vertex
      numholev++;
      he.v->SetS(); //set the current vertex as selected
      he.NextB(); //go to the next boundary edge
      while(fi->V(j) != he.v)//will we do not encounter the first boundary edge.
      {
        CoordType newpoint = he.v->P(); //select its vertex.
        if(he.v->IsS())//check if this vertex was selected already, because then we have an additional hole.
        {
          //cut and paste the additional hole.
          std::vector<CoordType> hole2;
          int index = static_cast<int>(find(hole.begin(),hole.end(),newpoint) - hole.begin());
          for(unsigned int i=index; i<hole.size(); i++)
            hole2.push_back(hole[i]);

          hole.resize(index);
          if(hole2.size()!=0) //annoying in degenerate cases
            holes.push_back(hole2);
        }
        hole.push_back(newpoint);
        numholev++;
        he.v->SetS(); //set the current vertex as selected
        he.NextB(); //go to the next boundary edge
      }
      holes.push_back(hole);
    }
  }
}
return static_cast<int>(holes.size());
}

/*
Compute the set of connected components of a given mesh.
it fills a vector of pair < int , faceptr > with, for each connecteed component its size and a represnant
*/
static int CountConnectedComponents(MeshType &m)
{
  std::vector< std::pair<int,FacePointer> > CCV;
  return ConnectedComponents(m,CCV);
}

// Flood-fill over FF adjacency; each component contributes a
// (size, representative-face) pair to CCV. Returns the component count.
static int ConnectedComponents(MeshType &m, std::vector< std::pair<int,FacePointer> > &CCV)
{
  tri::RequireFFAdjacency(m);
  CCV.clear();
  tri::UpdateSelection<MeshType>::FaceClear(m);
  std::stack<FacePointer> sf;
  // NOTE(review): this initializer dereferences begin() — problematic on a
  // mesh with no faces; fpt is reassigned before use inside the loop.
  FacePointer fpt=&*(m.face.begin());
  for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi)
  {
    if(!((*fi).IsD()) && !(*fi).IsS())
    {
      (*fi).SetS();
      CCV.push_back(std::make_pair(0,&*fi));
      sf.push(&*fi);
      while (!sf.empty())
      {
        fpt=sf.top();
        ++CCV.back().first;
        sf.pop();
        for(int j=0;j<3;++j)
        {
          if( !face::IsBorder(*fpt,j) )
          {
            FacePointer l = fpt->FFp(j);
            if( !(*l).IsS() )
            {
              (*l).SetS();
              sf.push(l);
            }
          }
        }
      }
    }
  }
  return int(CCV.size());
}

// Fill the per-vertex attribute handle h with the valence (number of
// incident face corners) of each vertex.
static void ComputeValence( MeshType &m, typename MeshType::PerVertexIntHandle &h)
{
  for(VertexIterator vi=m.vert.begin(); vi!= m.vert.end();++vi)
    h[vi]=0;

  for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi)
  {
    if(!((*fi).IsD()))
      for(int j=0;j<fi->VN();j++)
        ++h[tri::Index(m,fi->V(j))];
  }
}

/**
GENUS.

A topologically invariant property of a surface defined as
the largest number of non-intersecting simple closed curves that can be
drawn on the surface without separating it.

Roughly speaking, it is the number of holes in a surface.
The genus g of a closed surface, also called the geometric genus, is related to the
Euler characteristic by the relation $chi$ by $chi==2-2g$.

The genus of a connected, orientable surface is an integer representing the
maximum number of cuttings along closed simple curves without rendering the
resultant manifold disconnected. It is equal to the number of handles on it.

For general polyhedra the <em>Euler Formula</em> is:

      V - E + F = 2 - 2G - B

where V is the number of vertices, F is the number of faces, E is the
number of edges, G is the genus and B is the number of <em>boundary polygons</em>.

The above formula is valid for a mesh with one single connected component.
By considering multiple connected components the formula becomes:

      V - E + F = 2C - 2Gs - B   ->   2Gs = - ( V-E+F +B -2C)

where C is the number of connected components and Gs is the sum of
the genus of all connected components.

Note that in the case of a mesh with boundaries the intuitive meaning of Genus is less intuitive that it could seem.
A closed sphere, a sphere with one hole (e.g. a disk) and a sphere with two holes (e.g.
a tube) all of them have Genus == 0
*/
static int MeshGenus(int nvert,int nedges,int nfaces, int numholes, int numcomponents)
{
  // 2Gs = -(V - E + F + B - 2C)  ->  Gs = -(V - E + F + B - 2C)/2
  return -((nvert + nfaces - nedges + numholes - 2 * numcomponents) / 2);
}

// Convenience overload: gathers the counts from the mesh itself.
static int MeshGenus(MeshType &m)
{
  int nvert=m.vn;
  int nfaces=m.fn;
  int boundary_e,total_e,nonmanif_e;
  CountEdgeNum(m,total_e,boundary_e,nonmanif_e);
  int numholes=CountHoles(m);
  int numcomponents=CountConnectedComponents(m);
  int G=MeshGenus(nvert,total_e,nfaces,numholes,numcomponents);
  return G;
}

/**
 * Check if the given mesh is regular, semi-regular or irregular.
 *
 * Each vertex of a \em regular mesh has valence 6 except for border vertices
 * which have valence 4.
 *
 * A \em semi-regular mesh is derived from an irregular one applying
 * 1-to-4 subdivision recursively. (not checked for now)
 *
 * All other meshes are \em irregular.
 */
static void IsRegularMesh(MeshType &m, bool &Regular, bool &Semiregular)
{
  RequireVFAdjacency(m);
  Regular = true;

  VertexIterator vi;

  // for each vertex the number of edges are count
  for (vi = m.vert.begin(); vi != m.vert.end(); ++vi)
  {
    if (!vi->IsD())
    {
      face::Pos<FaceType> he((*vi).VFp(), &*vi);
      face::Pos<FaceType> ht = he;

      int n=0;
      bool border=false;
      do
      {
        ++n;
        ht.NextE();
        if (ht.IsBorder())
          border=true;
      }
      while (ht != he);

      // on the border every edge is traversed twice by NextE
      if (border)
        n = n/2;

      if ((n != 6)&&(!border && n != 4))
      {
        Regular = false;
        break;
      }
    }
  }

  if (!Regular) Semiregular = false;
  else
  {
    // For now we do not account for semi-regularity
    Semiregular = false;
  }
}

// True when every pair of FF-adjacent faces has coherent winding order.
static bool IsCoherentlyOrientedMesh(MeshType &m)
{
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
    if (!fi->IsD())
      for(int i=0;i<3;++i)
        if(!face::CheckOrientation(*fi,i))
          return false;

  return true;
}

// Try to make the mesh coherently oriented by flood-filling each component
// and swapping the faces that disagree with the seed.
// Oriented   <- was the mesh already coherently oriented?
// Orientable <- could a coherent orientation be found at all?
static void OrientCoherentlyMesh(MeshType &m, bool &Oriented, bool &Orientable)
{
  RequireFFAdjacency(m);
  assert(&Oriented != &Orientable);
  assert(m.face.back().FFp(0)); // This algorithms require FF topology initialized

  Orientable = true;
  Oriented = true;

  tri::UpdateSelection<MeshType>::FaceClear(m);
  std::stack<FacePointer> faces;
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
  {
    if (!fi->IsD() && !fi->IsS())
    {
      // each face put in the stack is selected (and oriented)
      fi->SetS();
      faces.push(&(*fi));

      // empty the stack
      while (!faces.empty())
      {
        FacePointer fp = faces.top();
        faces.pop();

        // make consistently oriented the adjacent faces
        for (int j = 0; j < 3; j++)
        {
          // get one of the adjacent face
          FacePointer fpaux = fp->FFp(j);
          int iaux = fp->FFi(j);

          if (!fpaux->IsD() && fpaux != fp && face::IsManifold<FaceType>(*fp, j))
          {
            if (!CheckOrientation(*fpaux, iaux))
            {
              Oriented = false;

              if (!fpaux->IsS())
              {
                face::SwapEdge<FaceType,true>(*fpaux, iaux);
                assert(CheckOrientation(*fpaux, iaux));
              }
              else
              {
                // face already fixed with the opposite orientation:
                // the surface is not orientable (e.g. a Moebius strip)
                Orientable = false;
                break;
              }
            }

            // put the oriented face into the stack
            if (!fpaux->IsS())
            {
              fpaux->SetS();
              faces.push(fpaux);
            }
          }
        }
      }
    }

    if (!Orientable) break;
  }
}

/// Flip the orientation of the whole mesh flipping all the faces (by swapping the first two vertices)
static void FlipMesh(MeshType &m, bool selected=false)
{
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if(!(*fi).IsD())
    if(!selected || (*fi).IsS())
    {
      face::SwapEdge<FaceType,false>((*fi), 0);
      if (HasPerWedgeTexCoord(m))
        std::swap((*fi).WT(0),(*fi).WT(1));
    }
}

/// Flip a mesh so that its normals are oriented outside.
/// Just for safety it uses a voting scheme.
/// It assumes that
/// mesh has already coherent normals.
/// mesh is watertight and single component.
static bool FlipNormalOutside(MeshType &m)
{
  if(m.vert.empty()) return false;

  tri::UpdateNormal<MeshType>::PerVertexAngleWeighted(m);
  tri::UpdateNormal<MeshType>::NormalizePerVertex(m);

  std::vector< VertexPointer > minVertVec;
  std::vector< VertexPointer > maxVertVec;

  // The set of directions to be choosen
  std::vector< CoordType > dirVec;
  dirVec.push_back(CoordType(1,0,0));
  dirVec.push_back(CoordType(0,1,0));
  dirVec.push_back(CoordType(0,0,1));
  dirVec.push_back(CoordType( 1, 1,1));
  dirVec.push_back(CoordType(-1, 1,1));
  dirVec.push_back(CoordType(-1,-1,1));
  dirVec.push_back(CoordType( 1,-1,1));

  for(size_t i=0;i<dirVec.size();++i)
  {
    Normalize(dirVec[i]);
    minVertVec.push_back(&*m.vert.begin());
    maxVertVec.push_back(&*m.vert.begin());
  }
  // find the extremal vertex along each test direction
  for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi) if(!(*vi).IsD())
  {
    for(size_t i=0;i<dirVec.size();++i)
    {
      if( (*vi).cP().dot(dirVec[i]) < minVertVec[i]->P().dot(dirVec[i])) minVertVec[i] = &*vi;
      if( (*vi).cP().dot(dirVec[i]) > maxVertVec[i]->P().dot(dirVec[i])) maxVertVec[i] = &*vi;
    }
  }

  // an extremal vertex of an outward-oriented mesh should have a normal
  // roughly aligned with the direction along which it is extremal
  int voteCount=0;
  ScalarType angleThreshold = cos(math::ToRad(85.0));
  for(size_t i=0;i<dirVec.size();++i)
  {
    if(minVertVec[i]->N().dot(dirVec[i]) > angleThreshold ) voteCount++;
    if(maxVertVec[i]->N().dot(dirVec[i]) < -angleThreshold ) voteCount++;
  }
  if(voteCount < int(dirVec.size())/2) return false;
  FlipMesh(m);
  return true;
}

// Search and remove small single triangle folds
// - a face has normal opposite to all other faces
// - choose the edge that brings to the face f1 containing the vertex opposite to that edge.
static int RemoveFaceFoldByFlip(MeshType &m, float normalThresholdDeg=175, bool repeat=true)
{
  RequireFFAdjacency(m);
  RequirePerVertexMark(m);
  //Counters for logging and convergence
  int count, total = 0;

  do
  {
    tri::UpdateTopology<MeshType>::FaceFace(m);
    tri::UnMarkAll(m);
    count = 0;

    ScalarType NormalThrRad = math::ToRad(normalThresholdDeg);
    ScalarType eps = 0.0001; // this epsilon value is in absolute value. It is a distance from edge in baricentric coords.

    //detection stage
    for(FaceIterator fi=m.face.begin();fi!= m.face.end();++fi ) if(!(*fi).IsV())
    {
      Point3<ScalarType> NN = vcg::TriangleNormal((*fi)).Normalize();
      // a "fold": the face normal is nearly opposite to all three neighbors
      if( vcg::AngleN(NN,TriangleNormal(*(*fi).FFp(0)).Normalize()) > NormalThrRad &&
          vcg::AngleN(NN,TriangleNormal(*(*fi).FFp(1)).Normalize()) > NormalThrRad &&
          vcg::AngleN(NN,TriangleNormal(*(*fi).FFp(2)).Normalize()) > NormalThrRad )
      {
        (*fi).SetS();
        //(*fi).C()=Color4b(Color4b::Red);
        // now search the best edge to flip: pick the edge whose opposite
        // vertex projects strictly inside the adjacent face
        for(int i=0;i<3;i++)
        {
          Point3<ScalarType> &p=(*fi).P2(i);
          Point3<ScalarType> L;
          bool ret = vcg::InterpolationParameters((*(*fi).FFp(i)),TriangleNormal(*(*fi).FFp(i)),p,L);
          if(ret && L[0]>eps && L[1]>eps && L[2]>eps)
          {
            (*fi).FFp(i)->SetS();
            (*fi).FFp(i)->SetV();
            //(*fi).FFp(i)->C()=Color4b(Color4b::Green);
            if(face::CheckFlipEdge<FaceType>( *fi, i ))
            {
              face::FlipEdge<FaceType>( *fi, i );
              ++count; ++total;
            }
          }
        }
      }
    }

    // tri::UpdateNormal<MeshType>::PerFace(m);
  }
  while( repeat && count );
  return total;
}

// Remove "T-vertices" (a vertex lying almost exactly on the longest edge of
// a face) by flipping that edge when the flip improves face quality.
// threshold: ratio between the longest side and the vertex-to-edge distance.
static int RemoveTVertexByFlip(MeshType &m, float threshold=40, bool repeat=true)
{
  RequireFFAdjacency(m);
  RequirePerVertexMark(m);
  //Counters for logging and convergence
  int count, total = 0;

  do
  {
    tri::UpdateTopology<MeshType>::FaceFace(m);
    tri::UnMarkAll(m);
    count = 0;

    //detection stage
    for(unsigned int index = 0 ; index < m.face.size(); ++index )
    {
      FacePointer f = &(m.face[index]);
      float sides[3];
      CoordType dummy;

      sides[0] = Distance(f->P(0), f->P(1));
      sides[1] = Distance(f->P(1), f->P(2));
      sides[2] = Distance(f->P(2), f->P(0));
      // Find largest triangle side
      int i = std::find(sides, sides+3, std::max( std::max(sides[0],sides[1]), sides[2])) - (sides);
      if( tri::IsMarked(m,f->V2(i) )) continue;

      if( PSDist(f->P2(i),f->P(i),f->P1(i),dummy)*threshold <= sides[i] )
      {
        tri::Mark(m,f->V2(i));
        if(face::CheckFlipEdge<FaceType>( *f, i ))
        {
          // Check if EdgeFlipping improves quality
          FacePointer g = f->FFp(i); int k = f->FFi(i);
          Triangle3<ScalarType> t1(f->P(i), f->P1(i), f->P2(i)),
                                t2(g->P(k), g->P1(k), g->P2(k)),
                                t3(f->P(i), g->P2(k), f->P2(i)),
                                t4(g->P(k), f->P2(i), g->P2(k));

          if ( std::min( QualityFace(t1), QualityFace(t2) ) < std::min( QualityFace(t3), QualityFace(t4) ))
          {
            face::FlipEdge<FaceType>( *f, i );
            ++count; ++total;
          }
        }
      }
    }

    // tri::UpdateNormal<MeshType>::PerFace(m);
  }
  while( repeat && count );
  return total;
}

// Remove "T-vertices" by snapping the offending vertex onto the nearest
// endpoint of the long edge (collapse by coordinate identification followed
// by duplicate-vertex removal).
static int RemoveTVertexByCollapse(MeshType &m, float threshold=40, bool repeat=true)
{
  RequirePerVertexMark(m);

  //Counters for logging and convergence
  int count, total = 0;

  do
  {
    tri::UnMarkAll(m);
    count = 0;

    //detection stage
    for(unsigned int index = 0 ; index < m.face.size(); ++index )
    {
      FacePointer f = &(m.face[index]);
      float sides[3];
      CoordType dummy;

      sides[0] = Distance(f->P(0), f->P(1));
      sides[1] = Distance(f->P(1), f->P(2));
      sides[2] = Distance(f->P(2), f->P(0));
      int i = std::find(sides, sides+3, std::max( std::max(sides[0],sides[1]), sides[2])) - (sides);
      if( tri::IsMarked(m,f->V2(i) )) continue;

      if( PSDist(f->P2(i),f->P(i),f->P1(i),dummy)*threshold <= sides[i] )
      {
        tri::Mark(m,f->V2(i));

        // snap onto the closer endpoint of the long edge
        int j = Distance(dummy,f->P(i))<Distance(dummy,f->P1(i))?i:(i+1)%3;
        f->P2(i) = f->P(j);
        tri::Mark(m,f->V(j));

        ++count; ++total;
      }
    }

    tri::Clean<MeshType>::RemoveDuplicateVertex(m);
    tri::Allocator<MeshType>::CompactFaceVector(m);
    tri::Allocator<MeshType>::CompactVertexVector(m);
  }
  while( repeat && count );
  return total;
}

// Fill `ret` with the faces that intersect some other non-adjacent face.
// Uses a uniform grid to restrict the candidate pairs. Returns true if any
// intersection was found.
static bool SelfIntersections(MeshType &m, std::vector<FaceType*> &ret)
{
  RequirePerFaceMark(m);
  ret.clear();
  int referredBit = FaceType::NewBitFlag();
tri::UpdateFlags<MeshType>::FaceClear(m,referredBit);

TriMeshGrid gM;
gM.Set(m.face.begin(),m.face.end());

for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi) if(!(*fi).IsD())
{
  // mark the current face so it is never tested against itself twice
  (*fi).SetUserBit(referredBit);
  Box3< ScalarType> bbox;
  (*fi).GetBBox(bbox);
  std::vector<FaceType*> inBox;
  vcg::tri::GetInBoxFace(m, gM, bbox,inBox);
  bool Intersected=false;
  typename std::vector<FaceType*>::iterator fib;
  for(fib=inBox.begin();fib!=inBox.end();++fib)
  {
    if(!(*fib)->IsUserBit(referredBit) && (*fib != &*fi) )
      if(Clean<MeshType>::TestFaceFaceIntersection(&*fi,*fib)){
        ret.push_back(*fib);
        if(!Intersected) {
          ret.push_back(&*fi);
          Intersected=true;
        }
      }
  }
  inBox.clear();
}

FaceType::DeleteBitFlag(referredBit);
return (ret.size()>0);
}

/**
This function simply test that the vn and fn counters be consistent with the size of the containers and the number of deleted simplexes.
*/
static bool IsSizeConsistent(MeshType &m)
{
  int DeletedVertNum=0;
  for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi)
    if((*vi).IsD()) DeletedVertNum++;

  int DeletedEdgeNum=0;
  for (EdgeIterator ei = m.edge.begin(); ei != m.edge.end(); ++ei)
    if((*ei).IsD()) DeletedEdgeNum++;

  int DeletedFaceNum=0;
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
    if((*fi).IsD()) DeletedFaceNum++;

  if(size_t(m.vn+DeletedVertNum) != m.vert.size()) return false;
  if(size_t(m.en+DeletedEdgeNum) != m.edge.size()) return false;
  if(size_t(m.fn+DeletedFaceNum) != m.face.size()) return false;

  return true;
}

/**
This function simply test that all the faces have a consistent face-face topology relation.
useful for checking that a topology modifying algorithm does not mess something.
*/
static bool IsFFAdjacencyConsistent(MeshType &m)
{
  RequireFFAdjacency(m);

  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
    if(!(*fi).IsD())
    {
      for(int i=0;i<3;++i)
        if(!FFCorrectness(*fi, i)) return false;
    }
  return true;
}

/**
This function simply test that a mesh has some reasonable tex coord.
*/
static bool HasConsistentPerWedgeTexCoord(MeshType &m)
{
  tri::RequirePerFaceWedgeTexCoord(m);

  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
    if(!(*fi).IsD())
    { FaceType &f=(*fi);
      if( ! ( (f.WT(0).N() == f.WT(1).N()) && (f.WT(0).N() == (*fi).WT(2).N()) ) )
        return false; // all the vertices must have the same index.

      if((*fi).WT(0).N() <0) return false; // no undefined texture should be allowed
    }
  return true;
}

/**
Simple check that there are no face with all collapsed tex coords.
Note: despite the name it returns FALSE when such a face is found.
*/
static bool HasZeroTexCoordFace(MeshType &m)
{
  tri::RequirePerFaceWedgeTexCoord(m);

  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
    if(!(*fi).IsD())
    {
      if( (*fi).WT(0).P() == (*fi).WT(1).P() && (*fi).WT(0).P() == (*fi).WT(2).P() ) return false;
    }
  return true;
}

/**
This function test if two triangular faces of a mesh intersect.
It assumes that the faces (as storage) are different (e.g different address)
If the two faces are different but coincident (same set of vertexes) return true.
if the faces share an edge no test is done.
if the faces share only a vertex, the opposite edge is tested against the face
*/
static bool TestFaceFaceIntersection(FaceType *f0,FaceType *f1)
{
  assert(f0!=f1);
  int sv = face::CountSharedVertex(f0,f1);
  if(sv==3) return true;
  if(sv==0) return (vcg::IntersectionTriangleTriangle<FaceType>((*f0),(*f1)));
  // if the faces share only a vertex, the opposite edge (as a segment) is tested against the face
  // to avoid degenerate cases where the two triangles have the opposite edge on a common plane
  // we offset the segment to test toward the shared vertex
  if(sv==1)
  {
    int i0,i1; ScalarType a,b;
    face::FindSharedVertex(f0,f1,i0,i1);
    CoordType shP = f0->V(i0)->P()*0.5;
    if(vcg::IntersectionSegmentTriangle(Segment3<ScalarType>((*f0).V1(i0)->P()*0.5+shP,(*f0).V2(i0)->P()*0.5+shP), *f1, a, b) )
    {
      // a,b are the param coords of the intersection point of the segment.
      // EPSIL: small tolerance, presumably declared elsewhere in this class — TODO confirm.
      if(a+b>=1 || a<=EPSIL || b<=EPSIL ) return false;
      return true;
    }
    // symmetric test: edge of f1 opposite to the shared vertex against f0
    if(vcg::IntersectionSegmentTriangle(Segment3<ScalarType>((*f1).V1(i1)->P()*0.5+shP,(*f1).V2(i1)->P()*0.5+shP), *f0, a, b) )
    {
      // a,b are the param coords of the intersection point of the segment.
      if(a+b>=1 || a<=EPSIL || b<=EPSIL ) return false;
      return true;
    }
  }
  return false;
}

/**
This function merge all the vertices that are closer than the given radius
*/
static int MergeCloseVertex(MeshType &m, const ScalarType radius)
{
  int mergedCnt=0;
  mergedCnt = ClusterVertex(m,radius);
  RemoveDuplicateVertex(m,true);
  return mergedCnt;
}

// Snap every vertex within `radius` of an unvisited seed vertex onto the
// seed position (greedy clustering); returns the number of moved vertices.
static int ClusterVertex(MeshType &m, const ScalarType radius)
{
  if(m.vn==0) return 0;
  // some spatial indexing structure does not work well with deleted vertices...
  tri::Allocator<MeshType>::CompactVertexVector(m);
  typedef vcg::SpatialHashTable<VertexType, ScalarType> SampleSHT;
  SampleSHT sht;
  tri::EmptyTMark<MeshType> markerFunctor;
  std::vector<VertexType*> closests;
  int mergedCnt=0;
  sht.Set(m.vert.begin(), m.vert.end());
  UpdateFlags<MeshType>::VertexClearV(m);
  for(VertexIterator viv = m.vert.begin(); viv!= m.vert.end(); ++viv)
    if(!(*viv).IsD() && !(*viv).IsV())
    {
      (*viv).SetV();
      Point3<ScalarType> p = viv->cP();
      Box3<ScalarType> bb(p-Point3<ScalarType>(radius,radius,radius),p+Point3<ScalarType>(radius,radius,radius));
      GridGetInBox(sht, markerFunctor, bb, closests);
      for(size_t i=0; i<closests.size(); ++i)
      {
        ScalarType dist = Distance(p,closests[i]->cP());
        if(dist < radius && !closests[i]->IsV())
        {
          mergedCnt++;
          closests[i]->SetV();
          closests[i]->P()=p;
        }
      }
    }
  return mergedCnt;
}

// Delete the connected components with fewer than maxCCSize faces.
// Returns <total number of components, number of deleted components>.
static std::pair<int,int> RemoveSmallConnectedComponentsSize(MeshType &m, int maxCCSize)
{
  std::vector< std::pair<int, typename MeshType::FacePointer> > CCV;
  int TotalCC=ConnectedComponents(m, CCV);
  int DeletedCC=0;

  ConnectedComponentIterator<MeshType> ci;
  for(unsigned int i=0;i<CCV.size();++i)
  {
    std::vector<typename MeshType::FacePointer> FPV;
    if(CCV[i].first<maxCCSize)
    {
      DeletedCC++;
      for(ci.start(m,CCV[i].second);!ci.completed();++ci)
        FPV.push_back(*ci);

      typename std::vector<typename MeshType::FacePointer>::iterator fpvi;
      for(fpvi=FPV.begin(); fpvi!=FPV.end(); ++fpvi)
        Allocator<MeshType>::DeleteFace(m,(**fpvi));
    }
  }
  return std::make_pair(TotalCC,DeletedCC);
}

/// Remove the connected components smaller than a given diameter
// it returns a pair with the number of connected components and the number of deleted ones.
static std::pair<int,int> RemoveSmallConnectedComponentsDiameter(MeshType &m, ScalarType maxDiameter)
{
  std::vector< std::pair<int, typename MeshType::FacePointer> > CCV;
  int TotalCC=ConnectedComponents(m, CCV);
  int DeletedCC=0;
  tri::ConnectedComponentIterator<MeshType> ci;
  for(unsigned int i=0;i<CCV.size();++i)
  {
    Box3<ScalarType> bb;
    std::vector<typename MeshType::FacePointer> FPV;
    for(ci.start(m,CCV[i].second);!ci.completed();++ci)
    {
      FPV.push_back(*ci);
      bb.Add((*ci)->P(0));
      bb.Add((*ci)->P(1));
      bb.Add((*ci)->P(2));
    }
    if(bb.Diag()<maxDiameter)
    {
      DeletedCC++;
      typename std::vector<typename MeshType::FacePointer>::iterator fpvi;
      for(fpvi=FPV.begin(); fpvi!=FPV.end(); ++fpvi)
        tri::Allocator<MeshType>::DeleteFace(m,(**fpvi));
    }
  }
  return std::make_pair(TotalCC,DeletedCC);
}

/// Remove the connected components greater than a given diameter
// it returns a pair with the number of connected components and the number of deleted ones.
static std::pair<int,int> RemoveHugeConnectedComponentsDiameter(MeshType &m, ScalarType minDiameter) { std::vector< std::pair<int, typename MeshType::FacePointer> > CCV; int TotalCC=ConnectedComponents(m, CCV); int DeletedCC=0; tri::ConnectedComponentIterator<MeshType> ci; for(unsigned int i=0;i<CCV.size();++i) { Box3f bb; std::vector<typename MeshType::FacePointer> FPV; for(ci.start(m,CCV[i].second);!ci.completed();++ci) { FPV.push_back(*ci); bb.Add((*ci)->P(0)); bb.Add((*ci)->P(1)); bb.Add((*ci)->P(2)); } if(bb.Diag()>minDiameter) { DeletedCC++; typename std::vector<typename MeshType::FacePointer>::iterator fpvi; for(fpvi=FPV.begin(); fpvi!=FPV.end(); ++fpvi) tri::Allocator<MeshType>::DeleteFace(m,(**fpvi)); } } return std::make_pair(TotalCC,DeletedCC); } /** Select the folded faces using an angle threshold on the face normal. The face is selected if the dot product between the face normal and the normal of the plane fitted using the vertices of the one ring faces is below the cosThreshold. The cosThreshold requires a negative cosine value (a positive value is clamp to zero). 
*/ static void SelectFoldedFaceFromOneRingFaces(MeshType &m, ScalarType cosThreshold) { tri::RequireVFAdjacency(m); tri::RequirePerFaceNormal(m); tri::RequirePerVertexNormal(m); vcg::tri::UpdateSelection<MeshType>::FaceClear(m); vcg::tri::UpdateNormal<MeshType>::PerFaceNormalized(m); vcg::tri::UpdateNormal<MeshType>::PerVertexNormalized(m); vcg::tri::UpdateTopology<MeshType>::VertexFace(m); if (cosThreshold > 0) cosThreshold = 0; #pragma omp parallel for schedule(dynamic, 10) for (int i = 0; i < m.face.size(); i++) { std::vector<typename MeshType::VertexPointer> nearVertex; std::vector<typename MeshType::CoordType> point; typename MeshType::FacePointer f = &m.face[i]; for (int j = 0; j < 3; j++) { std::vector<typename MeshType::VertexPointer> temp; vcg::face::VVStarVF<typename MeshType::FaceType>(f->V(j), temp); typename std::vector<typename MeshType::VertexPointer>::iterator iter = temp.begin(); for (; iter != temp.end(); iter++) { if ((*iter) != f->V1(j) && (*iter) != f->V2(j)) { nearVertex.push_back((*iter)); point.push_back((*iter)->P()); } } nearVertex.push_back(f->V(j)); point.push_back(f->P(j)); } if (point.size() > 3) { vcg::Plane3<typename MeshType::ScalarType> plane; vcg::FitPlaneToPointSet(point, plane); float avgDot = 0; for (int j = 0; j < nearVertex.size(); j++) avgDot += plane.Direction().dot(nearVertex[j]->N()); avgDot /= nearVertex.size(); typename MeshType::VertexType::NormalType normal; if (avgDot < 0) normal = -plane.Direction(); else normal = plane.Direction(); if (normal.dot(f->N()) < cosThreshold) f->SetS(); } } } }; // end class /*@}*/ } //End Namespace Tri } // End Namespace vcg #endif
GeometryConverter.h
/* -*-c++-*- IfcQuery www.ifcquery.com * MIT License Copyright (c) 2017 Fabian Gerold Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/

#pragma once

#include <unordered_set>
#include <ifcpp/model/BasicTypes.h>
#include <ifcpp/model/BuildingModel.h>
#include <ifcpp/model/StatusCallback.h>
#include <ifcpp/IFC4/include/IfcCurtainWall.h>
#include <ifcpp/IFC4/include/IfcGloballyUniqueId.h>
#include <ifcpp/IFC4/include/IfcPropertySetDefinitionSet.h>
#include <ifcpp/IFC4/include/IfcRelAggregates.h>
#include <ifcpp/IFC4/include/IfcRelContainedInSpatialStructure.h>
#include <ifcpp/IFC4/include/IfcRelDefinesByProperties.h>
#include <ifcpp/IFC4/include/IfcSpace.h>
#include <ifcpp/IFC4/include/IfcWindow.h>
#include "IncludeCarveHeaders.h"
#include "GeometryInputData.h"
#include "RepresentationConverter.h"
#include "CSG_Adapter.h"

// Converts a loaded IFC BuildingModel into Carve geometry: one ProductShapeData
// per IfcProduct (keyed by GUID), with spatial structure, openings and simple
// appearance properties resolved afterwards. Also acts as the message sink for
// the model and the representation converter (see messageTarget).
class GeometryConverter : public StatusCallback
{
protected:
	shared_ptr<BuildingModel>           m_ifc_model;                       // the loaded IFC model to convert
	shared_ptr<GeometrySettings>        m_geom_settings;                   // tessellation and filtering options
	shared_ptr<RepresentationConverter> m_representation_converter;        // converts IfcRepresentation -> Carve meshes
	std::map<std::string, shared_ptr<ProductShapeData> > m_product_shape_data;            // GUID -> converted shape
	std::map<std::string, shared_ptr<BuildingObject> >   m_map_outside_spatial_structure; // GUID -> objects not reachable from IfcProject
	double m_recent_progress = 0;     // last progress fraction reported via progressValueCallback
	double m_csg_eps = 1.5e-05;       // epsilon passed to carve::setEpsilon before CSG operations
	std::map<int, std::vector<shared_ptr<StatusCallback::Message> > > m_messages; // entity id -> messages already reported (dedup)
#ifdef ENABLE_OPENMP
	Mutex m_writelock_messages;       // guards m_messages when converting in parallel
#endif

public:
	// getters and setters
	shared_ptr<BuildingModel>& getBuildingModel() { return m_ifc_model; }
	shared_ptr<RepresentationConverter>& getRepresentationConverter() { return m_representation_converter; }
	shared_ptr<GeometrySettings>& getGeomSettings() { return m_geom_settings; }
	std::map<std::string, shared_ptr<ProductShapeData> >& getShapeInputData() { return m_product_shape_data; }
	std::map<std::string, shared_ptr<BuildingObject> >& getObjectsOutsideSpatialStructure() { return m_map_outside_spatial_structure; }

	GeometryConverter( shared_ptr<BuildingModel>& ifc_model )
	{
		m_ifc_model = ifc_model;
		m_geom_settings = shared_ptr<GeometrySettings>( new GeometrySettings() );
		resetNumVerticesPerCircle();
		shared_ptr<UnitConverter>& unit_converter = m_ifc_model->getUnitConverter();
		m_representation_converter = shared_ptr<RepresentationConverter>( new RepresentationConverter( m_geom_settings, unit_converter ) );

		// redirect all messages to this->messageTarget
		m_ifc_model->setMessageTarget( this );
		m_representation_converter->setMessageTarget( this );
	}
	virtual ~GeometryConverter() {}

	// Drop the loaded model and all cached conversion results, resetting progress.
	void resetModel()
	{
		progressTextCallback( L"Unloading model, cleaning up memory..." );
		clearInputCache();
		m_recent_progress = 0.0;

		m_ifc_model->clearCache();
		m_ifc_model->clearIfcModel();
		progressTextCallback( L"Unloading model done" );
		progressValueCallback( 0.0, "parse" );

#ifdef _DEBUG
		GeomDebugDump::clearMeshsetDump();
#endif
	}

	// Clear only the conversion caches (shapes, messages), keeping the model itself.
	void clearInputCache()
	{
		m_product_shape_data.clear();
		m_map_outside_spatial_structure.clear();
		m_representation_converter->clearCache();
		m_messages.clear();
	}

	void resetNumVerticesPerCircle()
	{
		m_geom_settings->resetNumVerticesPerCircle();
	}

	void setCsgEps(double eps)
	{
		m_csg_eps = eps;
	}

	// Swap in a different model; re-wires unit conversion and message routing.
	void setModel( shared_ptr<BuildingModel> model )
	{
		if( m_ifc_model )
		{
			m_ifc_model->unsetMessageCallBack();
		}
		clearInputCache();
		m_ifc_model = model;
		m_representation_converter->clearCache();
		m_representation_converter->setUnitConverter( m_ifc_model->getUnitConverter() );
		m_ifc_model->setMessageTarget( this );
	}

	// Recursively attach child ProductShapeData to <product_data>, following
	// IfcRelAggregates (decomposition) and IfcRelContainedInSpatialStructure
	// (containment). Marks every visited product as added to the spatial tree;
	// whatever remains unmarked afterwards ends up in
	// m_map_outside_spatial_structure (see convertGeometry).
	void resolveProjectStructure( shared_ptr<ProductShapeData>& product_data )
	{
		if( !product_data )
		{
			return;
		}
		if( product_data->m_ifc_object_definition.expired() )
		{
			return;
		}
		shared_ptr<IfcObjectDefinition> ifc_object_def(product_data->m_ifc_object_definition);
		shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def);
		if (!ifc_product)
		{
			return;
		}
		product_data->m_added_to_spatial_structure = true;

		// children via IfcRelAggregates (decomposition hierarchy)
		const std::vector<weak_ptr<IfcRelAggregates> >& vec_IsDecomposedBy = ifc_product->m_IsDecomposedBy_inverse;
		for( size_t ii = 0; ii < vec_IsDecomposedBy.size(); ++ii )
		{
			const weak_ptr<IfcRelAggregates>& rel_aggregates_weak_ptr = vec_IsDecomposedBy[ii];
			if( rel_aggregates_weak_ptr.expired() )
			{
				continue;
			}
			shared_ptr<IfcRelAggregates> rel_aggregates( rel_aggregates_weak_ptr );
			if( rel_aggregates )
			{
				const std::vector<shared_ptr<IfcObjectDefinition> >& vec_related_objects = rel_aggregates->m_RelatedObjects;
				for( size_t jj = 0; jj < vec_related_objects.size(); ++jj )
				{
					const shared_ptr<IfcObjectDefinition>& related_obj_def = vec_related_objects[jj];
					if( related_obj_def )
					{
						// shapes are keyed by UTF-8 GUID; objects without a GlobalId
						// map to the empty string key
						std::string related_guid;
						if (related_obj_def->m_GlobalId)
						{
							std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
							related_guid = converterX.to_bytes(related_obj_def->m_GlobalId->m_value);
						}
						auto it_product_map = m_product_shape_data.find(related_guid);
						if( it_product_map != m_product_shape_data.end() )
						{
							shared_ptr<ProductShapeData>& related_product_shape = it_product_map->second;
							if( related_product_shape )
							{
								product_data->addChildProduct( related_product_shape, product_data );
								resolveProjectStructure( related_product_shape );
							}
						}
					}
				}
			}
		}

		// children via IfcRelContainedInSpatialStructure (elements in a storey/site/...)
		shared_ptr<IfcSpatialStructureElement> spatial_ele = dynamic_pointer_cast<IfcSpatialStructureElement>(ifc_product);
		if( spatial_ele )
		{
			const std::vector<weak_ptr<IfcRelContainedInSpatialStructure> >& vec_contains = spatial_ele->m_ContainsElements_inverse;
			for( size_t ii = 0; ii < vec_contains.size(); ++ii )
			{
				const weak_ptr<IfcRelContainedInSpatialStructure>& rel_contained_weak_ptr = vec_contains[ii];
				if( rel_contained_weak_ptr.expired() )
				{
					continue;
				}
				shared_ptr<IfcRelContainedInSpatialStructure> rel_contained( rel_contained_weak_ptr );
				if( rel_contained )
				{
					const std::vector<shared_ptr<IfcProduct> >& vec_related_elements = rel_contained->m_RelatedElements;
					for( size_t jj = 0; jj < vec_related_elements.size(); ++jj )
					{
						const shared_ptr<IfcProduct>& related_product = vec_related_elements[jj];
						if( related_product )
						{
							std::string related_guid;
							if (related_product->m_GlobalId)
							{
								std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
								related_guid = converterX.to_bytes(related_product->m_GlobalId->m_value);
							}
							auto it_product_map = m_product_shape_data.find(related_guid);
							if( it_product_map != m_product_shape_data.end() )
							{
								shared_ptr<ProductShapeData>& related_product_shape = it_product_map->second;
								if( related_product_shape )
								{
									product_data->addChildProduct( related_product_shape, product_data );
									resolveProjectStructure( related_product_shape );
								}
							}
						}
					}
				}
			}
		}

		// TODO: handle IfcRelAssignsToProduct
	}

	// Scan a property set for appearance-related entries; currently only a
	// complex property with UsageName "Color" is turned into an AppearanceData
	// (applied to ambient/diffuse/specular) and added to <product_shape>.
	void readAppearanceFromPropertySet( const shared_ptr<IfcPropertySet>& prop_set, shared_ptr<ProductShapeData>& product_shape )
	{
		if( !prop_set )
		{
			return;
		}
		for( auto& ifc_property : prop_set->m_HasProperties )
		{
			if( !ifc_property )
			{
				continue;
			}

			shared_ptr<IfcSimpleProperty> simple_property = dynamic_pointer_cast<IfcSimpleProperty>(ifc_property);
			if( simple_property )
			{
				// ENTITY IfcSimpleProperty ABSTRACT SUPERTYPE OF(ONEOF( IfcPropertyBoundedValue, IfcPropertyEnumeratedValue, IfcPropertyListValue,
				// IfcPropertyReferenceValue, IfcPropertySingleValue, IfcPropertyTableValue))
				shared_ptr<IfcIdentifier> property_name = simple_property->m_Name;
				std::wstring name_str = property_name->m_value;
				if( name_str.compare( L"LayerName" ) == 0 )
				{
					// TODO: implement layers
				}
				shared_ptr<IfcText> description = simple_property->m_Description;
				shared_ptr<IfcPropertySingleValue> property_single_value = dynamic_pointer_cast<IfcPropertySingleValue>(simple_property);
				if( property_single_value )
				{
					//shared_ptr<IfcValue>& nominal_value = property_single_value->m_NominalValue; //optional
					//shared_ptr<IfcUnit>& unit = property_single_value->m_Unit; //optional
				}
				continue;
			}

			shared_ptr<IfcComplexProperty> complex_property = dynamic_pointer_cast<IfcComplexProperty>(ifc_property);
			if( complex_property )
			{
				if( !complex_property->m_UsageName ) continue;
				if( complex_property->m_UsageName->m_value.compare( L"Color" ) == 0 )
				{
					vec4 vec_color;
					m_representation_converter->getStylesConverter()->convertIfcComplexPropertyColor( complex_property, vec_color );
					shared_ptr<AppearanceData> appearance_data( new AppearanceData( -1 ) );
					if( !appearance_data )
					{
						throw OutOfMemoryException( __FUNC__ );
					}
					appearance_data->m_apply_to_geometry_type = AppearanceData::GEOM_TYPE_ANY;
					appearance_data->m_color_ambient.setColor( vec_color );
					appearance_data->m_color_diffuse.setColor( vec_color );
					appearance_data->m_color_specular.setColor( vec_color );
					appearance_data->m_shininess = 35.f;
					product_shape->addAppearance( appearance_data );
				}
			}
		}
	}

	/*\brief method convertGeometry: Creates geometry for Carve from previously loaded BuildingModel model.
	Converts every IfcObjectDefinition independently (in parallel when OpenMP is
	enabled), then subtracts openings in related objects, then resolves the
	spatial structure starting from IfcProject. Results are available through
	getShapeInputData(); products not reachable from the project tree are
	collected in getObjectsOutsideSpatialStructure().
	**/
	void convertGeometry()
	{
		progressTextCallback( L"Creating geometry..." );
		progressValueCallback( 0, "geometry" );
		m_product_shape_data.clear();
		m_map_outside_spatial_structure.clear();
		m_representation_converter->clearCache();

		if( !m_ifc_model )
		{
			return;
		}

		shared_ptr<ProductShapeData> ifc_project_data;
		std::vector<shared_ptr<IfcObjectDefinition> > vec_object_definitions;

		// NOTE(review): length_to_meter_factor is computed but not used below
		double length_to_meter_factor = 1.0;
		if( m_ifc_model->getUnitConverter() )
		{
			length_to_meter_factor = m_ifc_model->getUnitConverter()->getLengthInMeterFactor();
		}
		carve::setEpsilon( m_csg_eps );

		// collect all object definitions from the entity map
		const std::map<int, shared_ptr<BuildingEntity> >& map_entities = m_ifc_model->getMapIfcEntities();
		if (map_entities.size() > 0)
		{
			for (auto it = map_entities.begin(); it != map_entities.end(); ++it)
			{
				shared_ptr<BuildingEntity> obj = it->second;
				shared_ptr<IfcObjectDefinition> product = dynamic_pointer_cast<IfcObjectDefinition>(obj);
				if (product)
				{
					vec_object_definitions.push_back(product);
				}
			}
		}

		// create geometry for for each IfcProduct independently, spatial structure will be resolved later
		std::map<std::string, shared_ptr<ProductShapeData> >* map_products_ptr = &m_product_shape_data;
		const int num_object_definitions = (int)vec_object_definitions.size();

#ifdef ENABLE_OPENMP
		Mutex writelock_map;
		Mutex writelock_ifc_project;

#pragma omp parallel firstprivate(num_object_definitions) shared(map_products_ptr)
		{
			// time for one product may vary significantly, so schedule not so many
#pragma omp for schedule(dynamic,40)
#endif
			for( int i = 0; i < num_object_definitions; ++i )
			{
				shared_ptr<IfcObjectDefinition> ifc_product = vec_object_definitions[i];
				const int entity_id = ifc_product->m_entity_id;
				std::string guid;
				if (ifc_product->m_GlobalId)
				{
					std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
					guid = converterX.to_bytes(ifc_product->m_GlobalId->m_value);
				}

				shared_ptr<ProductShapeData> product_geom_input_data( new ProductShapeData( entity_id ) );
				product_geom_input_data->m_ifc_object_definition = ifc_product;

				std::stringstream thread_err;
				if( !m_geom_settings->getRenderObjectFilter()(ifc_product) )
				{
					// geometry will be created in method subtractOpenings
					continue;
				}
				else if( dynamic_pointer_cast<IfcProject>(ifc_product) )
				{
#ifdef ENABLE_OPENMP
					ScopedLock scoped_lock( writelock_ifc_project );
#endif
					ifc_project_data = product_geom_input_data;
				}

				// convert; per-product errors are collected and reported, they do
				// not abort the whole conversion (except out-of-memory)
				try
				{
					convertIfcProductShape( product_geom_input_data );
				}
				catch( OutOfMemoryException& e )
				{
					throw e;
				}
				catch( BuildingException& e )
				{
					thread_err << e.what();
				}
				catch( carve::exception& e )
				{
					thread_err << e.str();
				}
				catch( std::exception& e )
				{
					thread_err << e.what();
				}
				catch( ... )
				{
					thread_err << "undefined error, product id " << entity_id;
				}

				{
#ifdef ENABLE_OPENMP
					ScopedLock scoped_lock( writelock_map );
#endif
					map_products_ptr->insert( std::make_pair( guid, product_geom_input_data ) );

					if( thread_err.tellp() > 0 )
					{
						messageCallback( thread_err.str().c_str(), StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ );
					}
				}

				// progress callback
				double progress = (double)i / (double)num_object_definitions;
				if( progress - m_recent_progress > 0.02 )
				{
#ifdef ENABLE_OPENMP
					if( omp_get_thread_num() == 0 )
#endif
					{
						// leave 10% of progress to openscenegraph internals
						progressValueCallback( progress*0.9, "geometry" );
						m_recent_progress = progress;
					}
				}
			}
#ifdef ENABLE_OPENMP
		} // implicit barrier
#endif

		// subtract openings in related objects, such as IFCBUILDINGELEMENTPART connected to a window through IFCRELAGGREGATES
		for( auto it = map_products_ptr->begin(); it != map_products_ptr->end(); ++it )
		{
			shared_ptr<ProductShapeData> product_geom_input_data = it->second;
			try
			{
				subtractOpeningsInRelatedObjects(product_geom_input_data);
			}
			catch( OutOfMemoryException& e )
			{
				throw e;
			}
			catch( BuildingException& e )
			{
				messageCallback(e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "");
			}
			catch( carve::exception& e )
			{
				messageCallback(e.str(), StatusCallback::MESSAGE_TYPE_ERROR, "");
			}
			catch( std::exception& e )
			{
				messageCallback(e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "");
			}
			catch( ... )
			{
				messageCallback("undefined error", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__);
			}
		}

		try
		{
			// now resolve spatial structure
			if( ifc_project_data )
			{
				resolveProjectStructure( ifc_project_data );
			}

			// check if there are entities that are not in spatial structure
			for( auto it_product_shapes = m_product_shape_data.begin(); it_product_shapes != m_product_shape_data.end(); ++it_product_shapes )
			{
				shared_ptr<ProductShapeData> product_shape = it_product_shapes->second;
				if( !product_shape )
				{
					continue;
				}
				if( !product_shape->m_added_to_spatial_structure )
				{
					if( !product_shape->m_ifc_object_definition.expired() )
					{
						shared_ptr<IfcObjectDefinition> ifc_object_def( product_shape->m_ifc_object_definition );
						// NOTE(review): 'opening' is computed but not used below
						shared_ptr<IfcFeatureElementSubtraction> opening = dynamic_pointer_cast<IfcFeatureElementSubtraction>(ifc_object_def);
						if( !m_geom_settings->getRenderObjectFilter()(ifc_object_def) )
						{
							continue;
						}
						std::string guid;
						if (ifc_object_def->m_GlobalId)
						{
							std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
							guid = converterX.to_bytes(ifc_object_def->m_GlobalId->m_value);
						}
						m_map_outside_spatial_structure[guid] = ifc_object_def;
					}
				}
			}
		}
		catch( OutOfMemoryException& e )
		{
			throw e;
		}
		catch( BuildingException& e )
		{
			messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
		}
		catch( std::exception& e )
		{
			messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
		}
		catch( ... )
		{
			messageCallback( "undefined error", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ );
		}

		m_representation_converter->getProfileCache()->clearProfileCache();
		progressTextCallback( L"Loading file done" );
		progressValueCallback( 1.0, "geometry" );
	}

	//\brief method convertIfcProduct: Creates geometry objects (meshset with connected vertex-edge-face graph) from an IfcProduct object
	// caution: when using OpenMP, this method runs in parallel threads, so every write access to member variables needs a write lock
	void convertIfcProductShape( shared_ptr<ProductShapeData>& product_shape )
	{
		if( product_shape->m_ifc_object_definition.expired() )
		{
			return;
		}
		shared_ptr<IfcObjectDefinition> ifc_object_def(product_shape->m_ifc_object_definition);
		shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def);
		if (!ifc_product)
		{
			return;
		}
		if( !ifc_product->m_Representation )
		{
			return;
		}

		// NOTE(review): length_factor is computed but not used below
		double length_factor = 1.0;
		if( m_ifc_model )
		{
			if( m_ifc_model->getUnitConverter() )
			{
				length_factor = m_ifc_model->getUnitConverter()->getLengthInMeterFactor();
			}
		}

		// evaluate IFC geometry
		shared_ptr<IfcProductRepresentation>& product_representation = ifc_product->m_Representation;
		std::vector<shared_ptr<IfcRepresentation> >& vec_representations = product_representation->m_Representations;
		for( size_t i_representations = 0; i_representations < vec_representations.size(); ++i_representations )
		{
			const shared_ptr<IfcRepresentation>& representation = vec_representations[i_representations];
			if( !representation )
			{
				continue;
			}
			try
			{
				shared_ptr<RepresentationData> representation_data( new RepresentationData() );
				m_representation_converter->convertIfcRepresentation( representation, representation_data );
				product_shape->m_vec_representations.push_back( representation_data );
				representation_data->m_parent_product = product_shape;
			}
			catch( OutOfMemoryException& e )
			{
				throw e;
			}
			catch( BuildingException& e )
			{
				messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
			}
			catch( std::exception& e )
			{
				messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
			}
		}

		// IfcProduct has an ObjectPlacement that can be local or global
		product_shape->m_object_placement = ifc_product->m_ObjectPlacement;
		if( ifc_product->m_ObjectPlacement )
		{
			// IfcPlacement2Matrix follows related placements in case of local coordinate systems
			std::unordered_set<IfcObjectPlacement*> placement_already_applied;
			m_representation_converter->getPlacementConverter()->convertIfcObjectPlacement( ifc_product->m_ObjectPlacement, product_shape, placement_already_applied, false );
		}

		// handle openings
		// NOTE(review): vec_opening_data is declared but not used below
		std::vector<shared_ptr<ProductShapeData> > vec_opening_data;
		const shared_ptr<IfcElement> ifc_element = dynamic_pointer_cast<IfcElement>(ifc_product);
		if( ifc_element )
		{
			m_representation_converter->subtractOpenings(ifc_element, product_shape);
		}

		// Fetch the IFCProduct relationships
		if( ifc_product->m_IsDefinedBy_inverse.size() > 0 )
		{
			std::vector<weak_ptr<IfcRelDefinesByProperties> >& vec_IsDefinedBy_inverse = ifc_product->m_IsDefinedBy_inverse;
			for( size_t i = 0; i < vec_IsDefinedBy_inverse.size(); ++i )
			{
				shared_ptr<IfcRelDefinesByProperties> rel_def( vec_IsDefinedBy_inverse[i] );
				shared_ptr<IfcPropertySetDefinitionSelect> relating_property_definition_select = rel_def->m_RelatingPropertyDefinition;
				if( relating_property_definition_select )
				{
					// TYPE IfcPropertySetDefinitionSelect = SELECT (IfcPropertySetDefinition ,IfcPropertySetDefinitionSet);
					shared_ptr<IfcPropertySetDefinition> property_set_def = dynamic_pointer_cast<IfcPropertySetDefinition>(relating_property_definition_select);
					if( property_set_def )
					{
						shared_ptr<IfcPropertySet> property_set = dynamic_pointer_cast<IfcPropertySet>(property_set_def);
						if( property_set )
						{
							readAppearanceFromPropertySet( property_set, product_shape );
						}
						continue;
					}

					shared_ptr<IfcPropertySetDefinitionSet> property_set_def_set = dynamic_pointer_cast<IfcPropertySetDefinitionSet>(relating_property_definition_select);
					if( property_set_def_set )
					{
						std::vector<shared_ptr<IfcPropertySetDefinition> >& vec_propterty_set_def = property_set_def_set->m_vec;
						std::vector<shared_ptr<IfcPropertySetDefinition> >::iterator it_property_set_def;
						for( it_property_set_def = vec_propterty_set_def.begin(); it_property_set_def != vec_propterty_set_def.end(); ++it_property_set_def )
						{
							shared_ptr<IfcPropertySetDefinition> property_set_def2 = (*it_property_set_def);
							if( property_set_def2 )
							{
								shared_ptr<IfcPropertySet> property_set = dynamic_pointer_cast<IfcPropertySet>(property_set_def2);
								if( property_set )
								{
									readAppearanceFromPropertySet( property_set, product_shape );
								}
							}
						}
						continue;
					}
				}
			}
		}
	}

	// If <product_shape> is an IfcElement with openings, subtract those openings
	// also from the shapes of objects aggregated into it (via IfcRelAggregates),
	// e.g. an IfcBuildingElementPart belonging to a wall with a window opening.
	void subtractOpeningsInRelatedObjects(shared_ptr<ProductShapeData>& product_shape)
	{
		if( product_shape->m_ifc_object_definition.expired() )
		{
			return;
		}
		shared_ptr<IfcObjectDefinition> ifc_object_def(product_shape->m_ifc_object_definition);
		shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def);
		if (!ifc_product)
		{
			return;
		}
		shared_ptr<IfcElement> ifc_element = dynamic_pointer_cast<IfcElement>(ifc_product);
		if( !ifc_element )
		{
			return;
		}
		if( ifc_element->m_HasOpenings_inverse.size() == 0 )
		{
			return;
		}

		// collect aggregated objects
		const std::vector<weak_ptr<IfcRelAggregates> >& vec_decomposed_by = ifc_element->m_IsDecomposedBy_inverse;
		for( auto& decomposed_by : vec_decomposed_by )
		{
			if( decomposed_by.expired() )
			{
				continue;
			}
			shared_ptr<IfcRelAggregates> decomposed_by_aggregates(decomposed_by);
			std::vector<shared_ptr<IfcObjectDefinition> >& vec_related_objects = decomposed_by_aggregates->m_RelatedObjects;
			for( auto& related_object : vec_related_objects )
			{
				if( !related_object )
				{
					continue;
				}
				std::string guid;
				// NOTE(review): related objects without a GlobalId are skipped silently
				if (related_object->m_GlobalId)
				{
					std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
					guid = converterX.to_bytes(related_object->m_GlobalId->m_value);
					auto it_find_related_shape = m_product_shape_data.find(guid);
					if( it_find_related_shape != m_product_shape_data.end() )
					{
						shared_ptr<ProductShapeData>& related_product_shape = it_find_related_shape->second;
						m_representation_converter->subtractOpenings(ifc_element, related_product_shape);
					}
				}
			}
		}
	}

	// Static-style message sink: <ptr> is the GeometryConverter instance the
	// model/converter were wired to. Deduplicates per-entity messages before
	// forwarding to messageCallback.
	virtual void messageTarget( void* ptr, shared_ptr<StatusCallback::Message> m )
	{
		GeometryConverter* myself = (GeometryConverter*)ptr;
		if( myself )
		{
			if( m->m_entity )
			{
#ifdef ENABLE_OPENMP
				ScopedLock lock( myself->m_writelock_messages );
#endif
				// make sure that the same message for one entity does not appear several times
				const int entity_id = m->m_entity->m_entity_id;
				auto it = myself->m_messages.find( entity_id );
				if( it != myself->m_messages.end() )
				{
					std::vector<shared_ptr<StatusCallback::Message> >& vec_message_for_entity = it->second;
					for( size_t i = 0; i < vec_message_for_entity.size(); ++i )
					{
						shared_ptr<StatusCallback::Message>& existing_message = vec_message_for_entity[i];
						if( existing_message->m_message_text.compare( m->m_message_text ) == 0 )
						{
							// same message for same entity is already there, so ignore message
							return;
						}
					}
					vec_message_for_entity.push_back( m );
				}
				else
				{
					std::vector<shared_ptr<StatusCallback::Message> >& vec = myself->m_messages.insert( std::make_pair( entity_id, std::vector<shared_ptr<StatusCallback::Message> >() ) ).first->second;
					vec.push_back( m );
				}
			}
			myself->messageCallback( m );
		}
	}
};
tzetar.c
//-------------------------------------------------------------------------//
//                                                                         //
//  This benchmark is an OpenMP C version of the NPB SP code. This OpenMP  //
//  C version is developed by the Center for Manycore Programming at Seoul //
//  National University and derived from the OpenMP Fortran versions in    //
//  "NPB3.3-OMP" developed by NAS.                                         //
//                                                                         //
//  Permission to use, copy, distribute and modify this software for any   //
//  purpose with or without fee is hereby granted. This software is        //
//  provided "as is" without express or implied warranty.                  //
//                                                                         //
//  Information on NPB 3.3, including the technical report, the original   //
//  specifications, source code, results and information on how to submit  //
//  new results, is available at:                                          //
//                                                                         //
//           http://www.nas.nasa.gov/Software/NPB/                         //
//                                                                         //
//  Send comments or suggestions for this OpenMP C version to              //
//  cmp@aces.snu.ac.kr                                                     //
//                                                                         //
//          Center for Manycore Programming                                //
//          School of Computer Science and Engineering                     //
//          Seoul National University                                      //
//          Seoul 151-744, Korea                                           //
//                                                                         //
//          E-mail:  cmp@aces.snu.ac.kr                                    //
//                                                                         //
//-------------------------------------------------------------------------//

//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo,    //
//          and Jaejin Lee                                                 //
//-------------------------------------------------------------------------//

#include "header.h"

//---------------------------------------------------------------------
// block-diagonal matrix-vector multiplication
//---------------------------------------------------------------------
void tzetar()
{
  int i, j, k;
  double t1, t2, t3, ac, xvel, yvel, zvel, r1, r2, r3, r4, r5;
  double btuz, ac2u, uzik1;

  if (timeron) timer_start(t_tzetar);
  // Each interior grid cell (i,j,k) is updated independently from the global
  // solution/rhs arrays, so the outer k-loop is parallelized with all
  // per-cell temporaries declared private.
  #pragma omp parallel for default(shared) \
    private(i,j,k,t1,t2,t3,ac,xvel,yvel,zvel,r1,r2,r3,r4,r5,btuz,ac2u,uzik1)
  for (k = 1; k <= nz2; k++) {
    for (j = 1; j <= ny2; j++) {
      for (i = 1; i <= nx2; i++) {
        // local copies of the velocities and speed of sound at this cell
        xvel = us[k][j][i];
        yvel = vs[k][j][i];
        zvel = ws[k][j][i];
        ac   = speed[k][j][i];

        ac2u = ac*ac;

        // local copies of the five rhs components (they are overwritten below)
        r1 = rhs[k][j][i][0];
        r2 = rhs[k][j][i][1];
        r3 = rhs[k][j][i][2];
        r4 = rhs[k][j][i][3];
        r5 = rhs[k][j][i][4];

        uzik1 = u[k][j][i][0];
        btuz  = bt * uzik1;

        t1 = btuz/ac * (r4 + r5);
        t2 = r3 + t1;
        t3 = btuz * (r4 - r5);

        // rhs <- T_zeta * rhs (block-diagonal multiply, per the header comment)
        rhs[k][j][i][0] = t2;
        rhs[k][j][i][1] = -uzik1*r2 + xvel*t2;
        rhs[k][j][i][2] = uzik1*r1 + yvel*t2;
        rhs[k][j][i][3] = zvel*t2 + t3;
        rhs[k][j][i][4] = uzik1*(-xvel*r2 + yvel*r1) +
                          qs[k][j][i]*t2 + c2iv*ac2u*t1 + zvel*t3;
      }
    }
  }
  if (timeron) timer_stop(t_tzetar);
}
Cycle.c
/* * The MIT License * * Copyright 2020 The OpenNARS authors. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/
#include "Cycle.h"

static long conceptProcessID = 0; //avoids duplicate concept processing

//doing inference within the matched concept, returning whether decisionMaking should continue
//c: matched concept, e: incoming event (belief or goal), currentTime: current system cycle time
//Belief events are stored as the concept's belief spike; goal events are forwarded to decision making.
static Decision Cycle_ActivateSensorimotorConcept(Concept *c, Event *e, long currentTime)
{
    Decision decision = {0};
    if(e->truth.confidence > MIN_CONFIDENCE)
    {
        c->usage = Usage_use(c->usage, currentTime, false);
        //add event as spike to the concept:
        if(e->type == EVENT_TYPE_BELIEF)
        {
            c->belief_spike = *e;
        }
        else
        {
            //pass spike if the concept doesn't have a satisfying motor command
            decision = Decision_Suggest(c, e, currentTime);
        }
    }
    return decision;
}

//Process an event, by creating a concept, or activating an existing
//Returns the best decision found among all concepts unifying with the event.
//Concepts are visited at most once per call (guarded by conceptProcessID).
static Decision Cycle_ProcessSensorimotorEvent(Event *e, long currentTime)
{
    conceptProcessID++; //process the to e related concepts
    Decision best_decision = {0};
    //add a new concept for e if not yet existing
    Memory_Conceptualize(&e->term, currentTime);
    e->processed = true;
    //determine the concept it is related to
    bool e_hasVariable = Variable_hasVariable(&e->term, true, true, true);
    for(int i=0; i<UNIFICATION_DEPTH; i++)
    {
        //candidate concepts: the exact-term concept plus all concepts indexed under atom i of the event term
        ConceptChainElement chain_extended = { .c = Memory_FindConceptByTerm(&e->term), .next = InvertedAtomIndex_GetConceptChain(e->term.atoms[i]) };
        ConceptChainElement* chain = &chain_extended;
        while(chain != NULL)
        {
            Concept *c = chain->c;
            chain = chain->next;
            if(c != NULL && c->processID != conceptProcessID)
            {
                c->processID = conceptProcessID; //mark visited for this pass
                Event ecp = *e;
                if(!e_hasVariable) //concept matched to the event which doesn't have variables
                {
                    Substitution subs = Variable_Unify(&c->term, &e->term); //concept with variables,
                    if(subs.success)
                    {
                        ecp.term = e->term;
                        Decision decision = Cycle_ActivateSensorimotorConcept(c, &ecp, currentTime);
                        //prefer higher desire; a specialized decision beats an unspecialized one
                        if(decision.execute && decision.desire >= best_decision.desire && (!best_decision.specialized || decision.specialized))
                        {
                            best_decision = decision;
                        }
                    }
                }
                else
                {
                    Substitution subs = Variable_Unify(&e->term, &c->term); //event with variable matched to concept
                    if(subs.success)
                    {
                        bool success;
                        ecp.term = Variable_ApplySubstitute(e->term, subs, &success);
                        if(success)
                        {
                            Decision decision = Cycle_ActivateSensorimotorConcept(c, &ecp, currentTime);
                            if(decision.execute && decision.desire >= best_decision.desire && (!best_decision.specialized || decision.specialized))
                            {
                                best_decision = decision;
                            }
                        }
                    }
                }
            }
        }
    }
    return best_decision;
}

//Pop up to cnt highest-priority events from queue into selectionArray/selectionPriority,
//writing the actual number popped into *selectedCnt.
void Cycle_PopEvents(Event *selectionArray, double *selectionPriority, int *selectedCnt, PriorityQueue *queue, int cnt)
{
    *selectedCnt = 0;
    for(int i=0; i<cnt; i++)
    {
        Event *e;
        double priority = 0;
        if(!PriorityQueue_PopMax(queue, (void**) &e, &priority))
        {
            assert(queue->itemsAmount == 0, "No item was popped, only acceptable reason is when it's empty");
            IN_DEBUG( puts("Selecting event failed, maybe there is no event left."); )
            break;
        }
        selectionPriority[*selectedCnt] = priority;
        selectionArray[*selectedCnt] = *e; //needs to be copied because will be added in a batch
        (*selectedCnt)++; //that while processing, would make recycled pointers invalid to use
    }
}

//Derive a subgoal from a sequence goal
//{Event (a &/ b)!, Event a.} |- Event b! Truth_Deduction
//if Truth_Expectation(a) >= ANTICIPATION_THRESHOLD else
//{Event (a &/ b)!} |- Event a! Truth_StructuralDeduction
//Returns false if the goal is not a sequence (caller then processes it directly).
//NOTE(review): currentTime used below is the file-global, not a parameter -- confirm it is set before this runs.
bool Cycle_GoalSequenceDecomposition(Event *selectedGoal, double selectedGoalPriority)
{
    //1. Extract potential subgoals
    if(!Narsese_copulaEquals(selectedGoal->term.atoms[0], '+')) //left-nested sequence
    {
        return false;
    }
    Term componentGoalsTerm[MAX_SEQUENCE_LEN+1] = {0};
    Term cur_seq = selectedGoal->term;
    //unfold the left-nested sequence into its components (right child first)
    int i=0;
    for(; Narsese_copulaEquals(cur_seq.atoms[0], '+'); i++)
    {
        assert(i<=MAX_SEQUENCE_LEN, "The sequence was longer than MAX_SEQUENCE_LEN, change your input or increase the parameter!");
        componentGoalsTerm[i] = Term_ExtractSubterm(&cur_seq, 2);
        cur_seq = Term_ExtractSubterm(&cur_seq, 1);
    }
    componentGoalsTerm[i] = cur_seq; //the last element at this point
    //2. Find first subgoal which isn't fulfilled
    int lastComponentOccurrenceTime = -1;
    Event newGoal = Inference_EventUpdate(selectedGoal, currentTime);
    int j=i;
    for(; j>=0; j--)
    {
        Term *componentGoal = &componentGoalsTerm[j];
        Substitution best_subs = {0};
        Concept *best_c = NULL;
        double best_exp = 0.0;
        //the concept with belief event of highest truth exp
        conceptProcessID++;
        for(int i=0; i<UNIFICATION_DEPTH; i++)
        {
            ConceptChainElement chain_extended = { .c = Memory_FindConceptByTerm(componentGoal), .next = InvertedAtomIndex_GetConceptChain(componentGoal->atoms[i]) };
            ConceptChainElement* chain = &chain_extended;
            while(chain != NULL)
            {
                Concept *c = chain->c;
                chain = chain->next;
                if(c != NULL && c->processID != conceptProcessID)
                {
                    c->processID = conceptProcessID;
                    if(!Variable_hasVariable(&c->term, true, true, true)) //concept matched to the event which doesn't have variables
                    {
                        Substitution subs = Variable_Unify(componentGoal, &c->term); //event with variable matched to concept
                        if(subs.success)
                        {
                            bool success = true;
                            if(c->belief_spike.type != EVENT_TYPE_DELETED)
                            {
                                //check whether the temporal order is violated
                                if(c->belief_spike.occurrenceTime < lastComponentOccurrenceTime)
                                {
                                    continue;
                                }
                                //check whether belief is too weak (not recent enough or not true enough)
                                if(Truth_Expectation(Truth_Projection(c->belief_spike.truth, c->belief_spike.occurrenceTime, currentTime)) < CONDITION_THRESHOLD)
                                {
                                    continue;
                                }
                                //check whether the substitution works for the subgoals coming after it
                                for(int u=j-1; u>=0; u--)
                                {
                                    bool goalsubs_success;
                                    Variable_ApplySubstitute(componentGoalsTerm[u], subs, &goalsubs_success);
                                    if(!goalsubs_success)
                                    {
                                        success = false;
                                        break;
                                    }
                                }
                                //Use this specific concept for subgoaling if it has the strongest belief event
                                if(success)
                                {
                                    double expectation = Truth_Expectation(Truth_Projection(c->belief_spike.truth, c->belief_spike.occurrenceTime, currentTime));
                                    if(expectation > best_exp)
                                    {
                                        best_exp = expectation;
                                        best_c = c;
                                        best_subs = subs;
                                    }
                                }
                            }
                        }
                    }
                }
            }
            //no need to search another concept, as it didn't have a var so the concept we just iterated is the only one
            if(!Variable_hasVariable(componentGoal, true, true, true))
            {
                break;
            }
        }
        //no corresponding belief
        if(best_c == NULL)
        {
            break;
        }
        //all components fulfilled? Then nothing to do
        if(j == 0)
        {
            return true;
        }
        //Apply substitution implied by the event satisfying the current subgoal to the next subgoals
        for(int u=j-1; u>=0; u--)
        {
            bool goalsubs_success;
            componentGoalsTerm[u] = Variable_ApplySubstitute(componentGoalsTerm[u], best_subs, &goalsubs_success);
            assert(goalsubs_success, "Cycle_GoalSequenceDecomposition: The subsitution succeeded before but not now!");
        }
        //build component subgoal according to {(a, b)!, a} |- b! Truth_Deduction
        lastComponentOccurrenceTime = best_c->belief_spike.occurrenceTime;
        newGoal = Inference_GoalSequenceDeduction(&newGoal, &best_c->belief_spike, currentTime);
        newGoal.term = componentGoalsTerm[j-1];
    }
    if(j == i) //we derive first component according to {(a,b)!} |- a! Truth_StructuralDeduction
    {
        newGoal.term = componentGoalsTerm[i];
        newGoal.truth = Truth_StructuralDeduction(newGoal.truth, newGoal.truth);
    }
    Memory_AddEvent(&newGoal, currentTime, selectedGoalPriority * Truth_Expectation(newGoal.truth), 0, false, true, false, false, false);
    return true;
}

//Propagate subgoals, leading to decisions
//First pass: find the best executable decision among the selected goals.
//If an operation executes, cycling goal events are cleared; otherwise goal
//spikes are propagated backwards through precondition implications.
static void Cycle_ProcessInputGoalEvents(long currentTime)
{
    Decision best_decision = {0};
    //process selected goals
    for(int i=0; i<goalsSelectedCnt; i++)
    {
        Event *goal = &selectedGoals[i];
        IN_DEBUG( fputs("selected goal ", stdout); Narsese_PrintTerm(&goal->term); puts(""); )
        //if goal is a sequence, overwrite with first deduced non-fulfilled element
        if(Cycle_GoalSequenceDecomposition(goal, selectedGoalsPriority[i])) //the goal was a sequence which led to a subgoal derivation
        {
            continue;
        }
        Decision decision = Cycle_ProcessSensorimotorEvent(goal, currentTime);
        if(decision.execute && decision.desire > best_decision.desire && (!best_decision.specialized || decision.specialized))
        {
            best_decision = decision;
        }
    }
    if(best_decision.execute && best_decision.operationID > 0)
    {
        //reset cycling goal events after execution to avoid "residue actions"
        PriorityQueue_INIT(&cycling_goal_events, cycling_goal_events.items, cycling_goal_events.maxElements);
        //also don't re-add the selected goal:
        goalsSelectedCnt = 0;
        //execute decision
        Decision_Execute(&best_decision);
    }
    //pass goal spikes on to the next
    for(int i=0; i<goalsSelectedCnt && !best_decision.execute; i++)
    {
        conceptProcessID++; //process subgoaling for the related concepts for each selected goal
        Event *goal = &selectedGoals[i];
        for(int k=0; k<UNIFICATION_DEPTH; k++)
        {
            ConceptChainElement chain_extended = { .c = Memory_FindConceptByTerm(&goal->term), .next = InvertedAtomIndex_GetConceptChain(goal->term.atoms[k]) };
            ConceptChainElement* chain = &chain_extended;
            while(chain != NULL)
            {
                Concept *c = chain->c;
                chain = chain->next;
                if(c != NULL && c->processID != conceptProcessID && Variable_Unify(&c->term, &goal->term).success) //could be <a --> M>! matching to some <... =/> <$1 --> M>>.
                {
                    c->processID = conceptProcessID;
                    bool revised;
                    c->goal_spike = Inference_RevisionAndChoice(&c->goal_spike, goal, currentTime, &revised);
                    //derive subgoals from all precondition implications whose postcondition unifies with the goal spike
                    for(int opi=0; opi<=OPERATIONS_MAX; opi++)
                    {
                        for(int j=0; j<c->precondition_beliefs[opi].itemsAmount; j++)
                        {
                            Implication *imp = &c->precondition_beliefs[opi].array[j];
                            if(!Memory_ImplicationValid(imp))
                            {
                                Table_Remove(&c->precondition_beliefs[opi], j);
                                j--; //table shifted down, re-check this index
                                continue;
                            }
                            Term postcondition = Term_ExtractSubterm(&imp->term, 2);
                            Substitution subs = Variable_Unify(&postcondition, &c->goal_spike.term);
                            Implication updated_imp = *imp;
                            bool success;
                            updated_imp.term = Variable_ApplySubstitute(updated_imp.term, subs, &success);
                            if(success)
                            {
                                Event newGoal = Inference_GoalDeduction(&c->goal_spike, &updated_imp);
                                Event newGoalUpdated = Inference_EventUpdate(&newGoal, currentTime);
                                IN_DEBUG( fputs("derived goal ", stdout); Narsese_PrintTerm(&newGoalUpdated.term); puts(""); )
                                Memory_AddEvent(&newGoalUpdated, currentTime, selectedGoalsPriority[i] * Truth_Expectation(newGoalUpdated.truth), 0, false, true, false, false, false);
                            }
                        }
                    }
                }
            }
        }
    }
}

//Reinforce link between concept a and b (creating it if non-existent)
//Performs temporal induction between two belief events (a before b), deriving
//the specific implication plus ext./int. variable-introduced generalizations.
//NOTE(review): currentTime here is the file-global, not a parameter -- confirm.
static void Cycle_ReinforceLink(Event *a, Event *b)
{
    if(a->type != EVENT_TYPE_BELIEF || b->type != EVENT_TYPE_BELIEF)
    {
        return;
    }
    Term a_term_nop = Narsese_GetPreconditionWithoutOp(&a->term);
    Concept *A = Memory_FindConceptByTerm(&a_term_nop);
    Concept *B = Memory_FindConceptByTerm(&b->term);
    if(A != NULL && B != NULL && A != B)
    {
        //temporal induction
        if(!Stamp_checkOverlap(&a->stamp, &b->stamp))
        {
            bool success;
            Implication precondition_implication = Inference_BeliefInduction(a, b, &success);
            if(success)
            {
                precondition_implication.sourceConcept = A;
                precondition_implication.sourceConceptId = A->id;
                if(precondition_implication.truth.confidence >= MIN_CONFIDENCE)
                {
                    //extensional var intro:
                    bool success; //intentionally shadows the outer success
                    Term general_implication_term_ext = IntroduceImplicationVariables(precondition_implication.term, &success, true);
                    if(success && Variable_hasVariable(&general_implication_term_ext, true, true, false))
                    {
                        NAL_DerivedEvent(general_implication_term_ext, OCCURRENCE_ETERNAL, precondition_implication.truth, precondition_implication.stamp, currentTime, 1, 1, precondition_implication.occurrenceTimeOffset, NULL, 0);
                    }
                    //intensional var intro:
                    bool success2;
                    Term general_implication_term_int = IntroduceImplicationVariables(precondition_implication.term, &success2, false);
                    if(success2 && Variable_hasVariable(&general_implication_term_int, true, true, false))
                    {
                        NAL_DerivedEvent(general_implication_term_int, OCCURRENCE_ETERNAL, precondition_implication.truth, precondition_implication.stamp, currentTime, 1, 1, precondition_implication.occurrenceTimeOffset, NULL, 0);
                    }
                    //specific implication
                    NAL_DerivedEvent(precondition_implication.term, OCCURRENCE_ETERNAL, precondition_implication.truth, precondition_implication.stamp, currentTime, 1, 1, precondition_implication.occurrenceTimeOffset, NULL, 0);
                }
            }
        }
    }
}

//Re-add the selected belief and goal events to memory (with their selection priorities)
void Cycle_PushEvents(long currentTime)
{
    for(int i=0; i<beliefsSelectedCnt; i++)
    {
        Memory_AddEvent(&selectedBeliefs[i], currentTime, selectedBeliefsPriority[i], 0, false, false, true, false, false);
    }
    for(int i=0; i<goalsSelectedCnt; i++)
    {
        Memory_AddEvent(&selectedGoals[i], currentTime, selectedGoalsPriority[i], 0, false, false, true, false, false);
    }
}

//Process the newest belief events from the FIFO and mine temporal implications
void Cycle_ProcessInputBeliefEvents(long currentTime)
{
    //1. process newest event
    if(belief_events.itemsAmount > 0)
    {
        //form concepts for the sequences of different length
        for(int len=MAX_SEQUENCE_LEN-1; len>=0; len--)
        {
            Event *toProcess = FIFO_GetNewestSequence(&belief_events, len);
            if(toProcess != NULL && !toProcess->processed && toProcess->type != EVENT_TYPE_DELETED)
            {
                assert(toProcess->type == EVENT_TYPE_BELIEF, "A different event type made it into belief events!");
                Cycle_ProcessSensorimotorEvent(toProcess, currentTime);
                Event postcondition = *toProcess;
                //Mine for <(&/,precondition,operation) =/> postcondition> patterns in the FIFO:
                if(len == 0) //postcondition always len1
                {
                    int op_id = Narsese_getOperationID(&postcondition.term);
                    Decision_Anticipate(op_id, currentTime); //collection of negative evidence, new way
                    for(int k=1; k<belief_events.itemsAmount; k++)
                    {
                        for(int len2=0; len2<MAX_SEQUENCE_LEN; len2++)
                        {
                            Event *precondition = FIFO_GetKthNewestSequence(&belief_events, k, len2);
                            if(len2 > 0)
                            {
                                //stop extending the precondition sequence across an operation
                                Event *potential_op = FIFO_GetKthNewestSequence(&belief_events, k+len2, 0);
                                if(potential_op != NULL && potential_op->type != EVENT_TYPE_DELETED && Narsese_isOperation(&potential_op->term))
                                {
                                    break;
                                }
                            }
                            if(precondition != NULL && precondition->type != EVENT_TYPE_DELETED)
                            {
                                Cycle_ReinforceLink(precondition, &postcondition);
                            }
                        }
                    }
                }
            }
        }
    }
}

//Apply the rule table between each selected belief event and related concepts,
//with a proportional controller adapting the concept-priority firing threshold.
void Cycle_Inference(long currentTime)
{
    //Inferences
#if STAGE==2
    for(int i=0; i<beliefsSelectedCnt; i++)
    {
        conceptProcessID++; //process the related belief concepts
        long countConceptsMatched = 0;
        for(;;)
        {
            long countConceptsMatchedNew = 0;
            //Adjust dynamic firing threshold: (proportional "self"-control)
            double conceptPriorityThresholdCurrent = conceptPriorityThreshold;
            //NOTE(review): divides by currentTime -- confirm this never runs at currentTime == 0
            long countConceptsMatchedAverage = Stats_countConceptsMatchedTotal / currentTime;
            double set_point = BELIEF_CONCEPT_MATCH_TARGET;
            double process_value = countConceptsMatchedAverage;
            double error = process_value - set_point;
            double increment = error*CONCEPT_THRESHOLD_ADAPTATION;
            conceptPriorityThreshold = MIN(1.0, MAX(0.0, conceptPriorityThreshold + increment));
            //IN_DEBUG( printf("conceptPriorityThreshold=%f\n", conceptPriorityThreshold); )
            Event *e = &selectedBeliefs[i];
            double priority = selectedBeliefsPriority[i];
            Term dummy_term = {0};
            Truth dummy_truth = {0};
            RuleTable_Apply(e->term, dummy_term, e->truth, dummy_truth, e->occurrenceTime, 0, e->stamp, currentTime, priority, 1, false, NULL, 0);
            for(int k=0; k<UNIFICATION_DEPTH; k++)
            {
                ConceptChainElement* chain = InvertedAtomIndex_GetConceptChain(e->term.atoms[k]);
                while(chain != NULL)
                {
                    Concept *c = chain->c;
                    chain = chain->next;
                    if(c != NULL && c->processID != conceptProcessID)
                    {
                        c->processID = conceptProcessID;
                        long validation_cid = c->id; //allows for lockfree rule table application (only adding to memory is locked)
                        if(c->priority < conceptPriorityThresholdCurrent)
                        {
                            continue;
                        }
                        countConceptsMatchedNew++;
                        countConceptsMatched++;
                        Stats_countConceptsMatchedTotal++;
                        if(c->belief.type != EVENT_TYPE_DELETED && countConceptsMatched <= BELIEF_CONCEPT_MATCH_TARGET)
                        {
                            //use eternal belief as belief
                            Event* belief = &c->belief;
                            Event future_belief = c->predicted_belief;
                            //but if there is a predicted one in the event's window, use this one
                            if(e->occurrenceTime != OCCURRENCE_ETERNAL && future_belief.type != EVENT_TYPE_DELETED && labs(e->occurrenceTime - future_belief.occurrenceTime) < EVENT_BELIEF_DISTANCE) //take event as belief if it's stronger
                            {
                                future_belief.truth = Truth_Projection(future_belief.truth, future_belief.occurrenceTime, e->occurrenceTime);
                                future_belief.occurrenceTime = e->occurrenceTime;
                                belief = &future_belief;
                            }
                            //unless there is an actual belief which falls into the event's window
                            Event project_belief = c->belief_spike;
                            if(e->occurrenceTime != OCCURRENCE_ETERNAL && project_belief.type != EVENT_TYPE_DELETED && labs(e->occurrenceTime - project_belief.occurrenceTime) < EVENT_BELIEF_DISTANCE) //take event as belief if it's stronger
                            {
                                project_belief.truth = Truth_Projection(project_belief.truth, project_belief.occurrenceTime, e->occurrenceTime);
                                project_belief.occurrenceTime = e->occurrenceTime;
                                belief = &project_belief;
                            }
                            //Check for overlap and apply inference rules
                            if(!Stamp_checkOverlap(&e->stamp, &belief->stamp))
                            {
                                Stamp stamp = Stamp_make(&e->stamp, &belief->stamp);
                                if(PRINT_CONTROL_INFO)
                                {
                                    fputs("Apply rule table on ", stdout);
                                    Narsese_PrintTerm(&e->term);
                                    printf(" Priority=%f\n", priority);
                                    fputs(" and ", stdout);
                                    Narsese_PrintTerm(&c->term);
                                    puts("");
                                }
                                long occurrenceTimeDistance = 0;
                                if(belief->occurrenceTime != OCCURRENCE_ETERNAL && e->occurrenceTime != OCCURRENCE_ETERNAL)
                                {
                                    occurrenceTimeDistance = labs(belief->occurrenceTime - e->occurrenceTime);
                                }
                                RuleTable_Apply(e->term, c->term, e->truth, belief->truth, e->occurrenceTime, occurrenceTimeDistance, stamp, currentTime, priority, c->priority, true, c, validation_cid);
                            }
                        }
                    }
                }
            }
            if(countConceptsMatched > Stats_countConceptsMatchedMax)
            {
                Stats_countConceptsMatchedMax = countConceptsMatched;
            }
            //stop once enough concepts fired or no new concept matched this round
            if(countConceptsMatched >= BELIEF_CONCEPT_MATCH_TARGET || countConceptsMatchedNew == 0)
            {
                break;
            }
        }
    }
#endif
}

//Derive predictions from selected belief events via matching precondition implications.
//The concept loop is parallelized; only Memory_AddEvent is serialized.
void Cycle_Prediction(long currentTime)
{
    for(int h=0; h<beliefsSelectedCnt; h++)
    {
        Event *e = &selectedBeliefs[h];
        double parentpriority = selectedBeliefsPriority[h];
        #pragma omp parallel for
        for(int j=0; j<concepts.itemsAmount; j++)
        {
            Concept *c = concepts.items[j].address;
            if(c->priority < conceptPriorityThreshold)
            {
                continue;
            }
            for(int k=0; k<c->precondition_beliefs[0].itemsAmount; k++)
            {
                if(!Memory_ImplicationValid(&c->precondition_beliefs[0].array[k]))
                {
                    Table_Remove(&c->precondition_beliefs[0], k--);
                    continue;
                }
                Implication *imp = &c->precondition_beliefs[0].array[k];
                Term precondition = Term_ExtractSubterm(&imp->term, 1);
                Substitution subs = Variable_Unify(&precondition, &e->term);
                if(subs.success)
                {
                    assert(Narsese_copulaEquals(imp->term.atoms[0],'$'), "Not a valid implication term!");
                    Concept *c_pre = Memory_FindConceptByTerm(&precondition);
                    if(c_pre != NULL)
                    {
                        //NOTE(review): this redeclaration shadows the outer subs with the same unification result
                        Substitution subs = Variable_Unify(&precondition, &e->term);
                        Implication updated_imp = *imp;
                        bool success;
                        updated_imp.term = Variable_ApplySubstitute(updated_imp.term, subs, &success);
                        if(success)
                        {
                            Event predicted = Inference_BeliefDeduction(e, &updated_imp);
                            #pragma omp critical(Memory)
                            {
                                Memory_AddEvent(&predicted, currentTime, parentpriority*Truth_Expectation(predicted.truth), 0, false, true, false, false, true);
                            }
                        }
                    }
                }
            }
        }
    }
}

//Decay event and concept priorities and rebuild the priority queues
void Cycle_RelativeForgetting(long currentTime)
{
    //Apply event forgetting:
    for(int i=0; i<cycling_belief_events.itemsAmount; i++)
    {
        cycling_belief_events.items[i].priority *= EVENT_DURABILITY;
    }
    for(int i=0; i<cycling_goal_events.itemsAmount; i++)
    {
        cycling_goal_events.items[i].priority *= EVENT_DURABILITY;
    }
    //Apply concept forgetting:
    for(int i=0; i<concepts.itemsAmount; i++)
    {
        Concept *c = concepts.items[i].address;
        c->priority *= CONCEPT_DURABILITY;
        concepts.items[i].priority = Usage_usefulness(c->usage, currentTime); //how concept memory is sorted by, by concept usefulness
    }
    //BEGIN SPECIAL HANDLING FOR USER KNOWLEDGE
    if(ontology_handling)
    {
        for(int i=0; i<concepts.itemsAmount; i++)
        {
            Concept *c = concepts.items[i].address;
            if(c->hasUserKnowledge)
            {
                c->usage = Usage_use(c->usage, currentTime, false); //user implication won't be forgotten
            }
        }
    }
    //END SPECIAL HANDLING FOR USER KNOWLEDGE
    //Re-sort queues
    PriorityQueue_Rebuild(&concepts);
    PriorityQueue_Rebuild(&cycling_belief_events);
    PriorityQueue_Rebuild(&cycling_goal_events);
}

//One full reasoning cycle: event selection, input processing, inference,
//prediction, forgetting, and re-insertion of the selected events.
void Cycle_Perform(long currentTime)
{
    Metric_send("NARNode.Cycle", 1);
    //1. Retrieve BELIEF/GOAL_EVENT_SELECTIONS events from cyclings events priority queue (which includes both input and derivations)
    Cycle_PopEvents(selectedGoals, selectedGoalsPriority, &goalsSelectedCnt, &cycling_goal_events, GOAL_EVENT_SELECTIONS);
    Cycle_PopEvents(selectedBeliefs, selectedBeliefsPriority, &beliefsSelectedCnt, &cycling_belief_events, BELIEF_EVENT_SELECTIONS);
    //2. Process incoming belief events from FIFO, building implications utilizing input sequences
    Cycle_ProcessInputBeliefEvents(currentTime);
    //3. Process incoming goal events, propagating subgoals according to implications, triggering decisions when above decision threshold
    Cycle_ProcessInputGoalEvents(currentTime);
    //4. Perform inference between in 1. retrieved events and semantically/temporally related, high-priority concepts to derive and process new events
    Cycle_Inference(currentTime);
    Cycle_Prediction(currentTime);
    //5. Apply relative forgetting for concepts according to CONCEPT_DURABILITY and events according to BELIEF_EVENT_DURABILITY
    Cycle_RelativeForgetting(currentTime);
    //6. Push in 1. selected events back to the queue as well, applying relative forgetting based on BELIEF_EVENT_DURABILITY_ON_USAGE
    Cycle_PushEvents(currentTime);
}
hybrid-hello.c
#include <stdio.h> #include <mpi.h> #include <omp.h> int main(int argc, char *argv[]) { int my_id, omp_rank; int provided, required=MPI_THREAD_FUNNELED; MPI_Init_thread(&argc, &argv, required, &provided); MPI_Comm_rank(MPI_COMM_WORLD, &my_id); #pragma omp parallel private(omp_rank) { omp_rank = omp_get_thread_num(); printf("I'm thread %d in process %d\n", omp_rank, my_id); } MPI_Finalize(); }
SE1P_rsrc_cell_force.c
#include "mex.h"
#include "mex_compat.h"
#include "math.h"
#include "cell_list.h"
#ifdef INTEL_MKL
#include "mkl.h"
#endif

/* MEX gateway computing the real-space (short-range) Ewald force sum for the
 * 1-periodic (SE1P) case, using a cell list and a cutoff rc. */
#define X prhs[0] // Source locations
#define F prhs[1] // Source strengths
#define RC prhs[2] // cutoff
#define XI prhs[3] // Ewald Param
#define P prhs[4] // Periodic wrap
#define BOX prhs[5] // domain size
#define U plhs[0] // Output

#ifndef VERBOSE
#define VERBOSE 0
#endif

#ifdef _OPENMP
#define CRITICAL _Pragma("omp critical")
#else
#define CRITICAL
#endif

#define PI 3.141592653589793

/* Squared Euclidean norm of a 3-vector. */
inline double norm2(double * a)
{
    return a[0]*a[0] + a[1]*a[1] + a[2]*a[2];
}

// Compute buffer stuff: pairs are batched so exp/erfc can be evaluated
// over contiguous arrays (vectorizable, or via MKL VM calls).
#define BUF_SIZE 256
typedef struct
{
    int n;                  // number of buffered pairs
    int idx_t[BUF_SIZE];    // target point indices
    double rvec[3*BUF_SIZE];// displacement vectors source->target
    double rsq[BUF_SIZE];   // squared distances
} ComputeBuffer;

/* Append one (target index, displacement, squared distance) triple.
 * Caller must flush before the buffer exceeds BUF_SIZE. */
static void buffer_push(ComputeBuffer* buffer, int idx_t, double* rvec, double rsq)
{
    int n = buffer->n;
    buffer->idx_t[n] = idx_t;
    for(int i=0; i<3; i++)
        buffer->rvec[3*n + i] = rvec[i];
    buffer->rsq[n] = rsq;
    buffer->n = n + 1;
}

/* Flush the buffer: evaluate the screened-Coulomb force kernel for all
 * buffered pairs of source idx_s and accumulate into u (both directions,
 * Newton's third law). Resets buffer->n to 0. */
static void empty_buffer(ComputeBuffer* buffer, const double* restrict x, const double* restrict f, double* restrict u, int idx_s, double xi)
{
    int N = buffer->n;
    int idx_t;
    double rvec[3], fs, ft;
    double xi2 = xi*xi;
    double us[3] = {0, 0, 0}; // accumulator for the source point's force
    fs = f[idx_s];
    // Do what we can to help the compiler vectorize exp and erfc, if possible
    const double* restrict r2 = buffer->rsq;
    double c1[BUF_SIZE]; // radial kernel factor per pair
#ifdef INTEL_MKL
    double r[BUF_SIZE];
    double xir[BUF_SIZE];
    double xi2r2[BUF_SIZE];
    for (int n=0; n<N; n++)
    {
        r[n] = sqrt(r2[n]);
        xir[n] = xi*r[n];
        xi2r2[n] = -xi2*r2[n];
    }
    double erfc_vec[BUF_SIZE];
    double exp_vec[BUF_SIZE];
    // MKL vector-math batch evaluation of erfc and exp
    vdErfc(N, xir, erfc_vec);
    vdExp(N, xi2r2, exp_vec);
    for (int n=0; n<N; n++)
    {
        double xiexp = xi*exp_vec[n];
        c1[n] = -(2.0*xiexp / sqrt(PI) + erfc_vec[n] / r[n])/ r2[n];
    }
#else
    for (int n=0; n<N; n++)
    {
        double r = sqrt(r2[n]);
        double xiexp = xi*exp(-xi2*r2[n]);
        c1[n] = -(2.0*xiexp / sqrt(PI) + erfc(xi*r) / r) /r2[n];
    }
#endif
    // Compute interactions
    for (int n=0; n<N; n++)
    {
        idx_t = buffer->idx_t[n];
        ft = f[idx_t];
        for(int i=0; i<3; i++)
            rvec[i] = buffer->rvec[n*3 + i];
        for(int i=0; i<3; i++)
        {
            u[idx_t*3+i] += fs*c1[n]*rvec[i]; // action on target
            us[i] -= ft*c1[n]*rvec[i];        // equal and opposite on source
        }
    }
    for(int i=0; i<3; i++)
        u[idx_s*3 + i] += us[i];
    buffer->n = 0;
}

// Entry point
// prhs: X (3xN locations), F (N strengths), RC (cutoff), XI (Ewald param),
//       P (number of periodic images in x), BOX (domain size)
// plhs: U (3xN real-space force contribution)
void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] )
{
    // input target
    const int N = mxGetN(X);
    const double xi = (double) mxGetScalar(XI);
    const double rc = (double) mxGetScalar(RC);
    const double rcsq = rc*rc;
    const double p = (double) mxGetScalar(P); // NOTE(review): used as an integer loop bound below -- confirm P is integral
    const double* x = mxGetPr(X);
    const double* f = mxGetPr(F);
    const double* box = mxGetPr(BOX);
    // output
    U = mxCreateDoubleMatrix(3, N, mxREAL);
    double* restrict u_out = mxGetPr(U);
    // Setup cell list variables
    int ncell[3];
    int* restrict cell_list;
    int* restrict cell_idx;
    double rn;
    // 27 neighbor-cell offsets (3x3x3 stencil including the home cell)
    int px[27] = {-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1};
    int py[27] = {-1,-1,-1, 0, 0, 0, 1, 1, 1,-1,-1,-1, 0, 0, 0, 1, 1, 1,-1,-1,-1, 0, 0, 0, 1, 1, 1};
    int pz[27] = {-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1};
    // Build cell list
    build_cell_list(x, N, box, rc, &rn, ncell, &cell_list, &cell_idx);
#ifdef _OPENMP
#pragma omp parallel
#endif
    { // Begin parallel section
        // Create a thread-local compute buffer
        ComputeBuffer buffer;
        // Setup local output
        double* restrict u;
        CRITICAL
        {
            u = __MALLOC(3*N*sizeof(double));
        }
        for(int i=0;i<3*N;i++) u[i] = 0.0;
        // Main loop
#ifdef _OPENMP
#pragma omp for schedule(dynamic) nowait
#endif
        for (int idx_s=0; idx_s<N; idx_s++)
        {
            double xs[3];
            int home_cell[3], icell[3];
            for(int i=0; i<3; i++)
            {
                // Source point
                xs[i] = x[idx_s*3 + i];
                // Determine home cell (truncation -- assumes coordinates in [0, box); confirm)
                home_cell[i] = xs[i]/rn;
            }
            // Iterate through near cells (including home cell)
            buffer.n = 0;
            for(int ip=0; ip<27; ip++)
            {
                // Get neigh cell
                icell[0] = home_cell[0] + px[ip];
                icell[1] = home_cell[1] + py[ip];
                icell[2] = home_cell[2] + pz[ip];
                // Stop at boundaries
                int inside = 1;
                for(int j=0; j<3; j++)
                {
                    if (icell[j] < 0 || icell[j] == ncell[j])
                        inside = 0;
                }
                if (!inside) continue;
                int icell_idx = icell[0] + icell[1]*ncell[0] + icell[2]*ncell[1]*ncell[0];
                // Go through cell list
                int cell_a = cell_idx[icell_idx];
                int cell_b = cell_idx[icell_idx+1];
                for(int point_idx=cell_a; point_idx<cell_b; point_idx++)
                {
                    int idx_t = cell_list[point_idx];
                    if (idx_s >= idx_t) continue; // each pair handled once (Newton's third law)
                    double rvec[3];
                    // periodic wrap: images -p..p along the single periodic (x) direction
                    for (int j=-p; j<=p; j++)
                    {
                        double pshift[] = {j*box[0],0,0};
                        for(int i=0; i<3; i++)
                            rvec[i] = x[idx_t*3 + i] - pshift[i] - xs[i];
                        double r2 = norm2(rvec);
                        if (r2 > rcsq) continue;
                        buffer_push(&buffer, idx_t, rvec, r2);
                        if (buffer.n == BUF_SIZE)
                            empty_buffer(&buffer, x, f, u, idx_s, xi);
                    }
                }
            }
            empty_buffer(&buffer, x, f, u, idx_s, xi);
        } // End of particle loop, collect results
        CRITICAL
        {
            for(int i=0; i<3*N; i++) u_out[i] += u[i];
        }
        // free/malloc not thread safe under MEX
        CRITICAL
        {
            __FREE(u);
        }
    }
}
queue.h
// -*- C++ -*- // Copyright (C) 2007-2015 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the terms // of the GNU General Public License as published by the Free Software // Foundation; either version 3, or (at your option) any later // version. // This library is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. /** @file parallel/queue.h * @brief Lock-free double-ended queue. * This file is a GNU parallel extension to the Standard C++ Library. */ // Written by Johannes Singler. #ifndef _GLIBCXX_PARALLEL_QUEUE_H #define _GLIBCXX_PARALLEL_QUEUE_H 1 #include <parallel/types.h> #include <parallel/base.h> #include <parallel/compatibility.h> /** @brief Decide whether to declare certain variable volatile in this file. */ #define _GLIBCXX_VOLATILE volatile namespace __gnu_parallel { /**@brief Double-ended queue of bounded size, allowing lock-free * atomic access. push_front() and pop_front() must not be called * concurrently to each other, while pop_back() can be called * concurrently at all times. * @c empty(), @c size(), and @c top() are intentionally not provided. * Calling them would not make sense in a concurrent setting. * @param _Tp Contained element type. 
*/
  template<typename _Tp>
    class _RestrictedBoundedConcurrentQueue
    {
    private:
      /** @brief Array of elements, seen as cyclic buffer. */
      _Tp* _M_base;

      /** @brief Maximal number of elements contained at the same time. */
      _SequenceIndex _M_max_size;

      /** @brief Cyclic __begin and __end pointers contained in one
          atomically changeable value. */
      _GLIBCXX_VOLATILE _CASable _M_borders;

    public:
      /** @brief Constructor. Not to be called concurrently, of course.
       *  @param __max_size Maximal number of elements to be contained. */
      _RestrictedBoundedConcurrentQueue(_SequenceIndex __max_size)
      {
        _M_max_size = __max_size;
        _M_base = new _Tp[__max_size];
        _M_borders = __encode2(0, 0);
        // Make the initialized borders visible to other threads.
#pragma omp flush
      }

      /** @brief Destructor. Not to be called concurrently, of course. */
      ~_RestrictedBoundedConcurrentQueue()
      { delete[] _M_base; }

      /** @brief Pushes one element into the queue at the front end.
       *  Must not be called concurrently with pop_front(). */
      void
      push_front(const _Tp& __t)
      {
        _CASable __former_borders = _M_borders;
        int __former_front, __former_back;
        __decode2(__former_borders, __former_front, __former_back);
        // Write the element first, then publish it by advancing the front.
        *(_M_base + __former_front % _M_max_size) = __t;
#if _GLIBCXX_ASSERTIONS
        // Otherwise: front - back > _M_max_size eventually.
        _GLIBCXX_PARALLEL_ASSERT(((__former_front + 1) - __former_back)
                                 <= _M_max_size);
#endif
        __fetch_and_add(&_M_borders, __encode2(1, 0));
      }

      /** @brief Pops one element from the queue at the front end.
       *  Must not be called concurrently with push_front(). */
      bool
      pop_front(_Tp& __t)
      {
        int __former_front, __former_back;
#pragma omp flush
        __decode2(_M_borders, __former_front, __former_back);
        while (__former_front > __former_back)
          {
            // Chance.
            _CASable __former_borders = __encode2(__former_front,
                                                  __former_back);
            _CASable __new_borders = __encode2(__former_front - 1,
                                               __former_back);
            if (__compare_and_swap(&_M_borders, __former_borders,
                                   __new_borders))
              {
                __t = *(_M_base + (__former_front - 1) % _M_max_size);
                return true;
              }
            // CAS lost the race: re-read the borders and retry.
#pragma omp flush
            __decode2(_M_borders, __former_front, __former_back);
          }
        return false;
      }

      /** @brief Pops one element from the queue at the back end.
       *  May be called concurrently at all times (see class comment). */
      bool
      pop_back(_Tp& __t)        //queue behavior
      {
        int __former_front, __former_back;
#pragma omp flush
        __decode2(_M_borders, __former_front, __former_back);
        while (__former_front > __former_back)
          {
            // Chance.
            _CASable __former_borders = __encode2(__former_front,
                                                  __former_back);
            _CASable __new_borders = __encode2(__former_front,
                                               __former_back + 1);
            if (__compare_and_swap(&_M_borders, __former_borders,
                                   __new_borders))
              {
                __t = *(_M_base + __former_back % _M_max_size);
                return true;
              }
            // CAS lost the race: re-read the borders and retry.
#pragma omp flush
            __decode2(_M_borders, __former_front, __former_back);
          }
        return false;
      }
    };
}       //namespace __gnu_parallel

#undef _GLIBCXX_VOLATILE

#endif /* _GLIBCXX_PARALLEL_QUEUE_H */
reader.h
#ifndef __READER_H__
#define __READER_H__

#include <fstream>
#include <random>
#include <cstring>
#include <cstdlib>
#include <cstdio>
#include <cstddef>
#include <cmath>
#include <iostream>
#include <sstream>
#include <iterator>
#include <algorithm>
#include <string>
#include <vector>

#ifdef _OPENMP
#include <omp.h>
#endif

/**
 * @brief Split a dense row-major dataset (A, b, w) into train and validation
 *        parts by taking the head rows for training and the tail for validation.
 *
 * @param A             Row-major feature matrix, b.size() rows.
 * @param b             Response vector (defines the row count m).
 * @param w             Per-row weights, same length as b.
 * @param trainX/Y/W    Output train split (resized here).
 * @param validX/Y/W    Output validation split (resized only if mValid > 0).
 * @param validFraction Fraction of rows (floor) that go to validation.
 * @param intercept     If 1, append a constant-1 column to trainX/validX.
 *
 * No-op (outputs untouched) when b is empty — avoids division by zero.
 */
template<typename T>
void splitData(const std::vector<T>& A,
               const std::vector<T>& b,
               const std::vector<T>& w,
               std::vector<T>& trainX,
               std::vector<T>& trainY,
               std::vector<T>& trainW,
               std::vector<T>& validX,
               std::vector<T>& validY,
               std::vector<T>& validW,
               double validFraction, int intercept) {
  using namespace std;
  cout << "START TRAIN/VALID SPLIT" << endl;
  // Split A/b into train/valid, via head/tail
  size_t m = b.size();
  if (m == 0) {  // guard: n = A.size() / m below would divide by zero
    cout << "END TRAIN/VALID SPLIT" << endl;
    return;
  }
  size_t mValid = static_cast<size_t>(validFraction * static_cast<double>(m));
  size_t mTrain = m - mValid;
  size_t n = A.size() / m;
  // If intercept == 1, add one extra column at the end, all constant 1s
  n += intercept;

  trainX.resize(mTrain * n);  // TODO FIXME: Should just point trainX to part of A to save memory
  trainY.resize(mTrain);
  trainW.resize(mTrain);
#ifdef _OPENMP
#pragma omp parallel for
#endif
  // signed index for OpenMP; ptrdiff_t avoids int overflow for large row counts
  for (std::ptrdiff_t i = 0; i < static_cast<std::ptrdiff_t>(mTrain); ++i) {  // rows
    trainY[i] = b[i];
    trainW[i] = w[i];
    for (size_t j = 0; j + intercept < n; ++j) {  // cols (skip intercept column)
      trainX[i * n + j] = A[i * (n - intercept) + j];
    }
    if (intercept) {
      trainX[i * n + n - 1] = 1;  // constant-1 intercept column
    }
  }

  if (mValid > 0) {
    validX.resize(mValid * n);
    validY.resize(mValid);
    validW.resize(mValid);
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (std::ptrdiff_t i = 0; i < static_cast<std::ptrdiff_t>(mValid); ++i) {  // rows
      validY[i] = b[mTrain + i];
      validW[i] = w[mTrain + i];
      for (size_t j = 0; j + intercept < n; ++j) {  // cols
        validX[i * n + j] = A[(mTrain + i) * (n - intercept) + j];
      }
      if (intercept) {
        validX[i * n + n - 1] = 1;
      }
    }
  }
  cout << "END TRAIN/VALID SPLIT" << endl;
  fflush(stdout);
}

/**
 * @brief Fill A (row-major, m x n), b and w either from a text file or with
 *        synthetic data.
 *
 * If @p file is non-empty: the file is expected to be space- or comma-
 * separated with the response in the last column and no header; rows/cols
 * are counted in a first pass, then the file is re-read (in parallel under
 * OpenMP, each thread reading its own row range through its own stream) and
 * A/b/w are resized and filled here. Weights are set to a constant 1.0.
 *
 * If @p file is empty: generates standard-normal A and a sparse true
 * coefficient vector, and ACCUMULATES the noisy responses into b (b[i] += ...).
 * NOTE(review): in this branch A, b, w must be pre-sized by the caller
 * (A: m*n, b and w: m) and b should be zero-initialized — confirm callers.
 *
 * @param m,n  Dimensions; only used when file is empty (overwritten otherwise).
 * @param file Path to the data file, or empty string to generate data.
 * @return 0 on success; exits the process if the file cannot be read.
 */
template<typename T>
int fillData(size_t m, size_t n,  // only used if file.empty()
             const std::string& file,
             std::vector<T>& A,
             std::vector<T>& b,
             std::vector<T>& w) {
  std::default_random_engine generator;
  std::uniform_real_distribution<T> u_dist(static_cast<T>(0), static_cast<T>(1));
  std::normal_distribution<T> n_dist(static_cast<T>(0), static_cast<T>(1));

  // READ-IN DATA
  if (!file.empty()) {
    // Pass 1: count rows, and columns of the first row.
    std::ifstream ifs(file);
    if (!ifs) {
      fprintf(stderr, "Cannot read file.\n");
      exit(1);
    }
    std::string line;
    size_t rows = 0;
    size_t cols = 0;
    while (std::getline(ifs, line)) {
      if (rows == 0) {
        std::string buf;
        std::stringstream ss(line);
        while (ss >> buf) cols++;
      }
      rows++;
    }
    if (rows == 0 || cols == 0) {  // guard: cols-- below would underflow
      fprintf(stderr, "Empty or malformed file.\n");
      exit(1);
    }
    cols--;  // don't count target column
    printf("rows: %zu\n", rows);
    fflush(stdout);
    printf("cols (w/o response): %zu\n", cols);
    fflush(stdout);
    ifs.close();

    m = rows;  // overwrite caller-supplied dimensions with file dimensions
    n = cols;
    A.resize(n * m);
    b.resize(m);
    w.resize(m);

#ifdef _OPENMP
#pragma omp parallel
    {
#endif
      // Each thread opens its own stream and reads a disjoint row range.
      std::ifstream in(file);
      if (!in) {
        fprintf(stderr, "Cannot read file.\n");
        exit(1);
      } else {
        // Expect space/comma-separated file, response in last column, no header
        const size_t maxlen = (n + 1) * 30;
        std::vector<char> linebuf(maxlen);  // heap buffer; VLAs are non-standard C++
        const char sep[] = " ,";
        size_t i = 0;
#ifdef _OPENMP
        int id = omp_get_thread_num();
        int nth = omp_get_num_threads();
        size_t len = m / nth;
        size_t from = id * len;
        size_t to = (id + 1) * len;
        if (to > m) to = m;
        if (id == nth - 1) to = m;  // last thread absorbs the remainder rows
#else
        size_t from = 0;
        size_t to = m;
#endif
        // Skip rows belonging to earlier threads.
        for (; i < from; ++i)
          in.getline(linebuf.data(), static_cast<std::streamsize>(maxlen));
        for (; i < to; ++i) {
          in.getline(linebuf.data(), static_cast<std::streamsize>(maxlen));
          char* savePtr;
          char* pch = strtok_r(linebuf.data(), sep, &savePtr);
          if (pch == NULL) continue;  // blank line: avoid atof(NULL)
          size_t j = 0;
          T val = static_cast<T>(atof(pch));
          A[i * n + j] = val;
          j++;
          while (pch != NULL) {
            pch = strtok_r(NULL, sep, &savePtr);
            if (pch != NULL) {
              val = static_cast<T>(atof(pch));
              if (j < n) {
                A[i * n + j] = val;  // feature column
              } else {
                b[i] = val;  // last column is the response
                w[i] = 1.0;  // just constant weight
              }
              j++;
            }
          }
        }
      }
#ifdef _OPENMP
    }
#endif

    // Sanity check: warn about non-finite values (does not abort).
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (std::ptrdiff_t i = 0; i < static_cast<std::ptrdiff_t>(m * n); ++i) {
      if (!std::isfinite(A[i]))
        fprintf(stderr, "NF: A[%td]=%g\n", i, static_cast<double>(A[i]));
    }
    for (size_t i = 0; i < m; ++i) {
      if (!std::isfinite(b[i]))
        fprintf(stderr, "b[%zu]=%g\n", i, static_cast<double>(b[i]));
      if (!std::isfinite(w[i]))
        fprintf(stderr, "w[%zu]=%g\n", i, static_cast<double>(w[i]));
    }
  } else {
    // GENERATE DATA (A, b, w assumed pre-sized by the caller; b accumulated).
    for (size_t i = 0; i < m * n; ++i)
      A[i] = n_dist(generator);

    // Sparse ground-truth coefficients: ~80% zeros, rest N(0,1)/sqrt(n).
    std::vector<T> x_true(n);
    for (size_t i = 0; i < n; ++i)
      x_true[i] = u_dist(generator) < static_cast<T>(0.8)
                      ? static_cast<T>(0)
                      : n_dist(generator) / static_cast<T>(std::sqrt(n));

#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (std::ptrdiff_t i = 0; i < static_cast<std::ptrdiff_t>(m); ++i)  // rows
      for (size_t j = 0; j < n; ++j)                                     // columns
        b[i] += A[i * n + j] * x_true[j];  // C(0-indexed) row-major order

    for (size_t i = 0; i < m; ++i)
      w[i] = 1.0;  // constant weight

    // Add Gaussian noise to the responses.
    for (size_t i = 0; i < m; ++i)
      b[i] += static_cast<T>(0.5) * n_dist(generator);
  }
  return 0;
}

#endif