source
stringlengths
3
92
c
stringlengths
26
2.25M
openmp_critical2.c
///TAFFO_TEST_ARGS -Xvra -propagate-all -fopenmp
#include <stdio.h>

#define MAX_N (100)

/* TAFFO OpenMP regression test: fill an annotated float array in a
 * parallel loop, then reduce it with a `critical`-guarded shared
 * accumulator, and print the total. */
int main(int argc, char *argv[])
{
    float array[MAX_N] __attribute__((annotate("scalar(range(0,100))")));
    int i = 0;

    /* Parallel initialization: array[i] = i. */
    #pragma omp parallel for
    for (i = 0; i < MAX_N; i++) {
        array[i] = i * 1.0;
    }

    float result __attribute__((annotate("scalar(range(0,5000))"))) = 0;

    /* Parallel accumulation; the critical section serializes each
     * update of the shared `result`. */
    #pragma omp parallel for
    for (i = 0; i < MAX_N; i++) {
        #pragma omp critical
        result += array[i];
    }

    printf("result: %f\n", result);
}
GB_binop__min_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__min_int64 // A.*B function (eWiseMult): GB_AemultB__min_int64 // A*D function (colscale): GB_AxD__min_int64 // D*A function (rowscale): GB_DxB__min_int64 // C+=B function (dense accum): GB_Cdense_accumB__min_int64 // C+=b function (dense accum): GB_Cdense_accumb__min_int64 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__min_int64 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__min_int64 // C=scalar+B GB_bind1st__min_int64 // C=scalar+B' GB_bind1st_tran__min_int64 // C=A+scalar GB_bind2nd__min_int64 // C=A'+scalar GB_bind2nd_tran__min_int64 // C type: int64_t // A type: int64_t // B,b type: int64_t // BinaryOp: cij = GB_IMIN (aij, bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t 
// cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_IMIN (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_INT64 || GxB_NO_MIN_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// C += A+B, all three matrices dense; the work is done by the included
// template, parameterized by the GB_* macros defined above.
void GB_Cdense_ewise3_accum__min_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__min_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__min_int64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__min_int64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns first;
    // kept as generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__min_int64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__min_int64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// free the per-matrix ek_slice workspaces allocated by the templates
#undef  GB_FREE_ALL
#define GB_FREE_ALL \
{ \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}

GrB_Info GB_AaddB__min_int64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__min_int64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__min_int64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int64_t bij = Bx [p] ;
        Cx [p] = GB_IMIN (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__min_int64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        int64_t aij = Ax [p] ;
        Cx [p] = GB_IMIN (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int64_t aij = Ax [pA] ; \
    Cx [pC] = GB_IMIN (x, aij) ; \
}

GrB_Info GB_bind1st_tran__min_int64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int64_t aij = Ax [pA] ; \
    Cx [pC] = GB_IMIN (aij, y) ; \
}

GrB_Info GB_bind2nd_tran__min_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
hermv_c_csc_u_lo_trans.c
#include "alphasparse/kernel.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "alphasparse/util.h"
#include <memory.h>

/*
 * Computes y := beta*y + alpha*op(A)*x for a square sparse matrix in CSC
 * layout, touching only strictly off-diagonal entries of each column plus
 * an implicit diagonal contribution (the diagonal itself is skipped below).
 * NOTE(review): the "hermv / unit / lower / trans" semantics are inferred
 * from the file and function names and the alpha_madde_2c (conjugating?)
 * helper — confirm against the alphasparse kernel naming convention.
 *
 * Parallelization: each thread accumulates into its own y_local[tid]
 * scratch vector; the per-thread vectors are reduced into y at the end,
 * so no two threads write the same y element concurrently.
 */
static alphasparse_status_t
hermv_csc_u_lo_trans_unroll(const ALPHA_Number alpha,
                            const ALPHA_SPMAT_CSC *A,
                            const ALPHA_Number *x,
                            const ALPHA_Number beta,
                            ALPHA_Number *y)
{
    // m==n
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    const ALPHA_INT num_threads = alpha_get_thread_num();

    // y = beta*y + alpha*x  (the implicit unit-diagonal contribution)
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT i = 0; i < m; ++i)
    {
        ALPHA_Number tmp1, tmp2;
        alpha_mul(tmp1, beta, y[i]);
        alpha_mul(tmp2, alpha, x[i]);
        alpha_add(y[i], tmp1, tmp2);
    }
    // each thread has a y_local
    // NOTE(review): alpha_memalign results are not NULL-checked.
    ALPHA_Number **y_local = alpha_memalign(num_threads * sizeof(ALPHA_Number *), DEFAULT_ALIGNMENT);
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT i = 0; i < num_threads; i++)
    {
        y_local[i] = alpha_memalign(m * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT);
        memset(y_local[i], '\0', sizeof(ALPHA_Number) * m);
    }
    // Sweep the columns; for each stored entry a(r,i) below the diagonal,
    // scatter alpha*conj(a)*x[i] to row r and gather alpha*a*x[r] into
    // row i (the symmetric/Hermitian mirror).
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT i = 0; i < n; ++i)
    {
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_INT ais = A->cols_start[i];
        ALPHA_INT aie = A->cols_end[i];
        ALPHA_INT ail = aie - ais;
        // binary-search past any entries at or above the diagonal,
        // and skip the stored diagonal entry itself if present
        ALPHA_INT start = alpha_lower_bound(&A->row_indx[ais], &A->row_indx[aie], i) - A->row_indx;
        if (start < aie && A->row_indx[start] == i)
            start += 1;
        const ALPHA_INT* A_row = &A->row_indx[ais];
        const ALPHA_Number* A_val = &A->values[ais];
        ALPHA_INT ai = start - ais ;
        ALPHA_Number alpha_xi, tmp;
        alpha_mul(alpha_xi, alpha, x[i]);
        // main loop unrolled by 4
        for (; ai < ail-3; ai+=4)
        {
            ALPHA_Number av0 = A_val[ai];
            ALPHA_Number av1 = A_val[ai + 1];
            ALPHA_Number av2 = A_val[ai + 2];
            ALPHA_Number av3 = A_val[ai + 3];
            ALPHA_INT ar0 = A_row[ai];
            ALPHA_INT ar1 = A_row[ai + 1];
            ALPHA_INT ar2 = A_row[ai + 2];
            ALPHA_INT ar3 = A_row[ai + 3];
            alpha_madde_2c(y_local[tid][ar0], av0, alpha_xi);
            alpha_madde_2c(y_local[tid][ar1], av1, alpha_xi);
            alpha_madde_2c(y_local[tid][ar2], av2, alpha_xi);
            alpha_madde_2c(y_local[tid][ar3], av3, alpha_xi);
            alpha_mul(tmp, alpha, av0);
            alpha_madde(y_local[tid][i], tmp, x[ar0]);
            alpha_mul(tmp, alpha, av1);
            alpha_madde(y_local[tid][i], tmp, x[ar1]);
            alpha_mul(tmp, alpha, av2);
            alpha_madde(y_local[tid][i], tmp, x[ar2]);
            alpha_mul(tmp, alpha, av3);
            alpha_madde(y_local[tid][i], tmp, x[ar3]);
        }
        // remainder loop
        for (; ai < ail; ai++)
        {
            ALPHA_Number av = A_val[ai];
            ALPHA_INT ar = A_row[ai];
            alpha_madde_2c(y_local[tid][ar], av, alpha_xi);
            alpha_mul(tmp, alpha, av);
            alpha_madde(y_local[tid][i], tmp, x[ar]);
        }
    }
    // reduce the per-thread partial vectors into y
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT col = 0; col < m; col++)
        for (ALPHA_INT i = 0; i < num_threads; i++)
        {
            alpha_add(y[col], y[col], y_local[i][col]);
        }
    for (ALPHA_INT i = 0; i < num_threads; i++)
    {
        alpha_free(y_local[i]);
    }
    alpha_free(y_local);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

// Public entry point: dispatches to the unrolled implementation.
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_CSC *A,
                           const ALPHA_Number *x,
                           const ALPHA_Number beta,
                           ALPHA_Number *y)
{
    return hermv_csc_u_lo_trans_unroll(alpha, A, x, beta, y);
}
ODME.h
/* Portions Copyright 2019-2021 Xuesong Zhou and Peiheng Li, Cafer Avci
 * If you help write or modify the code, please also list your names here.
 * The reason of having Copyright info here is to ensure all the modified version, as a whole, under the GPL
 * and further prevent a violation of the GPL.
 *
 * More about "How to use GNU licenses for your own software"
 * http://www.gnu.org/licenses/gpl-howto.html
 */

// Peiheng, 02/03/21, remove them later after adopting better casting
#pragma warning(disable : 4305 4267 4018)
// stop warning: "conversion from 'int' to 'float', possible loss of data"
#pragma warning(disable: 4244)

#ifdef _WIN32
#include "pch.h"
#endif

#include <iostream>
#include <fstream>
#include <sstream>
#include <iomanip>
#include <string>
#include <cstring>
#include <cstdio>
#include <ctime>
#include <cmath>
#include <algorithm>
#include <functional>
#include <stack>
#include <list>
#include <vector>
#include <map>
#include <omp.h>
#include "config.h"
#include "utils.h"

using std::max;
using std::min;
using std::cout;
using std::endl;
using std::string;
using std::vector;
using std::map;
using std::ifstream;
using std::ofstream;
using std::istringstream;

#include "DTA.h"

// If measurement.csv does not exist, create a default one by sampling about
// 1% of the links (capacity < 2500, link_type >= 1) and writing a synthetic
// count of 1/3 of each sampled link's total capacity. If the file already
// exists it is left untouched.
void Assignment::GenerateDefaultMeasurementData()
{
    // step 1: read measurement.csv
    CCSVParser parser_measurement;
    if (parser_measurement.OpenCSVFile("measurement.csv", false))
    {
        // the file already exists: keep the user's data
        parser_measurement.CloseCSVFile();
        return;
    }
    FILE* g_pFileModelLink = fopen("measurement.csv", "w");
    if (g_pFileModelLink != NULL)
    {
        fprintf(g_pFileModelLink, "measurement_id,measurement_type,o_zone_id,d_zone_id,from_node_id,to_node_id,count1,upper_bound_flag1,notes\n");
        //83 link 1 3 5000
        int measurement_id = 1;
        // sample roughly one link out of every hundred
        int sampling_rate = g_link_vector.size() / 100 + 1;
        for (int i = 0; i < g_link_vector.size(); i++)
        {
            if (i % sampling_rate == 0 && g_link_vector[i].lane_capacity < 2500 && g_link_vector[i].link_type >= 1)
            {
                fprintf(g_pFileModelLink, "%d,link,,,%d,%d,%f,0,generated from preprocssing based on 1/3 of link capacity\n",
                    measurement_id++,
                    g_node_vector[g_link_vector[i].from_node_seq_no].node_id,
                    g_node_vector[g_link_vector[i].to_node_seq_no].node_id,
                    g_link_vector[i].lane_capacity * g_link_vector[i].number_of_lanes * 0.3333);
            }
        }
        fclose(g_pFileModelLink);
    }
}

// updates for OD re-generations
//
// Origin-Destination Matrix Estimation: reads link/production/attraction
// observations from measurement.csv into the link and zone structures, then
// runs OD_updating_iterations gradient-style passes that nudge each path
// volume toward the observed counts (bounded to +/-10% change per pass).
void Assignment::Demand_ODME(int OD_updating_iterations, int sensitivity_analysis_iterations)
{
    if (OD_updating_iterations >= 1)
    {
        GenerateDefaultMeasurementData();
        // step 1: read measurement.csv
        CCSVParser parser_measurement;
        if (parser_measurement.OpenCSVFile("measurement.csv", true))
        {
            while (parser_measurement.ReadRecord())  // if this line contains [] mark, then we will also read field headers.
            {
                string measurement_type;
                parser_measurement.GetValueByFieldName("measurement_type", measurement_type);
                if (measurement_type == "link")
                {
                    int from_node_id;
                    if (!parser_measurement.GetValueByFieldName("from_node_id", from_node_id))
                        continue;
                    int to_node_id;
                    if (!parser_measurement.GetValueByFieldName("to_node_id", to_node_id))
                        continue;
                    // add the to node id into the outbound (adjacent) node list
                    if (g_node_id_to_seq_no_map.find(from_node_id) == assignment.g_node_id_to_seq_no_map.end())
                    {
                        dtalog.output() << "Error: from_node_id " << from_node_id << " in file measurement.csv is not defined in node.csv." << endl;
                        //has not been defined
                        continue;
                    }
                    if (g_node_id_to_seq_no_map.find(to_node_id) == assignment.g_node_id_to_seq_no_map.end())
                    {
                        dtalog.output() << "Error: to_node_id " << to_node_id << " in file measurement.csv is not defined in node.csv." << endl;
                        //has not been defined
                        continue;
                    }
                    float count = -1;
                    for (int tau = 0; tau < assignment.g_number_of_demand_periods; ++tau)
                    {
                        int upper_bound_flag = 0;
                        {
                            // read count<k> / upper_bound_flag<k> for this demand period
                            int demand_period_id = assignment.g_DemandPeriodVector[tau].demand_period_id;
                            if (assignment.g_DemandPeriodVector[tau].number_of_demand_files == 0)
                                continue;
                            char VDF_field_name[50];
                            sprintf(VDF_field_name, "count%d", demand_period_id);
                            parser_measurement.GetValueByFieldName(VDF_field_name, count, true);
                            sprintf(VDF_field_name, "upper_bound_flag%d", demand_period_id);
                            parser_measurement.GetValueByFieldName(VDF_field_name, upper_bound_flag, true);
                        }
                        // map external node number to internal node seq no.
                        int internal_from_node_seq_no = assignment.g_node_id_to_seq_no_map[from_node_id];
                        int internal_to_node_seq_no = assignment.g_node_id_to_seq_no_map[to_node_id];
                        if (g_node_vector[internal_from_node_seq_no].m_to_node_2_link_seq_no_map.find(internal_to_node_seq_no) != g_node_vector[internal_from_node_seq_no].m_to_node_2_link_seq_no_map.end())
                        {
                            int link_seq_no = g_node_vector[internal_from_node_seq_no].m_to_node_2_link_seq_no_map[internal_to_node_seq_no];
                            if (g_link_vector[link_seq_no].VDF_period[tau].obs_count >= 1)  // data exist
                            {
                                if (upper_bound_flag == 0)
                                {
                                    // over write only if the new data are acutal counts,
                                    g_link_vector[link_seq_no].VDF_period[tau].obs_count = count;
                                    g_link_vector[link_seq_no].VDF_period[tau].upper_bound_flag = upper_bound_flag;
                                }
                                else  // if the new data are upper bound, skip it and keep the actual counts
                                {
                                    // do nothing
                                }
                            }
                            else
                            {
                                g_link_vector[link_seq_no].VDF_period[tau].obs_count = count;
                                g_link_vector[link_seq_no].VDF_period[tau].upper_bound_flag = upper_bound_flag;
                            }
                        }
                        else
                        {
                            // NOTE(review): message says "timing.csv" but this
                            // record came from measurement.csv — confirm and fix.
                            dtalog.output() << "Error: Link " << from_node_id << "->" << to_node_id << " in file timing.csv is not defined in link.csv." << endl;
                            continue;
                        }
                    }

                    if (measurement_type == "production")
                    {
                        int o_zone_id;
                        if (!parser_measurement.GetValueByFieldName("o_zone_id", o_zone_id))
                            continue;
                        if (g_zoneid_to_zone_seq_no_mapping.find(o_zone_id) != g_zoneid_to_zone_seq_no_mapping.end())
                        {
                            float obs_production = -1;
                            if (parser_measurement.GetValueByFieldName("count", obs_production))
                            {
                                g_zone_vector[g_zoneid_to_zone_seq_no_mapping[o_zone_id]].obs_production = obs_production;
                            }
                        }
                    }
                    if (measurement_type == "attraction")
                    {
                        // note: reuses the name o_zone_id for the d_zone_id field value
                        int o_zone_id;
                        if (!parser_measurement.GetValueByFieldName("d_zone_id", o_zone_id))
                            continue;
                        if (g_zoneid_to_zone_seq_no_mapping.find(o_zone_id) != g_zoneid_to_zone_seq_no_mapping.end())
                        {
                            float obs_attraction = -1;
                            if (parser_measurement.GetValueByFieldName("count", obs_attraction))
                            {
                                g_zone_vector[g_zoneid_to_zone_seq_no_mapping[o_zone_id]].obs_attraction = obs_attraction;
                            }
                        }
                    }
                }
            }
            parser_measurement.CloseCSVFile();
        }

        // step 1: input the measurements of
        // Pi
        // Dj
        // link l

        // step 2: loop for adjusting OD demand
        double prev_gap = 9999999;
        for (int s = 0; s < OD_updating_iterations; ++s)
        {
            float total_gap = 0;
            float total_relative_gap = 0;
            float total_system_travel_cost = 0;
            //step 2.1
            // we can have a recursive formulat to reupdate the current link volume by a factor of k/(k+1),
            //  and use the newly generated path flow to add the additional 1/(k+1)
            double system_gap = 0;
            double gap = g_reset_and_update_link_volume_based_on_ODME_columns(g_link_vector.size(), s, system_gap);
            //step 2.2: based on newly calculated path volumn, update volume based travel time, and update volume based measurement error/deviation
            // and use the newly generated path flow to add the additional 1/(k+1)
            double gap_improvement = gap - prev_gap;
            //if (s >= 5 && gap_improvement > 0.01)  // convergency criterion // comment out to maintain consistency
            //    break;
            //if(s == OD_updating_iterations - sensitivity_analysis_iterations)
            //{
            //    //sensitivity analysis is activated SA iterations right before the end of entire ODME process to keep the route choice factors into account
            //    // e.g. ODME 200: SA = 10: then the lane change occurs exactly once at iteration 200-10 = 190, the route choice change still happens from 190, 191,192, till 200
            //    for (int i = 0; i < g_link_vector.size(); ++i)
            //    {
            //        for (int tau = 0; tau < assignment.g_number_of_demand_periods; ++tau)
            //        {
            //            // used in travel time calculation
            //            if (g_link_vector[i].VDF_period[tau].sa_lanes_change != 0)  // we
            //            {
            //                g_link_vector[i].VDF_period[tau].nlanes += g_link_vector[i].VDF_period[tau].sa_lanes_change;  // apply the lane changes
            //            }
            //        }
            //    }
            //}
            prev_gap = gap;

            int column_pool_counts = 0;
            int column_path_counts = 0;
            int column_pool_with_sensor_counts = 0;
            int column_path_with_sensor_counts = 0;

            //step 3: calculate shortest path at inner iteration of column flow updating
#pragma omp parallel for
            for (int orig = 0; orig < g_zone_vector.size(); ++orig)  // o
            {
                CColumnVector* p_column_pool;
                std::map<int, CColumnPath>::iterator it, it_begin, it_end;
                int column_vector_size;
                int path_seq_count = 0;
                float path_toll = 0;
                float path_gradient_cost = 0;
                float path_distance = 0;
                float path_travel_time = 0;
                int link_seq_no;
                float total_switched_out_path_volume = 0;
                float step_size = 0;
                float previous_path_volume = 0;
                int from_zone_sindex = g_zone_vector[orig].sindex;
                if (from_zone_sindex == -1)
                    continue;
                for (int dest = 0; dest < g_zone_vector.size(); ++dest)  //d
                {
                    int to_zone_sindex = g_zone_vector[dest].sindex;
                    if (to_zone_sindex == -1)
                        continue;
                    for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at)  //at
                    {
                        for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau)  //tau
                        {
                            p_column_pool = &(assignment.g_column_pool[from_zone_sindex][to_zone_sindex][at][tau]);
                            if (p_column_pool->od_volume > 0)
                            {
                                column_pool_counts++;
                                column_vector_size = p_column_pool->path_node_sequence_map.size();
                                path_seq_count = 0;
                                it_begin = p_column_pool->path_node_sequence_map.begin();
                                it_end = p_column_pool->path_node_sequence_map.end();
                                //stage 1: least cost
                                double least_cost = 999999;
                                int least_cost_path_seq_no = -1;
                                int least_cost_path_node_sum_index = -1;
                                path_seq_count = 0;
                                it_begin = p_column_pool->path_node_sequence_map.begin();
                                it_end = p_column_pool->path_node_sequence_map.end();
                                // recompute each path's toll/distance/time and find the cheapest path
                                for (it = it_begin; it != it_end; ++it)
                                {
                                    path_toll = 0;
                                    path_distance = 0;
                                    path_travel_time = 0;
                                    for (int nl = 0; nl < it->second.m_link_size; ++nl)  // arc a
                                    {
                                        link_seq_no = it->second.path_link_vector[nl];
                                        path_toll += g_link_vector[link_seq_no].VDF_period[tau].toll[at];
                                        path_distance += g_link_vector[link_seq_no].link_distance_VDF;
                                        double link_travel_time = g_link_vector[link_seq_no].travel_time_per_period[tau];
                                        path_travel_time += link_travel_time;
                                    }
                                    it->second.path_toll = path_toll;
                                    it->second.path_travel_time = path_travel_time;
                                    if (path_travel_time < least_cost)
                                    {
                                        least_cost = path_travel_time;
                                        least_cost_path_seq_no = it->second.path_seq_no;
                                        least_cost_path_node_sum_index = it->first;
                                    }
                                }
                                //stage 2: deviation based on observation
                                int i = 0;
                                for (it = it_begin; it != it_end; ++it, ++i)  // for each k
                                {
                                    column_path_counts++;
                                    //if (s >= 1 && it->second.measurement_flag == 0)  // after 1 iteration, if there are no data passing through this path column. we will skip it in the ODME process
                                    //    continue;
                                    it->second.UE_gap = (it->second.path_travel_time - least_cost);
                                    path_gradient_cost = 0;
                                    path_distance = 0;
                                    path_travel_time = 0;
                                    p_column_pool->m_passing_sensor_flag = 0;
                                    // step 3.1 origin production flow gradient
                                    // est_production_dev = est_production - obs_production;
                                    // requirement: when equality flag is 1,
                                    if (g_zone_vector[orig].obs_production > 0)
                                    {
                                        if (g_zone_vector[orig].obs_production_upper_bound_flag == 0)
                                            path_gradient_cost += g_zone_vector[orig].est_production_dev;
                                        if (g_zone_vector[orig].obs_production_upper_bound_flag == 1 && g_zone_vector[orig].est_production_dev > 0)
                                            /*only if est_production is greater than obs value , otherwise, do not apply*/
                                            path_gradient_cost += g_zone_vector[orig].est_production_dev;
                                        p_column_pool->m_passing_sensor_flag += 1;
                                        it->second.measurement_flag = 1;
                                    }
                                    // step 3.2 destination attraction flow gradient
                                    // NOTE(review): the upper-bound flags below are read from
                                    // g_zone_vector[orig] while the deviation comes from [dest]
                                    // — confirm whether [dest] flags were intended.
                                    if (g_zone_vector[dest].obs_attraction > 0)
                                    {
                                        if (g_zone_vector[orig].obs_attraction_upper_bound_flag == 0)
                                            path_gradient_cost += g_zone_vector[dest].est_attraction_dev;
                                        if (g_zone_vector[orig].obs_attraction_upper_bound_flag == 1 && g_zone_vector[dest].est_attraction_dev > 0)
                                            path_gradient_cost += g_zone_vector[dest].est_attraction_dev;
                                        p_column_pool->m_passing_sensor_flag += 1;
                                        it->second.measurement_flag = 1;
                                    }
                                    float est_count_dev = 0;
                                    for (int nl = 0; nl < it->second.m_link_size; ++nl)  // arc a
                                    {
                                        // step 3.3 link flow gradient
                                        link_seq_no = it->second.path_link_vector[nl];
                                        if (g_link_vector[link_seq_no].VDF_period[tau].obs_count >= 1)
                                        {
                                            if (g_link_vector[link_seq_no].VDF_period[tau].upper_bound_flag == 0)
                                            {
                                                path_gradient_cost += g_link_vector[link_seq_no].VDF_period[tau].est_count_dev;
                                                est_count_dev += g_link_vector[link_seq_no].VDF_period[tau].est_count_dev;
                                            }
                                            if (g_link_vector[link_seq_no].VDF_period[tau].upper_bound_flag == 1 && g_link_vector[link_seq_no].VDF_period[tau].est_count_dev > 0)
                                            {// we only consider the over capaity value here to penalize the path flow
                                                path_gradient_cost += g_link_vector[link_seq_no].VDF_period[tau].est_count_dev;
                                                est_count_dev += g_link_vector[link_seq_no].VDF_period[tau].est_count_dev;
                                            }
                                            p_column_pool->m_passing_sensor_flag += 1;
                                            it->second.measurement_flag = 1;
                                        }
                                    }
                                    // statistics collection
                                    if (it->second.measurement_flag >= 1)
                                        column_path_with_sensor_counts++;
                                    it->second.path_gradient_cost = path_gradient_cost;
                                    step_size = 0.05;
                                    float prev_path_volume = it->second.path_volume;
                                    double weight_of_measurements = 1;  // ad hoc weight on the measurements with respect to the UE gap// because unit of UE gap is around 1-5 mins, measurement error is around 100 vehicles per hour per lane
                                    double change = step_size * (weight_of_measurements * it->second.path_gradient_cost + (1 - weight_of_measurements) * it->second.UE_gap);
                                    // dtalog.output() <<" path =" << i << ", gradient cost of measurements =" << it->second.path_gradient_cost << ", UE gap=" << it->second.UE_gap << endl;
                                    // cap the per-iteration adjustment to +/-10% of the current volume
                                    float change_lower_bound = it->second.path_volume * 0.1 * (-1);
                                    float change_upper_bound = it->second.path_volume * 0.1;
                                    // reset
                                    if (change < change_lower_bound)
                                        change = change_lower_bound;
                                    // reset
                                    if (change > change_upper_bound)
                                        change = change_upper_bound;
                                    it->second.path_volume = max(1.0, it->second.path_volume - change);
                                    if (dtalog.log_odme() == 1)
                                    {
                                        dtalog.output() << "OD " << orig << "-> " << dest << " path id:" << i << ", prev_vol"
                                            << prev_path_volume
                                            << ", gradient_cost = " << it->second.path_gradient_cost
                                            << " UE gap," << it->second.UE_gap
                                            << " link," << g_link_vector[link_seq_no].VDF_period[tau].est_count_dev
                                            << " P," << g_zone_vector[orig].est_production_dev
                                            << " A," << g_zone_vector[orig].est_attraction_dev
                                            << "proposed change = " << step_size * it->second.path_gradient_cost
                                            << "actual change = " << change
                                            << "new vol = " << it->second.path_volume << endl;
                                    }
                                }  // end of loop for all paths in the column pools

                                // record adjustment results
                                for (it = it_begin; it != it_end; ++it)  // for each k
                                {
                                    it->second.path_time_per_iteration_ODME_map[s] = path_travel_time;
                                    it->second.path_volume_per_iteration_ODME_map[s] = it->second.path_volume;
                                }
                                //if (p_column_pool->m_passing_sensor_flag >= 1)
                                //    column_pool_with_sensor_counts++;
                            }
                        }
                    }
                }
            }
            if (s == 0)
            {
                float percentage_of_OD_columns_with_sensors = column_pool_with_sensor_counts * 1.0 / max(1, column_pool_counts) * 100;
                float percentage_of_paths_with_sensors = column_path_with_sensor_counts * 1.0 / max(1, column_path_counts) * 100;
                dtalog.output() << "count of all column pool vectors=" << column_pool_counts << ", "
                    << "count of all paths =" << column_path_counts << ", "
                    << "count of column_pools with sensors = " << column_pool_with_sensor_counts
                    << "(" << percentage_of_OD_columns_with_sensors << "%), "
                    << "count of column_paths with sensors = " << column_path_with_sensor_counts
                    << " (" << percentage_of_paths_with_sensors << "%)" << endl;
            }
        }
        // post-procese link volume based on OD volumns
        // very import: noted by Peiheng and Xuesong on 01/30/2022
        double system_gap = 0;
        g_reset_and_update_link_volume_based_on_ODME_columns(g_link_vector.size(), OD_updating_iterations, system_gap);
        // we now have a consistent link-to-path volumne in g_link_vector[link_seq_no].PCE_volume_per_period[tau]
    }
    // stage II;
    g_classification_in_column_pool(assignment);
}
ConvolutionIm2Col.h
// --------------------------------------------------------------------------
//  Binary Brain  -- binary neural net framework
//
//                                     Copyright (C) 2018 by Ryuji Fuchikami
//                                     https://github.com/ryuz
//                                     ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------


#pragma once

#include <fstream>
#include <vector>
#include <random>

#include "bb/Manager.h"
#include "bb/Model.h"
#include "bb/FrameBuffer.h"
#include "bb/Filter2d.h"


namespace bb {


// im2col pre-processing stage of a 2D convolution: every (filter_h x filter_w)
// sliding-window position of the input becomes one output *frame* holding the
// {C, filter_h, filter_w} patch, so a following layer can treat convolution as
// a plain per-frame operation.
// FT: forward signal data type, BT: backward (gradient) data type.
template <typename FT = float, typename BT = float>
class ConvolutionIm2Col : public Model
{
    using _super = Model;

public:
    static inline std::string ModelName(void) { return "ConvolutionIm2Col"; }
    static inline std::string ObjectName(void){ return ModelName() + "_" + DataType<FT>::Name() + "_" + DataType<BT>::Name(); }

    std::string GetModelName(void) const override { return ModelName(); }
    std::string GetObjectName(void) const override { return ObjectName(); }

protected:
    bool            m_host_only = false;        // when true, CUDA code paths are never taken

    indices_t       m_input_shape;              // {C, H, W} of the input
    indices_t       m_output_shape;             // {C, filter_h, filter_w} of one output frame
    index_t         m_input_frame_size = 1;
    index_t         m_output_frame_size = 1;    // input frames * output_h * output_w
    index_t         m_input_c_size = 1;
    index_t         m_input_h_size = 1;
    index_t         m_input_w_size = 1;
    index_t         m_filter_h_size = 1;
    index_t         m_filter_w_size = 1;
    index_t         m_y_stride = 1;
    index_t         m_x_stride = 1;
    index_t         m_y_offset = 0;             // vertical padding offset implied by "same" mode
    index_t         m_x_offset = 0;             // horizontal padding offset implied by "same" mode
    index_t         m_output_h_size = 1;
    index_t         m_output_w_size = 1;

    std::string     m_padding = "valid";        // "valid" or "same"

    int             m_border_mode  = BB_BORDER_REFLECT_101;     // how out-of-image pixels are sampled
    FT              m_border_value = (FT)0;                     // fill value for BB_BORDER_CONSTANT

public:
    // construction parameters
    struct create_t
    {
        index_t         filter_h_size = 1;
        index_t         filter_w_size = 1;
        index_t         x_stride = 1;
        index_t         y_stride = 1;
        std::string     padding = "valid";              // "valid" or "same"
        std::string     border_mode = "reflect_101";    // border handling used with "same" padding
        FT              border_value = (FT)0;
    };

protected:
    ConvolutionIm2Col(create_t const & create)
    {
        m_filter_h_size = create.filter_h_size;
        m_filter_w_size = create.filter_w_size;
        m_x_stride      = create.x_stride;
        m_y_stride      = create.y_stride;
        m_padding       = create.padding;
        m_border_mode   = BorderConv(create.border_mode);
        m_border_value  = create.border_value;
    }

    /**
     * @brief  command processing
     * @detail runtime command dispatch for this layer
     * @param  args command tokens
     */
    void CommandProc(std::vector<std::string> args)
    {
        // "host_only" command disables the device (CUDA) paths
        if (args.size() == 2 && args[0] == "host_only")
        {
            m_host_only = EvalBool(args[1]);
        }
    }

public:
    ~ConvolutionIm2Col() {}

    static std::shared_ptr<ConvolutionIm2Col> Create(create_t const &create)
    {
        return std::shared_ptr<ConvolutionIm2Col>(new ConvolutionIm2Col(create));
    }

    static std::shared_ptr<ConvolutionIm2Col> Create(index_t filter_h_size, index_t filter_w_size, index_t y_stride=1, index_t x_stride=1,
                                                        std::string padding="valid", std::string border_mode = "reflect_101")
    {
        create_t create;
        create.filter_h_size = filter_h_size;
        create.filter_w_size = filter_w_size;
        create.y_stride      = y_stride;
        create.x_stride      = x_stride;
        create.padding       = padding;
        create.border_mode   = border_mode;
        return Create(create);
    }

    static std::shared_ptr<ConvolutionIm2Col> Create(void)
    {
        return Create(create_t());
    }

#ifdef BB_PYBIND11
    // pybind11 binding entry point (same semantics as the keyword Create above)
    static std::shared_ptr<ConvolutionIm2Col> CreatePy(index_t filter_h_size, index_t filter_w_size, index_t y_stride=1, index_t x_stride=1,
                                                        std::string padding="valid", std::string border_mode = "reflect_101")
    {
        create_t create;
        create.filter_h_size = filter_h_size;
        create.filter_w_size = filter_w_size;
        create.y_stride      = y_stride;
        create.x_stride      = x_stride;
        create.padding       = padding;
        create.border_mode   = border_mode;
        return Create(create);
    }
#endif

    index_t GetFilterSizeH(void) const { return m_filter_h_size; }
    index_t GetFilterSizeW(void) const { return m_filter_w_size; }
    index_t GetStrideX(void)     const { return m_x_stride; }
    index_t GetStrideY(void)     const { return m_y_stride; }
    std::string GetPadding(void) const { return m_padding; }
//  int GetBorderMode(void) const { return m_border_mode; }

    // translate the internal BB_BORDER_* constant back to its string name
    std::string GetBorderMode(void) const
    {
        switch ( m_border_mode ) {
        case BB_BORDER_CONSTANT:    return "constant";
        case BB_BORDER_REFLECT:     return "reflect";
        case BB_BORDER_REFLECT_101: return "reflect_101";
        case BB_BORDER_REPLICATE:   return "replicate";
        case BB_BORDER_WRAP:        return "wrap";
        }
        BB_DEBUG_ASSERT(0);
        return "";
    }

    FT GetBorderValue(void) const { return m_border_value; }

    /**
     * @brief  set input shape
     * @detail configures the layer for a {C, H, W} input and derives the
     *         output geometry from padding mode, filter size and strides
     * @param  shape new input shape {C, H, W}
     * @return resulting output shape {C, filter_h, filter_w}
     */
    indices_t SetInputShape(indices_t shape)
    {
        // do nothing if this shape is already configured
        if ( shape == this->GetInputShape() ) {
            return this->GetOutputShape();
        }

        // store shape
        m_input_shape = shape;
        BB_ASSERT(m_input_shape.size() == 3);
        m_input_c_size = m_input_shape[0];
        m_input_h_size = m_input_shape[1];
        m_input_w_size = m_input_shape[2];

        // compute output size (ceiling division by the stride)
        if ( m_padding == "valid" ) {
            m_output_h_size = ((m_input_h_size - m_filter_h_size + 1) + (m_y_stride - 1)) / m_y_stride;
            m_output_w_size = ((m_input_w_size - m_filter_w_size + 1) + (m_x_stride - 1)) / m_x_stride;
            m_y_offset = 0;
            m_x_offset = 0;
        }
        else if ( m_padding == "same" ) {
            m_output_h_size = (m_input_h_size + (m_y_stride - 1)) / m_y_stride;
            m_output_w_size = (m_input_w_size + (m_x_stride - 1)) / m_x_stride;
            // center the filter window over each output position
            m_y_offset = (m_filter_h_size - 1) / 2;
            m_x_offset = (m_filter_w_size - 1) / 2;
        }
        else {
            BB_ASSERT(0);
        }

        // one output frame carries a single {C, filter_h, filter_w} patch
        m_output_shape.resize(3);
        m_output_shape[0] = m_input_c_size;
        m_output_shape[1] = m_filter_h_size;
        m_output_shape[2] = m_filter_w_size;

        return m_output_shape;
    }

    /**
     * @brief  get input shape
     * @detail returns the configured input shape
     * @return input shape
     */
    indices_t GetInputShape(void) const
    {
        return m_input_shape;
    }

    /**
     * @brief  get output shape
     * @detail returns the configured output shape
     * @return output shape
     */
    indices_t GetOutputShape(void) const
    {
        return m_output_shape;
    }

protected:
    // linear node index of (c, y, x) inside one input frame
    inline index_t GetInputNode(index_t c, index_t y, index_t x)
    {
        return (c * m_input_h_size + y)*m_input_w_size + x;
    }

    // linear node index of (c, y, x) inside one output frame (patch)
    inline index_t GetOutputNode(index_t c, index_t y, index_t x)
    {
        return (c*m_filter_h_size + y)*m_filter_w_size + x;
    }

    // map a border-mode name onto its BB_BORDER_* constant
    inline int BorderConv(std::string const& mode)
    {
        if ( mode == "constant" )    { return BB_BORDER_CONSTANT;    }
        if ( mode == "reflect" )     { return BB_BORDER_REFLECT ;    }
        if ( mode == "reflect_101" ) { return BB_BORDER_REFLECT_101; }
        if ( mode == "replicate" )   { return BB_BORDER_REPLICATE;   }
        if ( mode == "wrap" )        { return BB_BORDER_WRAP;        }
        BB_DEBUG_ASSERT(0);
        return BB_BORDER_CONSTANT;
    }

    // Fold an out-of-image coordinate (x, y) back inside a w x h image
    // according to border_mode. Returns false for constant-fill (or unknown)
    // modes, meaning the caller should use m_border_value instead.
    // NOTE(review): the folding handles a single reflection only — assumes the
    // filter does not overhang by more than one image extent.
    inline bool Border(int border_mode, index_t &x, index_t &y, index_t w, index_t h)
    {
        switch ( border_mode ) {
        case BB_BORDER_REFLECT:
            if ( x < 0  ) { x = -x - 1; }
            if ( y < 0  ) { y = -y - 1; }
            if ( x >= w ) { x = (w - 1) - (x - w); }
            if ( y >= h ) { y = (h - 1) - (y - h); }
            return true;

        case BB_BORDER_REFLECT_101:
            if ( x < 0  ) { x = -x; }
            if ( y < 0  ) { y = -y; }
            if ( x >= w ) { x = (w - 2) - (x - w); }
            if ( y >= h ) { y = (h - 2) - (y - h); }
            return true;

        case BB_BORDER_REPLICATE:
            if ( x < 0  ) { x = 0; }
            if ( y < 0  ) { y = 0; }
            if ( x >= w ) { x = w - 1; }
            if ( y >= h ) { y = h - 1; }
            return true;

        case BB_BORDER_WRAP:
            if ( x < 0  ) { x += w; }
            if ( y < 0  ) { y += h; }
            if ( x >= w ) { x -= w; }
            if ( y >= h ) { y -= h; }
            return true;

        default:
            return false;
        }
    }

public:
    // Forward pass: expand the input into patches, one output frame per
    // sliding-window position. Dispatches to CUDA kernels when available,
    // otherwise runs the generic host implementation.
    FrameBuffer Forward(FrameBuffer x_buf, bool train = true)
    {
        BB_ASSERT(x_buf.GetType() == DataType<FT>::type);

        // set the input shape on first use if SetInputShape was not called
        if ( x_buf.GetShape() != m_input_shape ) {
            SetInputShape(x_buf.GetShape());
        }

        // compute output frame count
        m_input_frame_size = x_buf.GetFrameSize();
        m_output_frame_size = m_input_frame_size * m_output_h_size * m_output_w_size;

        // allocate output buffer
        FrameBuffer y_buf(m_output_frame_size, m_output_shape, x_buf.GetType());

#ifdef BB_WITH_CUDA
        if ( DataType<FT>::type == BB_TYPE_FP32 && !m_host_only && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable()) {
            // FP32 CUDA
            auto ptr_x = x_buf.LockDeviceMemoryConst();
            auto ptr_y = y_buf.LockDeviceMemory();
            bbcu_fp32_Im2Col_Forward(
                        (float const *)ptr_x.GetAddr(),
                        (float       *)ptr_y.GetAddr(),
                        (int          )m_x_stride,
                        (int          )m_y_stride,
                        (int          )m_x_offset,
                        (int          )m_y_offset,
                        (int          )m_input_frame_size,
                        (int          )x_buf.GetFrameStride() / sizeof(float),
                        (int          )m_input_w_size,
                        (int          )m_input_h_size,
                        (int          )m_input_c_size,
                        (int          )m_output_w_size,
                        (int          )m_output_h_size,
                        (int          )y_buf.GetFrameStride() / sizeof(float),
                        (int          )m_filter_w_size,
                        (int          )m_filter_h_size,
                        (int          )m_border_mode,
                        (float        )m_border_value
                    );
            return y_buf;
        }
#endif

#ifdef BB_WITH_CUDA
        // bit kernel is limited to small filters (fits one warp's 32-bit packing)
        if ( m_filter_w_size * m_filter_h_size <= 1024 / 32 && DataType<FT>::type == BB_TYPE_BIT && !m_host_only && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable()) {
            // bit CUDA
            auto ptr_x = x_buf.LockDeviceMemoryConst();
            auto ptr_y = y_buf.LockDeviceMemory();
            bbcu_bit_Im2Col_Forward(
                        (int const *)ptr_x.GetAddr(),
                        (int       *)ptr_y.GetAddr(),
                        (int        )m_x_stride,
                        (int        )m_y_stride,
                        (int        )m_x_offset,
                        (int        )m_y_offset,
                        (int        )m_input_frame_size,
                        (int        )x_buf.GetFrameStride() / sizeof(int),
                        (int        )m_input_w_size,
                        (int        )m_input_h_size,
                        (int        )m_input_c_size,
                        (int        )m_output_w_size,
                        (int        )m_output_h_size,
                        (int        )y_buf.GetFrameStride() / sizeof(int),
                        (int        )m_filter_w_size,
                        (int        )m_filter_h_size,
                        (int        )m_border_mode
                    );
            return y_buf;
        }
#endif

        {
            // generic (host) version
            index_t const output_frame_size = y_buf.GetFrameSize();
            index_t const output_size = m_output_w_size * m_output_h_size;

            auto x_ptr = x_buf.LockConst<FT>();
            auto y_ptr = y_buf.Lock<FT>(true);

            for (index_t c = 0; c < m_input_c_size; ++c ) {
                #pragma omp parallel for
                for (index_t fy = 0; fy < m_filter_h_size; ++fy) {
                    // NOTE(review): nested "parallel for" — the inner pragma is
                    // normally inert unless nested parallelism is enabled
                    #pragma omp parallel for
                    for (index_t fx = 0; fx < m_filter_w_size; ++fx) {
                        for ( index_t output_frame = 0; output_frame < output_frame_size; ++output_frame ) {
                            // decode output frame index into (input frame, window position)
                            index_t input_frame = output_frame / output_size;
                            index_t f           = output_frame % output_size;
                            index_t iy = (f / m_output_w_size) * m_y_stride - m_y_offset + fy;
                            index_t ix = (f % m_output_w_size) * m_x_stride - m_x_offset + fx;

                            FT in_sig = m_border_value;
                            if ( iy >= 0 && iy < m_input_h_size && ix >= 0 && ix < m_input_w_size ) {
                                // inside the image: read directly
                                index_t input_node = (c * m_input_h_size + iy) * m_input_w_size + ix;
                                in_sig = x_ptr.Get(input_frame, input_node);
                            }
                            else {
                                // outside: fold the coordinate per border mode,
                                // or keep the constant fill value
                                if ( Border(m_border_mode, ix, iy, m_input_w_size, m_input_h_size) ) {
                                    index_t input_node = (c * m_input_h_size + iy) * m_input_w_size + ix;
                                    in_sig = x_ptr.Get(input_frame, input_node);
                                }
                            }

                            index_t output_node = (c * m_filter_h_size + fy) * m_filter_w_size + fx;
                            y_ptr.Set(output_frame, output_node, in_sig);
                        }
                    }
                }
            }

            return y_buf;
        }
    }

    // Backward pass: scatter-add patch gradients back onto the input grid
    // (the exact adjoint of Forward's gather).
    FrameBuffer Backward(FrameBuffer dy_buf)
    {
        if (dy_buf.Empty()) {
            return dy_buf;
        }

        BB_ASSERT(dy_buf.GetType() == DataType<BT>::type);

        // allocate gradient output
        FrameBuffer dx_buf(m_input_frame_size, m_input_shape, DataType<BT>::type);

#ifdef BB_WITH_CUDA
        if ( DataType<BT>::type == BB_TYPE_FP32 && !m_host_only && dy_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable()) {
            auto ptr_dy = dy_buf.LockDeviceMemoryConst();
            auto ptr_dx = dx_buf.LockDeviceMemory();
            bbcu_fp32_Im2Col_Backward(
                        (float const *)ptr_dy.GetAddr(),
                        (float       *)ptr_dx.GetAddr(),
                        (int          )m_x_stride,
                        (int          )m_y_stride,
                        (int          )m_x_offset,
                        (int          )m_y_offset,
                        (int          )m_input_frame_size,
                        (int          )(dx_buf.GetFrameStride() / sizeof(float)),
                        (int          )m_input_w_size,
                        (int          )m_input_h_size,
                        (int          )m_input_c_size,
                        (int          )m_output_w_size,
                        (int          )m_output_h_size,
                        (int          )(dy_buf.GetFrameStride() / sizeof(float)),
                        (int          )m_filter_w_size,
                        (int          )m_filter_h_size);
            return dx_buf;
        }
#endif

        {
            // strided host version
            dx_buf.FillZero();

            auto dy_ptr = dy_buf.LockConst<BT>();
            auto dx_ptr = dx_buf.Lock<BT>();

            // largest valid un-divided output coordinates
            index_t iy_limit = (m_output_h_size - 1) * m_y_stride;
            index_t ix_limit = (m_output_w_size - 1) * m_x_stride;

            for (index_t c = 0; c < m_input_c_size; ++c) {
                #pragma omp parallel for
                for (index_t y = 0; y < m_input_h_size; ++y ) {
                    // NOTE(review): nested "parallel for" — inner pragma is
                    // normally inert unless nested parallelism is enabled
                    #pragma omp parallel for
                    for (index_t x = 0; x < m_input_w_size; ++x ) {
                        index_t input_node = (c * m_input_h_size + y) * m_input_w_size + x;
                        // only filter taps congruent with (x, y) mod stride
                        // can have touched this input pixel
                        index_t x_align = x % m_x_stride;
                        index_t y_align = y % m_y_stride;
                        for ( index_t input_frame = 0; input_frame < m_input_frame_size; ++input_frame ) {
                            BT dx = 0; // dx_ptr.Get(input_frame, input_node);
                            float dy = 0;
                            for (index_t fy = y_align; fy < m_filter_h_size; fy += m_y_stride ) {
                                index_t iy = y - fy + m_y_offset;
                                if ( iy >= 0 && iy <= iy_limit ) {
                                    for (index_t fx = x_align; fx < m_filter_w_size; fx += m_x_stride) {
                                        index_t ix = x - fx + m_x_offset;
                                        if ( ix >= 0 && ix <= ix_limit ) {
                                            // accumulate over every patch that covered (x, y)
                                            index_t output_frame = (input_frame * m_output_h_size + (iy/m_y_stride)) * m_output_w_size + (ix/m_x_stride);
                                            index_t output_node  = (c * m_filter_h_size + fy) * m_filter_w_size + fx;
                                            dy += dy_ptr.Get(output_frame, output_node);
                                        }
                                    }
                                }
                            }
                            dx_ptr.Set(input_frame, input_node, dx + dy);
                        }
                    }
                }
            }

            return dx_buf;
        }
    }


    // serialization
protected:
    // Persist configuration; shapes and offsets are not written because
    // LoadObjectData reconstructs them from the saved sizes.
    void DumpObjectData(std::ostream &os) const override
    {
        // version
        std::int64_t ver = 1;
        bb::SaveValue(os, ver);

        // parent class
        _super::DumpObjectData(os);

        // members
        bb::SaveValue(os, m_host_only);
        bb::SaveValue(os, m_input_frame_size);
        bb::SaveValue(os, m_output_frame_size);
        bb::SaveValue(os, m_input_c_size);
        bb::SaveValue(os, m_input_h_size);
        bb::SaveValue(os, m_input_w_size);
        bb::SaveValue(os, m_filter_h_size);
        bb::SaveValue(os, m_filter_w_size);
        bb::SaveValue(os, m_y_stride);
        bb::SaveValue(os, m_x_stride);
        bb::SaveValue(os, m_padding);
        bb::SaveValue(os, m_border_mode);
        bb::SaveValue(os, m_border_value);
    }

    // Restore configuration (must read in the exact order written above) and
    // rebuild the derived shape/offset members.
    void LoadObjectData(std::istream &is) override
    {
        // version
        std::int64_t ver;
        bb::LoadValue(is, ver);

        BB_ASSERT(ver == 1);

        // parent class
        _super::LoadObjectData(is);

        // members
        bb::LoadValue(is, m_host_only);
        bb::LoadValue(is, m_input_frame_size);
        bb::LoadValue(is, m_output_frame_size);
        bb::LoadValue(is, m_input_c_size);
        bb::LoadValue(is, m_input_h_size);
        bb::LoadValue(is, m_input_w_size);
        bb::LoadValue(is, m_filter_h_size);
        bb::LoadValue(is, m_filter_w_size);
        bb::LoadValue(is, m_y_stride);
        bb::LoadValue(is, m_x_stride);
        bb::LoadValue(is, m_padding);
        bb::LoadValue(is, m_border_mode);
        bb::LoadValue(is, m_border_value);

        // rebuild derived members (same formulas as SetInputShape)
        m_input_shape  = bb::indices_t({m_input_c_size, m_input_h_size, m_input_w_size});
        m_output_shape = bb::indices_t({m_input_c_size, m_filter_h_size, m_filter_w_size});
        if ( m_padding == "valid" ) {
            m_output_h_size = ((m_input_h_size - m_filter_h_size + 1) + (m_y_stride - 1)) / m_y_stride;
            m_output_w_size = ((m_input_w_size - m_filter_w_size + 1) + (m_x_stride - 1)) / m_x_stride;
            m_y_offset = 0;
            m_x_offset = 0;
        }
        else if ( m_padding == "same" ) {
            m_output_h_size = (m_input_h_size + (m_y_stride - 1)) / m_y_stride;
            m_output_w_size = (m_input_w_size + (m_x_stride - 1)) / m_x_stride;
            m_y_offset = (m_filter_h_size - 1) / 2;
            m_x_offset = (m_filter_w_size - 1) / 2;
        }
        else {
            BB_ASSERT(0);
        }
    }
};


}
GB_unop__identity_fp32_int16.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_fp32_int16
// op(A') function:  GB_unop_tran__identity_fp32_int16

// C type:   float
// A type:   int16_t
// cast:     float cij = (float) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    float z = (float) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = (float) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (false here: int16_t -> float requires a cast, so no memcpy shortcut)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply over anz entries, parallelized across nthreads.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB_unop_apply__identity_fp32_int16
(
    float *Cx,          // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense / full case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
            GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ;
        #else
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (p = 0 ; p < anz ; p++)
            {
                int16_t aij = Ax [p] ;
                float z = (float) aij ;
                Cx [p] = z ;
            }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries absent from the bitmap
            int16_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The shared transpose template (GB_unop_transpose.c) expands using the
// GB_* macros defined above.
GrB_Info GB_unop_tran__identity_fp32_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__lnot_uint64_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_uint64_fp64
// op(A') function:  GB_tran__lnot_uint64_fp64

// C type:   uint64_t
// A type:   double
// cast:     uint64_t cij ; GB_CAST_UNSIGNED(cij,aij,64)
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (logical NOT: nonzero -> 0, zero -> 1)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting (saturating double -> uint64_t conversion)
#define GB_CASTING(z, aij) \
    uint64_t z ; GB_CAST_UNSIGNED(z,aij,64) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply over anz entries, parallelized across nthreads.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB_unop__lnot_uint64_fp64
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The shared transpose template (GB_unaryop_transpose.c) expands using the
// GB_* macros defined above; GB_PHASE_2_OF_2 selects the numeric phase.
GrB_Info GB_tran__lnot_uint64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
IBKMKC_vector_operations.c
/* IBK Math Kernel Library Copyright (c) 2001-today, Institut fuer Bauklimatik, TU Dresden, Germany Written by A. Nicolai, A. Paepcke, H. Fechner, St. Vogelsang All rights reserved. This file is part of the IBKMK Library. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. This library contains derivative work based on other open-source libraries, see LICENSE and OTHER_LICENSES files. 
*/

#include "IBKMKC_vector_operations.h"

#include <memory.h>
#include <IBK_openMP.h>

#ifdef __cplusplus
namespace IBKMK {
#endif

/* Fills targetVector[0..vectorSize-1] with 'value'.
   NOTE: uses orphaned OpenMP worksharing constructs ('single'/'for'), so this
   function is meant to be called from inside an active parallel region;
   outside one it simply runs serially. The 'single' prologue handles the
   first vectorSize % 8 elements so the following 8-way unrolled 'for' always
   operates on an exact multiple of 8 elements. */
void vectorUInt_fill( IBKMK_CONST unsigned int vectorSize, unsigned int * targetVector, IBKMK_CONST unsigned int value){
	int i=0;
	int j;
#pragma omp single copyprivate(i)
	{
		/* remainder elements, done by one thread; 'i' is broadcast to all */
		for (; i<(int)vectorSize % 8; ++i) {
			targetVector[i] = value;
		}
	}
#pragma omp for
	for ( j = i; j<(int)vectorSize; j+=8) {
		targetVector[j  ] = value;
		targetVector[j+1] = value;
		targetVector[j+2] = value;
		targetVector[j+3] = value;
		targetVector[j+4] = value;
		targetVector[j+5] = value;
		targetVector[j+6] = value;
		targetVector[j+7] = value;
	}
}

/* Fills targetVector[0..vectorSize-1] with 'value' (double variant).
   Same orphaned-worksharing pattern as vectorUInt_fill above. */
void vector_fill( IBKMK_CONST unsigned int vectorSize, double * targetVector, IBKMK_CONST double value){
	int i=0;
	int j;
#pragma omp single copyprivate(i)
	{
		/* remainder elements first (vectorSize % 8), single thread */
		for (; i<(int)vectorSize % 8; ++i) {
			targetVector[i] = value;
		}
	}
#pragma omp for
	for ( j = i; j<(int)vectorSize; j+=8) {
		targetVector[j  ] = value;
		targetVector[j+1] = value;
		targetVector[j+2] = value;
		targetVector[j+3] = value;
		targetVector[j+4] = value;
		targetVector[j+5] = value;
		targetVector[j+6] = value;
		targetVector[j+7] = value;
	}
}

/* Copies x[0..n-1] into y[0..n-1]. */
void vector_copy( IBKMK_CONST unsigned int n, IBKMK_CONST double * x, double * y) {
	// openmp doesn't support memcopy since it is not thread safe (static variable use).
	// for serial code, always use memcopy
#if defined(_OPENMP)
	unsigned int i=0;
	int j;
#pragma omp single copyprivate(i)
	{
		/* remainder elements first (n % 8), single thread */
		for (; i<n % 8; ++i) {
			y[i] = x[i];
		}
	}
#pragma omp for
	for ( j = i; j<(int)n; j+=8) {
		y[j  ] = x[j  ];
		y[j+1] = x[j+1];
		y[j+2] = x[j+2];
		y[j+3] = x[j+3];
		y[j+4] = x[j+4];
		y[j+5] = x[j+5];
		y[j+6] = x[j+6];
		y[j+7] = x[j+7];
	}
#else
	/// \todo Think about checking against x == y --> might indicate programming error in
	///       calling code.

	// nobody beats memcopy as long he uses compiler intrinsics
	memcpy(y, x, n*sizeof(double) );
#endif // defined(_OPENMP)
}

/* Computes y[i] = a * x[i] for i in [0, n). */
void vector_scale(IBKMK_CONST unsigned int n, double a, IBKMK_CONST double * x, double * y) {
	unsigned int i=0;
	int j;
	/* align data */
#pragma omp single copyprivate(i)
	{
		for (; i<n % 8; ++i) {
			y[i] = a*x[i];
		}
	}
	/* use loop unrolling for 8 bytes */
#pragma omp for
	for ( j=i; j<(int)n; j+=8) {
		y[j  ] = a*x[j  ];
		y[j+1] = a*x[j+1];
		y[j+2] = a*x[j+2];
		y[j+3] = a*x[j+3];
		y[j+4] = a*x[j+4];
		y[j+5] = a*x[j+5];
		y[j+6] = a*x[j+6];
		y[j+7] = a*x[j+7];
	}
}

/* Scales x in place: x[i] *= a for i in [0, n). */
void vector_scale_by(IBKMK_CONST unsigned int n, double a, double * x) {
	unsigned int i=0;
	int j;
	/* align data */
#pragma omp single copyprivate(i)
	{
		for (; i<n % 8; ++i) {
			x[i] *= a;
		}
	}
	/* use loop unrolling for 8 bytes */
#pragma omp for
	for (j=i; j<(int)n; j+=8) {
		x[j  ] *= a;
		x[j+1] *= a;
		x[j+2] *= a;
		x[j+3] *= a;
		x[j+4] *= a;
		x[j+5] *= a;
		x[j+6] *= a;
		x[j+7] *= a;
	}
}

/* Computes y[i] += a * x[i] (daxpy-like); the a == 1 case avoids the
   multiplication entirely. */
void vector_add(IBKMK_CONST unsigned int n, double a, IBKMK_CONST double * x, double * y) {
	unsigned int i=0;
	int j;
	if (a == IBKMK_ONE) {
		/* align data */
#pragma omp single copyprivate(i)
		{
			for (; i<n % 8; ++i) {
				y[i] += x[i];
			}
		}
		/* use loop unrolling for 8 bytes */
#pragma omp for
		for ( j=i; j<(int)n; j+=8) {
			y[j  ] += x[j  ];
			y[j+1] += x[j+1];
			y[j+2] += x[j+2];
			y[j+3] += x[j+3];
			y[j+4] += x[j+4];
			y[j+5] += x[j+5];
			y[j+6] += x[j+6];
			y[j+7] += x[j+7];
		}
	}
	else {
		/* align data */
#pragma omp single copyprivate(i)
		{
			for (; i<n % 8; ++i) {
				y[i] += a*x[i];
			}
		}
		/* use loop unrolling for 8 bytes */
#pragma omp for
		for ( j=i; j<(int)n; j+=8) {
			y[j  ] += a*x[j  ];
			y[j+1] += a*x[j+1];
			y[j+2] += a*x[j+2];
			y[j+3] += a*x[j+3];
			y[j+4] += a*x[j+4];
			y[j+5] += a*x[j+5];
			y[j+6] += a*x[j+6];
			y[j+7] += a*x[j+7];
		}
	}
}

/* Computes y[i] -= x[i] for i in [0, n). */
void vector_sub(IBKMK_CONST unsigned int n, IBKMK_CONST double * x, double * y) {
	unsigned int i=0;
	int j;
	/* align data */
#pragma omp single copyprivate(i)
	{
		for (; i<n % 8; ++i) {
			y[i] -= x[i];
		}
	}
	/* use loop unrolling for 8 bytes */
#pragma omp for
	for (j=i; j<(int)n; j+=8) {
		y[j  ] -= x[j  ];
		y[j+1] -= x[j+1];
		y[j+2] -= x[j+2];
		y[j+3] -= x[j+3];
		y[j+4] -= x[j+4];
		y[j+5] -= x[j+5];
		y[j+6] -= x[j+6];
		y[j+7] -= x[j+7];
	}
}

/* Computes z[i] = a * x[i] + b * y[i] for i in [0, n). */
void vector_linear_sum(IBKMK_CONST unsigned int n, double a, IBKMK_CONST double * x, double b, IBKMK_CONST double * y, double * z) {
	unsigned int i=0;
	int j;
#pragma omp single copyprivate(i)
	{
		/* align data */
		for (; i<n % 8; ++i) {
			z[i] = a*x[i] + b*y[i];
		}
	}
	/* use loop unrolling for 8 bytes */
#pragma omp for
	for ( j=i; j<(int)n; j+=8) {
		z[j  ] = a*x[j  ] + b*y[j  ];
		z[j+1] = a*x[j+1] + b*y[j+1];
		z[j+2] = a*x[j+2] + b*y[j+2];
		z[j+3] = a*x[j+3] + b*y[j+3];
		z[j+4] = a*x[j+4] + b*y[j+4];
		z[j+5] = a*x[j+5] + b*y[j+5];
		z[j+6] = a*x[j+6] + b*y[j+6];
		z[j+7] = a*x[j+7] + b*y[j+7];
	}
}

#ifdef __cplusplus
} // namespace IBKMK
#endif
format_avx512.h
#ifndef FORMAT_AVX512_H
#define FORMAT_AVX512_H

#include "common_avx512.h"
#include "utils_avx512.h"

/* Step 1 of partition-pointer generation: for each of the p+1 partition
 * boundaries, binary-search the CSR row pointer for the row containing the
 * partition's first nonzero. */
template<typename iT, typename uiT>
void generate_partition_pointer_s1_kernel(const iT *d_row_pointer, uiT *d_partition_pointer,
                                          const int sigma, const iT p, const iT m, const iT nnz)
{
    #pragma omp parallel for
    for (iT global_id = 0; global_id <= p; global_id++)
    {
        // compute partition boundaries by partition of size sigma * omega
        iT boundary = global_id * sigma * ANONYMOUSLIB_CSR5_OMEGA;

        // clamp partition boundaries to [0, nnz]
        boundary = boundary > nnz ? nnz : boundary;

        // binary search
        d_partition_pointer[global_id] = binary_search_right_boundary_kernel<iT>(d_row_pointer, boundary, m + 1) - 1;
    }
}

/* Step 2: mark partitions containing at least one empty row by setting the
 * most significant bit of their partition-pointer entry ("dirty" flag). */
template<typename iT, typename uiT>
void generate_partition_pointer_s2_kernel(const iT *d_row_pointer, uiT *d_partition_pointer, const iT p)
{
    #pragma omp parallel for
    for (iT group_id = 0; group_id < p; group_id++)
    {
        int dirty = 0;

        uiT start = d_partition_pointer[group_id];
        uiT stop  = d_partition_pointer[group_id+1];

        // strip any existing MSB flag before scanning the row range
        start = (start << 1) >> 1;
        stop  = (stop << 1) >> 1;

        if (start == stop)
            continue;

        // an empty row has equal consecutive row-pointer entries
        for (iT row_idx = start; row_idx <= stop; row_idx++)
        {
            if (d_row_pointer[row_idx] == d_row_pointer[row_idx+1])
            {
                dirty = 1;
                break;
            }
        }

        if (dirty)
        {
            // set the MSB; width of the flag follows sizeof(uiT)
            start |= sizeof(uiT) == 4 ? 0x80000000 : 0x8000000000000000;
            d_partition_pointer[group_id] = start;
        }
    }
}

/* Builds the CSR5 partition pointer: row boundaries per partition plus
 * empty-row flags. Returns ANONYMOUSLIB_SUCCESS. */
template<typename ANONYMOUSLIB_IT, typename ANONYMOUSLIB_UIT>
int generate_partition_pointer(const int sigma, const ANONYMOUSLIB_IT p, const ANONYMOUSLIB_IT m, const ANONYMOUSLIB_IT nnz,
                               ANONYMOUSLIB_UIT *partition_pointer, const ANONYMOUSLIB_IT *row_pointer)
{
    // step 1. binary search row pointer
    generate_partition_pointer_s1_kernel<ANONYMOUSLIB_IT, ANONYMOUSLIB_UIT>
        (row_pointer, partition_pointer, sigma, p, m, nnz);

    // step 2. check empty rows
    generate_partition_pointer_s2_kernel<ANONYMOUSLIB_IT, ANONYMOUSLIB_UIT>
        (row_pointer, partition_pointer, p);

    return ANONYMOUSLIB_SUCCESS;
}

/* Descriptor step 1: set one bit per row start inside each partition's
 * packed bit-flag array.
 * NOTE(review): the 0x7FFFFFFF masks here (and below) assume a 32-bit uiT,
 * unlike the sizeof-based flag in generate_partition_pointer_s2_kernel —
 * confirm uiT is always 32-bit in this build. */
template<typename iT, typename uiT>
void generate_partition_descriptor_s1_kernel(const iT *d_row_pointer, const uiT *d_partition_pointer, uiT *d_partition_descriptor,
                                             const iT m, const iT p, const int sigma, const int bit_all_offset, const int num_packet)
{
    #pragma omp parallel for
    for (int par_id = 0; par_id < p-1; par_id++)
    {
        // clear the empty-row flag to recover the plain row indices
        const iT row_start = d_partition_pointer[par_id] & 0x7FFFFFFF;
        const iT row_stop  = d_partition_pointer[par_id + 1] & 0x7FFFFFFF;

        for (int rid = row_start; rid <= row_stop; rid++)
        {
            int ptr = d_row_pointer[rid];
            int pid = ptr / (ANONYMOUSLIB_CSR5_OMEGA * sigma);

            if (pid == par_id)
            {
                // locate the lane (lx) and the bit (ly/llid) for this row start
                int lx = (ptr / sigma) % ANONYMOUSLIB_CSR5_OMEGA;

                const int glid = ptr % sigma + bit_all_offset;
                const int ly = glid / 32;
                const int llid = glid % 32;

                const uiT val = 0x1 << (31 - llid);

                const int location = pid * ANONYMOUSLIB_CSR5_OMEGA * num_packet + ly * ANONYMOUSLIB_CSR5_OMEGA + lx;
                d_partition_descriptor[location] |= val;
            }
        }
    }
}

/* Descriptor step 2: per lane, count segments and compute y_offset /
 * scansum_offset; packs both into the high bits of the first descriptor
 * packet and accumulates offset-pointer totals for dirty partitions. */
template<typename iT, typename uiT>
void generate_partition_descriptor_s2_kernel(const uiT *d_partition_pointer, uiT *d_partition_descriptor,
                                             iT *d_partition_descriptor_offset_pointer,
                                             const int sigma, const int num_packet,
                                             const int bit_y_offset, const int bit_scansum_offset, const iT p)
{
    int num_thread = omp_get_max_threads();

    // per-thread scratch, 2*OMEGA ints each, cacheline aligned
    int *s_segn_scan_all = (int *)_mm_malloc(2 * ANONYMOUSLIB_CSR5_OMEGA * sizeof(int) * num_thread, ANONYMOUSLIB_X86_CACHELINE);
    int *s_present_all = (int *)_mm_malloc(2 * ANONYMOUSLIB_CSR5_OMEGA * sizeof(int) * num_thread, ANONYMOUSLIB_X86_CACHELINE);

    // sentinel: s_present[OMEGA] = 1 terminates the scansum while-loop below,
    // so the read at next1 == OMEGA stays in bounds of the 2*OMEGA buffer
    for (int i = 0; i < num_thread; i++)
        s_present_all[i * 2 * ANONYMOUSLIB_CSR5_OMEGA + ANONYMOUSLIB_CSR5_OMEGA] = 1;

    const int bit_all_offset = bit_y_offset + bit_scansum_offset;

    #pragma omp parallel for
    for (int par_id = 0; par_id < p-1; par_id++)
    {
        int tid = omp_get_thread_num();
        int *s_segn_scan = &s_segn_scan_all[tid * 2 * ANONYMOUSLIB_CSR5_OMEGA];
        int *s_present = &s_present_all[tid * 2 * ANONYMOUSLIB_CSR5_OMEGA];

        memset(s_segn_scan, 0, (ANONYMOUSLIB_CSR5_OMEGA + 1) * sizeof(int));
        memset(s_present, 0, ANONYMOUSLIB_CSR5_OMEGA * sizeof(int));

        bool with_empty_rows = (d_partition_pointer[par_id] >> 31) & 0x1;
        iT row_start = d_partition_pointer[par_id] & 0x7FFFFFFF;
        const iT row_stop = d_partition_pointer[par_id + 1] & 0x7FFFFFFF;

        // fast-track partition (single row): nothing to compute
        if (row_start == row_stop)
            continue;

        #pragma simd
        for (int lane_id = 0; lane_id < ANONYMOUSLIB_CSR5_OMEGA; lane_id++)
        {
            int start = 0, stop = 0, segn = 0;
            bool present = 0;
            uiT bitflag = 0;

            // lane 0 always starts a segment
            present |= !lane_id;

            // extract the first bit-flag packet
            int ly = 0;
            uiT first_packet = d_partition_descriptor[par_id * ANONYMOUSLIB_CSR5_OMEGA * num_packet + lane_id];
            bitflag = (first_packet << bit_all_offset) | ((uiT)present << 31);
            start = !((bitflag >> 31) & 0x1);
            present |= (bitflag >> 31) & 0x1;

            for (int i = 1; i < sigma; i++)
            {
                // advance to the next packet when the current one is exhausted
                if ((!ly && i == 32 - bit_all_offset) || (ly && (i - (32 - bit_all_offset)) % 32 == 0))
                {
                    ly++;
                    bitflag = d_partition_descriptor[par_id * ANONYMOUSLIB_CSR5_OMEGA * num_packet + ly * ANONYMOUSLIB_CSR5_OMEGA + lane_id];
                }
                const int norm_i = !ly ? i : i - (32 - bit_all_offset);
                stop += (bitflag >> (31 - norm_i % 32) ) & 0x1;
                present |= (bitflag >> (31 - norm_i % 32)) & 0x1;
            }

            // compute y_offset for all partitions
            segn = stop - start + present;
            segn = segn > 0 ? segn : 0;

            s_segn_scan[lane_id] = segn;

            // compute scansum_offset
            s_present[lane_id] = present;
        }

        // exclusive prefix-sum of per-lane segment counts
        scan_single<int>(s_segn_scan, ANONYMOUSLIB_CSR5_OMEGA + 1);

        if (with_empty_rows)
        {
            d_partition_descriptor_offset_pointer[par_id] = s_segn_scan[ANONYMOUSLIB_CSR5_OMEGA];
            // running total in slot p; finalized by scan in the caller
            d_partition_descriptor_offset_pointer[p] += s_segn_scan[ANONYMOUSLIB_CSR5_OMEGA];
        }

        #pragma simd
        for (int lane_id = 0; lane_id < ANONYMOUSLIB_CSR5_OMEGA; lane_id++)
        {
            int y_offset = s_segn_scan[lane_id];

            // distance to the next lane with a segment head; the read happens
            // before the bounds test, which the sentinel above makes safe
            int scansum_offset = 0;
            int next1 = lane_id + 1;
            if (s_present[lane_id])
            {
                while (!s_present[next1] && next1 < ANONYMOUSLIB_CSR5_OMEGA)
                {
                    scansum_offset++;
                    next1++;
                }
            }

            uiT first_packet = d_partition_descriptor[par_id * ANONYMOUSLIB_CSR5_OMEGA * num_packet + lane_id];

            y_offset = lane_id ? y_offset - 1 : 0;

            // pack y_offset and scansum_offset into the packet's high bits
            first_packet |= y_offset << (32 - bit_y_offset);
            first_packet |= scansum_offset << (32 - bit_all_offset);

            d_partition_descriptor[par_id * ANONYMOUSLIB_CSR5_OMEGA * num_packet + lane_id] = first_packet;
        }
    }

    _mm_free(s_segn_scan_all);
    _mm_free(s_present_all);
}

/* Builds the full CSR5 partition descriptor (bit flags + packed offsets) and
 * reports the total number of empty-row offsets via *_num_offsets. */
template<typename ANONYMOUSLIB_IT, typename ANONYMOUSLIB_UIT>
int generate_partition_descriptor(const int sigma, const ANONYMOUSLIB_IT p, const ANONYMOUSLIB_IT m,
                                  const int bit_y_offset, const int bit_scansum_offset, const int num_packet,
                                  const ANONYMOUSLIB_IT *row_pointer, const ANONYMOUSLIB_UIT *partition_pointer,
                                  ANONYMOUSLIB_UIT *partition_descriptor,
                                  ANONYMOUSLIB_IT *partition_descriptor_offset_pointer, ANONYMOUSLIB_IT *_num_offsets)
{
    int bit_all_offset = bit_y_offset + bit_scansum_offset;

    generate_partition_descriptor_s1_kernel<ANONYMOUSLIB_IT, ANONYMOUSLIB_UIT>
        (row_pointer, partition_pointer, partition_descriptor, m, p, sigma, bit_all_offset, num_packet);

    generate_partition_descriptor_s2_kernel<ANONYMOUSLIB_IT, ANONYMOUSLIB_UIT>
        (partition_pointer, partition_descriptor, partition_descriptor_offset_pointer,
         sigma, num_packet, bit_y_offset, bit_scansum_offset, p);

    // only scan when at least one partition had empty rows
    if (partition_descriptor_offset_pointer[p])
        scan_single<ANONYMOUSLIB_IT>(partition_descriptor_offset_pointer, p+1);

    *_num_offsets = partition_descriptor_offset_pointer[p];

    // print for debug
    // cout << "partition_descriptor(1) = " << endl;
    // print_tile<ANONYMOUSLIB_UIT>(partition_descriptor, num_packet, ANONYMOUSLIB_CSR5_OMEGA);
    // cout << "partition_descriptor(2) = " << endl;
    // print_tile<ANONYMOUSLIB_UIT>(&partition_descriptor[num_packet * ANONYMOUSLIB_CSR5_OMEGA], num_packet, ANONYMOUSLIB_CSR5_OMEGA);

    return ANONYMOUSLIB_SUCCESS;
}

/* For partitions flagged as containing empty rows, resolves each segment head
 * to its row index (relative to the partition) and stores it in the offset
 * array; mirrors the packet-walking scheme of descriptor step 2. */
template<typename iT, typename uiT>
void generate_partition_descriptor_offset_kernel(const iT *d_row_pointer, const uiT *d_partition_pointer,
                                                 const uiT *d_partition_descriptor,
                                                 const iT *d_partition_descriptor_offset_pointer,
                                                 iT *d_partition_descriptor_offset,
                                                 const iT p, const int num_packet,
                                                 const int bit_y_offset, const int bit_scansum_offset, const int c_sigma)
{
    const int bit_all_offset = bit_y_offset + bit_scansum_offset;
    const int bit_bitflag = 32 - bit_all_offset;

    #pragma omp parallel for
    for (int par_id = 0; par_id < p-1; par_id++)
    {
        bool with_empty_rows = (d_partition_pointer[par_id] >> 31) & 0x1;
        if (!with_empty_rows)
            continue;

        iT row_start = d_partition_pointer[par_id] & 0x7FFFFFFF;
        const iT row_stop = d_partition_pointer[par_id + 1] & 0x7FFFFFFF;

        int offset_pointer = d_partition_descriptor_offset_pointer[par_id];

        #pragma simd
        for (int lane_id = 0; lane_id < ANONYMOUSLIB_CSR5_OMEGA; lane_id++)
        {
            bool local_bit;

            // extract the first bit-flag packet
            int ly = 0;
            uiT descriptor = d_partition_descriptor[par_id * ANONYMOUSLIB_CSR5_OMEGA * num_packet + lane_id];
            int y_offset = descriptor >> (32 - bit_y_offset);

            // drop the packed offsets; force lane 0's leading bit on
            descriptor = descriptor << bit_all_offset;
            descriptor = lane_id ? descriptor : descriptor | 0x80000000;

            local_bit = (descriptor >> 31) & 0x1;

            if (local_bit && lane_id)
            {
                const iT idx = par_id * ANONYMOUSLIB_CSR5_OMEGA * c_sigma + lane_id * c_sigma;
                const iT y_index = binary_search_right_boundary_kernel<iT>(&d_row_pointer[row_start+1], idx, row_stop - row_start) - 1;
                d_partition_descriptor_offset[offset_pointer + y_offset] = y_index;
                y_offset++;
            }

            for (int i = 1; i < c_sigma; i++)
            {
                // advance to the next packet when the current one is exhausted
                if ((!ly && i == bit_bitflag) || (ly && !(31 & (i - bit_bitflag))))
                {
                    ly++;
                    descriptor = d_partition_descriptor[par_id * ANONYMOUSLIB_CSR5_OMEGA * num_packet + ly * ANONYMOUSLIB_CSR5_OMEGA + lane_id];
                }
                const int norm_i = 31 & (!ly ? i : i - bit_bitflag);

                local_bit = (descriptor >> (31 - norm_i)) & 0x1;

                if (local_bit)
                {
                    const iT idx = par_id * ANONYMOUSLIB_CSR5_OMEGA * c_sigma + lane_id * c_sigma + i;
                    const iT y_index = binary_search_right_boundary_kernel<iT>(&d_row_pointer[row_start+1], idx, row_stop - row_start) - 1;
                    d_partition_descriptor_offset[offset_pointer + y_offset] = y_index;
                    y_offset++;
                }
            }
        }
    }
}

/* Driver for the offset kernel above. Returns ANONYMOUSLIB_SUCCESS. */
template<typename ANONYMOUSLIB_IT, typename ANONYMOUSLIB_UIT>
int generate_partition_descriptor_offset(const int sigma, const ANONYMOUSLIB_IT p,
                                         const int bit_y_offset, const int bit_scansum_offset, const int num_packet,
                                         const ANONYMOUSLIB_IT *row_pointer, const ANONYMOUSLIB_UIT *partition_pointer,
                                         ANONYMOUSLIB_UIT *partition_descriptor,
                                         ANONYMOUSLIB_IT *partition_descriptor_offset_pointer,
                                         ANONYMOUSLIB_IT *partition_descriptor_offset)
{
    generate_partition_descriptor_offset_kernel<ANONYMOUSLIB_IT, ANONYMOUSLIB_UIT>
        (row_pointer, partition_pointer, partition_descriptor, partition_descriptor_offset_pointer,
         partition_descriptor_offset, p, num_packet, bit_y_offset, bit_scansum_offset, sigma);

    return ANONYMOUSLIB_SUCCESS;
}

/* In-place AoSoA transpose of one array, partition by partition, via a
 * per-thread scratch buffer. Only complete partitions (num_p excludes the
 * trailing/fast-track one) are transposed. */
template<typename T, typename uiT>
void aosoa_transpose_kernel_smem(T *d_data, const uiT *d_partition_pointer,
                                 const int nnz, const int sigma, const bool R2C) // R2C==true means CSR->CSR5, otherwise CSR5->CSR
{
    int num_p = ceil((double)nnz / (double)(ANONYMOUSLIB_CSR5_OMEGA * sigma)) - 1;

    int num_thread = omp_get_max_threads();
    T *s_data_all = (T *)_mm_malloc(sigma * ANONYMOUSLIB_CSR5_OMEGA * sizeof(T) * num_thread, ANONYMOUSLIB_X86_CACHELINE);

    #pragma omp parallel for
    for (int par_id = 0; par_id < num_p; par_id++)
    {
        int tid = omp_get_thread_num();
        T *s_data = &s_data_all[sigma * ANONYMOUSLIB_CSR5_OMEGA * tid];

        // if this is fast track partition, do not transpose it
        if (d_partition_pointer[par_id] == d_partition_pointer[par_id + 1])
            continue;

        // load global data to scratch buffer
        int idx_y, idx_x;
        #pragma simd
        for (int idx = 0; idx < ANONYMOUSLIB_CSR5_OMEGA * sigma; idx++)
        {
            if (R2C)
            {
                idx_y = idx % sigma;
                idx_x = idx / sigma;
            }
            else
            {
                idx_x = idx % ANONYMOUSLIB_CSR5_OMEGA;
                idx_y = idx / ANONYMOUSLIB_CSR5_OMEGA;
            }

            s_data[idx_y * ANONYMOUSLIB_CSR5_OMEGA + idx_x] = d_data[par_id * ANONYMOUSLIB_CSR5_OMEGA * sigma + idx];
        }

        // store transposed scratch data back to global
        #pragma simd
        for (int idx = 0; idx < ANONYMOUSLIB_CSR5_OMEGA * sigma; idx++)
        {
            if (R2C)
            {
                idx_x = idx % ANONYMOUSLIB_CSR5_OMEGA;
                idx_y = idx / ANONYMOUSLIB_CSR5_OMEGA;
            }
            else
            {
                idx_y = idx % sigma;
                idx_x = idx / sigma;
            }

            d_data[par_id * ANONYMOUSLIB_CSR5_OMEGA * sigma + idx] = s_data[idx_y * ANONYMOUSLIB_CSR5_OMEGA + idx_x];
        }
    }

    _mm_free(s_data_all);
}

/* Transposes both the column-index and value arrays between CSR and CSR5
 * layouts. Returns ANONYMOUSLIB_SUCCESS. */
template<typename ANONYMOUSLIB_IT, typename ANONYMOUSLIB_UIT, typename ANONYMOUSLIB_VT>
int aosoa_transpose(const int sigma, const int nnz,
                    const ANONYMOUSLIB_UIT *partition_pointer, ANONYMOUSLIB_IT *column_index, ANONYMOUSLIB_VT *value, bool R2C)
{
    aosoa_transpose_kernel_smem<ANONYMOUSLIB_IT, ANONYMOUSLIB_UIT>(column_index, partition_pointer, nnz, sigma, R2C);
    aosoa_transpose_kernel_smem<ANONYMOUSLIB_VT, ANONYMOUSLIB_UIT>(value, partition_pointer, nnz, sigma, R2C);

    // // print for debug
    // cout << "column_index(1) = " << endl;
    // print_tile<ANONYMOUSLIB_IT>(column_index, sigma, ANONYMOUSLIB_CSR5_OMEGA);
    // cout << "column_index(2) = " << endl;
    // print_tile<ANONYMOUSLIB_IT>(&column_index[sigma * ANONYMOUSLIB_CSR5_OMEGA], sigma, ANONYMOUSLIB_CSR5_OMEGA);

    // // print for debug
    // cout << "value(1) = " << endl;
    // print_tile<ANONYMOUSLIB_VT>(value, sigma, ANONYMOUSLIB_CSR5_OMEGA);
    // cout << "value(2) = " << endl;
    // print_tile<ANONYMOUSLIB_VT>(&value[sigma * ANONYMOUSLIB_CSR5_OMEGA], sigma, ANONYMOUSLIB_CSR5_OMEGA);

    return ANONYMOUSLIB_SUCCESS;
}

#endif // FORMAT_AVX512_H
a.35.3.c
/* { dg-do compile } */

/* Negative compile test: a worksharing `single` construct must not be closely
   nested inside a worksharing `for` region. The dg-warning directive below is
   the expected diagnostic — this code is deliberately invalid and must stay
   exactly as written. */

void work (int, int);

void
wrong3 (int n)
{
#pragma omp parallel default(shared)
  {
    int i;
#pragma omp for
    for (i = 0; i < n; i++)
      {
	/* incorrect nesting of regions */
#pragma omp single	/* { dg-warning "may not be closely nested" } */
	work (i, 0);
      }
  }
}
yolov2.h
#ifndef YOLOV3
#define YOLOV3
#include <stdio.h>
#include <stdlib.h>
//#include <iostream>
#include <math.h>
#include <fcntl.h>
#include <string.h>
#include <time.h>
#include "xconv_hw.h"
//#include "hw_drivers.h"
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"

#define FLT_MAX 3.402823466e+38F        /* max value */

/* Wall-clock time in seconds (microsecond resolution); returns 0 on failure.
   NOTE(review): struct timeval / gettimeofday need <sys/time.h>, which is not
   among the visible includes — presumably pulled in transitively; confirm. */
double what_time_is_it_now()
{
    struct timeval time;
    if (gettimeofday(&time,NULL)){
        return 0;
    }
    return (double)time.tv_sec + (double)time.tv_usec * .000001;
}

//#include "yolo_hls.h"

/* Supported activation functions. */
typedef enum{
    LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN
} ACTIVATION;

/* All layer kinds understood by the network parser/runner. */
typedef enum {
    CONVOLUTIONAL, DECONVOLUTIONAL, CONNECTED, MAXPOOL, SOFTMAX, DETECTION, DROPOUT, CROP, ROUTE,
    COST, NORMALIZATION, AVGPOOL, LOCAL, SHORTCUT, ACTIVE, RNN, GRU, LSTM, CRNN, BATCHNORM, NETWORK,
    XNOR, REGION, YOLO, REORG, UPSAMPLE, LOGXENT, L2NORM, BLANK
} LAYER_TYPE;

struct network;
typedef struct network network;

struct layer;
typedef struct layer layer;

/* One network layer. This is a union-of-all-layer-kinds record (darknet
   style): most fields are only meaningful for certain LAYER_TYPEs. Pointer
   members are heap buffers owned by the layer and released in free_layer. */
struct layer{
    LAYER_TYPE type;
    ACTIVATION activation;
    void (*forward)   (struct layer, struct network);  /* forward-pass entry point */
    int batch_normalize;
    int shortcut;
    int batch;
    int forced;
    int flipped;
    int inputs;
    int outputs;
    int nweights;
    int nbiases;
    int extra;
    int truths;
    int h,w,c;                    /* input height/width/channels */
    int out_h, out_w, out_c;      /* output height/width/channels */
    int n;
    int max_boxes;
    int groups;
    int size;
    int side;
    int stride;
    int reverse;
    int flatten;
    int spatial;
    int pad;
    int sqrt;
    int flip;
    int index;
    int binary;
    int xnor;
    int steps;
    int hidden;
    int truth;
    float smooth;
    float dot;
    float angle;
    float jitter;
    float saturation;
    float exposure;
    float shift;
    float ratio;
    float learning_rate_scale;
    float clip;
    int softmax;
    int classes;
    int coords;
    int background;
    int rescore;
    int objectness;
    int joint;
    int noadjust;
    int reorg;
    int log;
    int tanh;
    int *mask;
    int total;

    float alpha;
    float beta;
    float kappa;

    float coord_scale;
    float object_scale;
    float noobject_scale;
    float mask_scale;
    float class_scale;
    int bias_match;
    int random;
    float ignore_thresh;
    float truth_thresh;
    float thresh;
    float focus;
    int classfix;
    int absolute;

    int onlyforward;
    int stopbackward;
    /* NOTE(review): in upstream darknet dontload/dontsave/dontloadscales are
       all live fields; here two appear commented out. Reconstructed from a
       collapsed source line — confirm against the original header, since the
       choice changes the struct layout. */
    // int dontload;
    int dontsave;
    // int dontloadscales;

    float temperature;
    float probability;
    float scale;

    char  * cweights;
    int   * indexes;
    int   * input_layers;
    int   * input_sizes;
    int   * map;
    float * rand;
    float * cost;
    float * state;
    float * prev_state;
    float * forgot_state;
    float * forgot_delta;
    float * state_delta;
    float * combine_cpu;
    float * combine_delta_cpu;

    float * concat;
    float * concat_delta;

    float * binary_weights;

    float * biases;
    float * bias_updates;

    float * scales;
    float * scale_updates;

    float * weights;
    float * weight_updates;

    float * delta;
    float * output;
    float * loss;
    float * squared;
    float * norms;

    float * spatial_mean;
    float * mean;
    float * variance;

    float * mean_delta;
    float * variance_delta;

    float * rolling_mean;
    float * rolling_variance;

    float * x;
    float * x_norm;

    float * m;
    float * v;

    float * bias_m;
    float * bias_v;
    float * scale_m;
    float * scale_v;

    float *z_cpu;
    float *r_cpu;
    float *h_cpu;
    float * prev_state_cpu;

    float *temp_cpu;
    float *temp2_cpu;
    float *temp3_cpu;

    float *dh_cpu;
    float *hh_cpu;
    float *prev_cell_cpu;
    float *cell_cpu;
    float *f_cpu;
    float *i_cpu;
    float *g_cpu;
    float *o_cpu;
    float *c_cpu;
    float *dc_cpu;

    float * binary_input;

    /* sub-layers used by RNN/GRU/LSTM/CRNN composite layers */
    struct layer *input_layer;
    struct layer *self_layer;
    struct layer *output_layer;

    struct layer *reset_layer;
    struct layer *update_layer;
    struct layer *state_layer;

    struct layer *input_gate_layer;
    struct layer *state_gate_layer;
    struct layer *input_save_layer;
    struct layer *state_save_layer;
    struct layer *input_state_layer;
    struct layer *state_state_layer;

    struct layer *input_z_layer;
    struct layer *state_z_layer;

    struct layer *input_r_layer;
    struct layer *state_r_layer;

    struct layer *input_h_layer;
    struct layer *state_h_layer;

    struct layer *wz;
    struct layer *uz;
    struct layer *wr;
    struct layer *ur;
    struct layer *wh;
    struct layer *uh;
    struct layer *uo;
    struct layer *wo;
    struct layer *uf;
    struct layer *wf;
    struct layer *ui;
    struct layer *wi;
    struct layer *ug;
    struct layer *wg;

    //tree *softmax_tree;

    size_t workspace_size;
};

/* Releases every heap buffer owned by a layer (by value: the struct itself is
   a copy; only the pointed-to buffers are freed).
   NOTE(review): several pointer members (e.g. combine_cpu, temp_cpu, f_cpu …)
   are not freed here — possibly freed elsewhere or leaked; verify. */
void free_layer(layer l)
{
    if(l.cweights)           free(l.cweights);
    if(l.indexes)            free(l.indexes);
    if(l.input_layers)       free(l.input_layers);
    if(l.input_sizes)        free(l.input_sizes);
    if(l.map)                free(l.map);
    if(l.rand)               free(l.rand);
    if(l.cost)               free(l.cost);
    if(l.state)              free(l.state);
    if(l.prev_state)         free(l.prev_state);
    if(l.forgot_state)       free(l.forgot_state);
    if(l.forgot_delta)       free(l.forgot_delta);
    if(l.state_delta)        free(l.state_delta);
    if(l.concat)             free(l.concat);
    if(l.concat_delta)       free(l.concat_delta);
    if(l.binary_weights)     free(l.binary_weights);
    if(l.biases)             free(l.biases);
    if(l.bias_updates)       free(l.bias_updates);
    if(l.scales)             free(l.scales);
    if(l.scale_updates)      free(l.scale_updates);
    if(l.weights)            free(l.weights);
    if(l.weight_updates)     free(l.weight_updates);
    if(l.delta)              free(l.delta);
    if(l.output)             free(l.output);
    if(l.squared)            free(l.squared);
    if(l.norms)              free(l.norms);
    if(l.spatial_mean)       free(l.spatial_mean);
    if(l.mean)               free(l.mean);
    if(l.variance)           free(l.variance);
    if(l.mean_delta)         free(l.mean_delta);
    if(l.variance_delta)     free(l.variance_delta);
    if(l.rolling_mean)       free(l.rolling_mean);
    if(l.rolling_variance)   free(l.rolling_variance);
    if(l.x)                  free(l.x);
    if(l.x_norm)             free(l.x_norm);
    if(l.m)                  free(l.m);
    if(l.v)                  free(l.v);
    if(l.z_cpu)              free(l.z_cpu);
    if(l.r_cpu)              free(l.r_cpu);
    if(l.h_cpu)              free(l.h_cpu);
    if(l.binary_input)       free(l.binary_input);
}
//void free_layer(layer);

/* Learning-rate schedule kinds. */
typedef enum {
    CONSTANT, STEP, EXP, POLY, STEPS, SIG, RANDOM
} learning_rate_policy;

/* Whole-network state: layer array plus training hyperparameters and the
   shared input/workspace buffers. */
typedef struct network{
    int n;                      /* number of layers */
    int batch;
    size_t *seen;
    int *t;
    float epoch;
    int subdivisions;
    layer *layers;
    float *output;
    learning_rate_policy policy;

    float learning_rate;
    float momentum;
    float decay;
    float gamma;
    float scale;
    float power;
    int time_steps;
    int step;
    int max_batches;
    float *scales;
    int   *steps;
    int num_steps;
    int burn_in;

    int adam;
    float B1;
    float B2;
    float eps;

    int inputs;
    int outputs;
    int truths;
    int notruth;
    int h, w, c;
    int max_crop;
    int min_crop;
    float max_ratio;
    float min_ratio;
    int center;
    float angle;
    float aspect;
    float exposure;
    float saturation;
    float hue;
    int random;

    int gpu_index;
    // tree *hierarchy;

    float *input;
    float *truth;
    float *delta;
    float *workspace;
    int train;
    int index;
    float *cost;
    float clip;
} network;

network *make_network(int n);
layer get_network_output_layer(network *net);

/* Parameters for a random image augmentation. */
typedef struct {
    int w;
    int h;
    float scale;
    float rad;
    float dx;
    float dy;
    float aspect;
} augment_args;

/* CHW float image; data is heap-owned (see make_image / free_image). */
typedef struct {
    int w;
    int h;
    int c;
    float *data;
} image;

/* Axis-aligned bounding box. */
typedef struct{
    float x, y, w, h;
} box;

/* One detection: box plus per-class probabilities. */
typedef struct detection{
    box bbox;
    int classes;
    float *prob;
    float *mask;
    float objectness;
    int sort_class;
} detection;

typedef struct matrix{
    int rows, cols;
    float **vals;
} matrix;

typedef struct{
    int w, h;
    matrix X;
    matrix y;
    int shallow;
    int *num_boxes;
    box **boxes;
} data;

/* Dataset loading modes. */
typedef enum {
    CLASSIFICATION_DATA, DETECTION_DATA, CAPTCHA_DATA, REGION_DATA, IMAGE_DATA, COMPARE_DATA,
    WRITING_DATA, SWAG_DATA, TAG_DATA, OLD_CLASSIFICATION_DATA, STUDY_DATA, DET_DATA, SUPER_DATA,
    LETTERBOX_DATA, REGRESSION_DATA, SEGMENTATION_DATA, INSTANCE_DATA
} data_type;

/* Arguments for the (threaded) data loader. */
typedef struct load_args{
    int threads;
    char **paths;
    char *path;
    int n;
    int m;
    char **labels;
    int h;
    int w;
    int out_w;
    int out_h;
    int nh;
    int nw;
    int num_boxes;
    int min, max, size;
    int classes;
    int background;
    int scale;
    int center;
    int coords;
    float jitter;
    float angle;
    float aspect;
    float saturation;
    float exposure;
    float hue;
    data *d;
    image *im;
    image *resized;
    data_type type;
    // tree *hierarchy;
} load_args;

/* Ground-truth box as read from a label file. */
typedef struct{
    int id;
    float x,y,w,h;
    float left, right, top, bottom;
} box_label;

//network *load_network(char *cfg, char *weights, int clear);
//load_args get_base_args(network *net);
//void free_data(data d);

/* key/value pair for config-file options; `used` tracks option_unused. */
typedef struct{
    char *key;
    char *val;
    int used;
} kvp;

/* Doubly-linked list node. */
typedef struct node{
    void *val;
    struct node *next;
    struct node *prev;
} node;
/* Doubly-linked list header. */
typedef struct list{
    int size;
    node *front;
    node *back;
} list;

/* Prints s via perror and aborts.
   NOTE(review): assert() needs <assert.h>, which is not among the visible
   includes — confirm it is pulled in elsewhere. The assert is also a no-op
   under NDEBUG, leaving exit(-1) to terminate. */
void error(const char *s)
{
    perror(s);
    assert(0);
    exit(-1);
}

/* Fatal out-of-memory handler. */
void malloc_error()
{
    fprintf(stderr, "Malloc error\n");
    exit(-1);
}

/* Fatal "cannot open file" handler (exits with status 0, unlike the other
   error paths — kept as-is). */
void file_error(char *s)
{
    fprintf(stderr, "Couldn't open file: %s\n", s);
    exit(0);
}

/////////////////list begin
/* Allocates an empty list. NOTE(review): malloc result is unchecked. */
list *make_list()
{
    list *l = (list *)malloc(sizeof(list));
    l->size = 0;
    l->front = 0;
    l->back = 0;
    return l;
}

/* Removes and returns the value at the back of the list, or 0 if empty.
   NOTE(review): when the last node is popped, l->front is left dangling. */
void *list_pop(list *l){
    if(!l->back) return 0;
    node *b = l->back;
    void *val = b->val;
    l->back = b->prev;
    if(l->back) l->back->next = 0;
    free(b);
    --l->size;
    return val;
}

/* Appends val at the back of the list. */
void list_insert(list *l, void *val)
{
    node *new_node = (node *)malloc(sizeof(node));
    new_node->val = val;
    new_node->next = 0;

    if(!l->back){
        l->front = new_node;
        new_node->prev = 0;
    }else{
        l->back->next = new_node;
        new_node->prev = l->back;
    }
    l->back = new_node;
    ++l->size;
}

/* Frees a chain of nodes (the nodes only, not their values). */
void free_node(node *n)
{
    node *next;
    while(n) {
        next = n->next;
        free(n);
        n = next;
    }
}

/* Frees the list's nodes and the list header (values are NOT freed;
   use free_list_contents first if the list owns them). */
void free_list(list *l)
{
    free_node(l->front);
    free(l);
}

/* Frees every stored value; nodes and header remain. */
void free_list_contents(list *l)
{
    node *n = l->front;
    while(n){
        free(n->val);
        n = n->next;
    }
}

/* Copies the list's values into a newly-allocated array of l->size slots. */
void **list_to_array(list *l)
{
    void **a = (void **)calloc(l->size, sizeof(void*));
    int count = 0;
    node *n = l->front;
    while(n){
        a[count++] = n->val;
        n = n->next;
    }
    return a;
}

/////////////////list end

/////////////////////utils begin
/* Removes argv[index], shifting the remaining arguments left. */
void del_arg(int argc, char **argv, int index)
{
    int i;
    for(i = index; i < argc-1; ++i) argv[i] = argv[i+1];
    argv[i] = 0;
}

/* Returns 1 (and removes the flag from argv) if `arg` is present. */
int find_arg(int argc, char* argv[], char *arg)
{
    int i;
    for(i = 0; i < argc; ++i) {
        if(!argv[i]) continue;
        if(0==strcmp(argv[i], arg)) {
            del_arg(argc, argv, i);
            return 1;
        }
    }
    return 0;
}

/* Returns the integer value following `arg` (removing both from argv),
   or `def` when absent. */
int find_int_arg(int argc, char **argv, char *arg, int def)
{
    int i;
    for(i = 0; i < argc-1; ++i){
        if(!argv[i]) continue;
        if(0==strcmp(argv[i], arg)){
            def = atoi(argv[i+1]);
            del_arg(argc, argv, i);
            del_arg(argc, argv, i);
            break;
        }
    }
    return def;
}

/* Same as find_int_arg, for a float value. */
float find_float_arg(int argc, char **argv, char *arg, float def)
{
    int i;
    for(i = 0; i < argc-1; ++i){
        if(!argv[i]) continue;
        if(0==strcmp(argv[i], arg)){
            def = atof(argv[i+1]);
            del_arg(argc, argv, i);
            del_arg(argc, argv, i);
            break;
        }
    }
    return def;
}

/* Same as find_int_arg, for a string value (returns the argv pointer). */
char *find_char_arg(int argc, char **argv, char *arg, char *def)
{
    int i;
    for(i = 0; i < argc-1; ++i){
        if(!argv[i]) continue;
        if(0==strcmp(argv[i], arg)){
            def = argv[i+1];
            del_arg(argc, argv, i);
            del_arg(argc, argv, i);
            break;
        }
    }
    return def;
}

/* Reads an entire file into a NUL-terminated heap buffer.
   NOTE(review): fopen/fread results are unchecked — a missing file
   dereferences a NULL FILE*; consider calling file_error here. */
unsigned char *read_file(char *filename)
{
    FILE *fp = fopen(filename, "rb");
    size_t size;

    fseek(fp, 0, SEEK_END);
    size = ftell(fp);
    fseek(fp, 0, SEEK_SET);

    unsigned char *text = (unsigned char *)calloc(size+1, sizeof(unsigned char));
    fread(text, 1, size, fp);
    fclose(fp);
    return text;
}

/* Splits s in place on `delim`; returned list holds pointers into s. */
list *split_str(char *s, char delim)
{
    size_t i;
    size_t len = strlen(s);
    list *l = make_list();
    list_insert(l, s);
    for(i = 0; i < len; ++i){
        if(s[i] == delim){
            s[i] = '\0';
            list_insert(l, &(s[i+1]));
        }
    }
    return l;
}

/* Removes all spaces, tabs and newlines from s in place. */
void strip(char *s)
{
    size_t i;
    size_t len = strlen(s);
    size_t offset = 0;
    for(i = 0; i < len; ++i){
        char c = s[i];
        if(c==' '||c=='\t'||c=='\n') ++offset;
        else s[i-offset] = c;
    }
    s[len-offset] = '\0';
}

/* Removes every occurrence of `bad` from s in place. */
void strip_char(char *s, char bad)
{
    size_t i;
    size_t len = strlen(s);
    size_t offset = 0;
    for(i = 0; i < len; ++i){
        char c = s[i];
        if(c==bad) ++offset;
        else s[i-offset] = c;
    }
    s[len-offset] = '\0';
}

/* Frees n pointers and then the pointer array itself. */
void free_ptrs(void **ptrs, int n)
{
    int i;
    for(i = 0; i < n; ++i) free(ptrs[i]);
    free(ptrs);
}

/* Reads one line of arbitrary length from fp into a heap buffer (growing by
   doubling); strips the trailing newline. Returns 0 at EOF.
   NOTE(review): INT_MAX needs <limits.h> (not visible here); the realloc
   failure path prints the size then calls malloc_error. */
char *fgetl(FILE *fp)
{
    if(feof(fp)) return 0;
    size_t size = 512;
    char *line = (char *)malloc(size*sizeof(char));
    if(!fgets(line, size, fp)){
        free(line);
        return 0;
    }

    size_t curr = strlen(line);

    while((line[curr-1] != '\n') && !feof(fp)){
        if(curr == size-1){
            size *= 2;
            line = (char *)realloc(line, size*sizeof(char));
            if(!line) {
                printf("%ld\n", size);
                malloc_error();
            }
        }
        size_t readsize = size-curr;
        if(readsize > INT_MAX) readsize = INT_MAX-1;
        fgets(&line[curr], readsize, fp);
        curr = strlen(line);
    }
    if(line[curr-1] == '\n') line[curr-1] = '\0';

    return line;
}
/////////////////////utils end

////////////////////option_list begin

// Append a key/value pair to the options list. The key/val strings are
// referenced, not copied, so they must outlive the list.
void option_insert(list *l, char *key, char *val)
{
    kvp *p = (kvp *)malloc(sizeof(kvp));
    p->key = key;
    p->val = val;
    p->used = 0;
    list_insert(l, p);
}

// Parse one "key=value" line in place (the '=' is overwritten with '\0').
// Returns 1 on success, 0 when '=' is the last character of the line.
// NOTE(review): when no '=' is present at all, i == len here, the check
// below is false, and the pair is inserted with val == NULL; option_unused()
// would later pass that NULL to fprintf("%s") -- verify config inputs.
int read_option(char *s, list *options)
{
    size_t i;
    size_t len = strlen(s);
    char *val = 0;
    for(i = 0; i < len; ++i){
        if(s[i] == '='){
            s[i] = '\0';
            val = s+i+1;
            break;
        }
    }
    if(i == len-1) return 0;
    char *key = s;
    option_insert(options, key, val);
    return 1;
}

// Warn on stderr about options never consumed by an option_find* call.
void option_unused(list *l)
{
    node *n = l->front;
    while(n){
        kvp *p = (kvp *)n->val;
        if(!p->used){
            fprintf(stderr, "Unused field: '%s = %s'\n", p->key, p->val);
        }
        n = n->next;
    }
}

// Linear search for key; marks the pair as used. Returns NULL when absent.
char *option_find(list *l, char *key)
{
    node *n = l->front;
    while(n){
        kvp *p = (kvp *)n->val;
        if(strcmp(p->key, key) == 0){
            p->used = 1;
            return p->val;
        }
        n = n->next;
    }
    return 0;
}

// option_find_* helpers: look a key up and fall back to def; the
// non-"quiet" variants log the fallback to stderr.
char *option_find_str(list *l, char *key, char *def)
{
    char *v = option_find(l, key);
    if(v) return v;
    if(def) fprintf(stderr, "%s: Using default '%s'\n", key, def);
    return def;
}

int option_find_int(list *l, char *key, int def)
{
    char *v = option_find(l, key);
    if(v) return atoi(v);
    fprintf(stderr, "%s: Using default '%d'\n", key, def);
    return def;
}

int option_find_int_quiet(list *l, char *key, int def)
{
    char *v = option_find(l, key);
    if(v) return atoi(v);
    return def;
}

float option_find_float_quiet(list *l, char *key, float def)
{
    char *v = option_find(l, key);
    if(v) return atof(v);
    return def;
}

float option_find_float(list *l, char *key, float def)
{
    char *v = option_find(l, key);
    if(v) return atof(v);
    fprintf(stderr, "%s: Using default '%lf'\n", key, def);
    return def;
}

// Read a "key=value" config file into an options list. Lines that are
// empty or start with '#' or ';' are skipped as comments.
list *read_data_cfg(char *filename)
{
    FILE *file = fopen(filename, "r");
    if(file == 0) file_error(filename);
    char *line;
    int nu = 0;
    list *options = make_list();
    while((line=fgetl(file)) != 0){
        ++ nu;
        strip(line);
        switch(line[0]){
            case '\0':
            case '#':
            case ';':
                free(line);
                break;
            default:
                if(!read_option(line, options)){
                    fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line);
                    free(line);
                }
                break;
        }
    }
    fclose(file);
    return options;
}

///////////////////option_list end

// Image header (w*h*c, CHW float layout) with no pixel storage attached.
image make_empty_image(int w, int h, int c)
{
    image out;
    out.data = 0;
    out.h = h;
    out.w = w;
    out.c = c;
    return out;
}

// Read a text file into a list of heap-allocated lines (one node per line).
list *get_paths(char *filename)
{
    char *path;
    FILE *file = fopen(filename, "r");
    if(!file) file_error(filename);
    list *lines = make_list();
    while((path=fgetl(file))){
        list_insert(lines, path);
    }
    fclose(file);
    return lines;
}

// Load class-label strings, one per line of filename.
char **get_labels(char *filename)
{
    list *plist = get_paths(filename);
    char **labels = (char **)list_to_array(plist);
    free_list(plist);
    return labels;
}

// Allocate a zero-initialized w*h*c float image.
image make_image(int w, int h, int c)
{
    image out = make_empty_image(w,h,c);
    out.data = (float *)calloc(h*w*c, sizeof(float));
    return out;
}

// Pixel accessors for the CHW layout: index = c*h*w + y*w + x.
static float get_pixel(image m, int x, int y, int c)
{
    assert(x < m.w && y < m.h && c < m.c);
    return m.data[c*m.h*m.w + y*m.w + x];
}

// set_pixel silently ignores out-of-range coordinates.
static void set_pixel(image m, int x, int y, int c, float val)
{
    if (x < 0 || y < 0 || c < 0 || x >= m.w || y >= m.h || c >= m.c) return;
    assert(x < m.w && y < m.h && c < m.c);
    m.data[c*m.h*m.w + y*m.w + x] = val;
}

static void add_pixel(image m, int x, int y, int c, float val)
{
    assert(x < m.w && y < m.h && c < m.c);
    m.data[c*m.h*m.w + y*m.w + x] += val;
}

void free_image(image m)
{
    if(m.data){
        free(m.data);
    }
}

// Bilinear resize: first horizontally into `part`, then vertically into
// `resized`. NOTE(review): w_scale/h_scale divide by (w-1)/(h-1); a target
// width or height of 1 divides by zero -- confirm callers never request it.
image resize_image(image im, int w, int h)
{
    image resized = make_image(w, h, im.c);
    image part = make_image(w, im.h, im.c);
    int r, c, k;
    float w_scale = (float)(im.w - 1) / (w - 1);
    float h_scale = (float)(im.h - 1) / (h - 1);
    for(k = 0; k < im.c; ++k){
        for(r = 0; r < im.h; ++r){
            for(c = 0; c < w; ++c){
                float val = 0;
                if(c == w-1 || im.w == 1){
                    val = get_pixel(im, im.w-1, r, k);
                } else {
                    float sx = c*w_scale;
                    int ix = (int) sx;
                    float dx = sx - ix;
                    val = (1 - dx) * get_pixel(im, ix, r, k) + dx * get_pixel(im, ix+1, r, k);
                }
                set_pixel(part, c, r, k, val);
            }
        }
    }
    for(k = 0; k < im.c; ++k){
        for(r = 0; r < h; ++r){
            float sy = r*h_scale;
            int iy = (int) sy;
            float dy = sy - iy;
            for(c = 0; c < w; ++c){
                float val = (1-dy) * get_pixel(part, c, iy, k);
                set_pixel(resized, c, r, k, val);
            }
            if(r == h-1 || im.h == 1) continue;
            for(c = 0; c < w; ++c){
                float val = dy * get_pixel(part, c, iy+1, k);
                add_pixel(resized, c, r, k, val);
            }
        }
    }
    free_image(part);
    return resized;
}

// Set every pixel of m to s.
void fill_image(image m, float s)
{
    int i;
    for(i = 0; i < m.h*m.w*m.c; ++i) m.data[i] = s;
}

// Copy source into dest with its top-left corner at (dx,dy);
// out-of-bounds pixels are dropped by set_pixel.
void embed_image(image source, image dest, int dx, int dy)
{
    int x,y,k;
    for(k = 0; k < source.c; ++k){
        for(y = 0; y < source.h; ++y){
            for(x = 0; x < source.w; ++x){
                float val = get_pixel(source, x,y,k);
                set_pixel(dest, dx+x, dy+y, k, val);
            }
        }
    }
}

// Aspect-preserving resize into a w*h canvas, padding with mid-gray (0.5).
image letterbox_image(image im, int w, int h)
{
    int new_w = im.w;
    int new_h = im.h;
    if (((float)w/im.w) < ((float)h/im.h)) {
        new_w = w;
        new_h = (im.h * w)/im.w;
    } else {
        new_h = h;
        new_w = (im.w * h)/im.h;
    }
    image resized = resize_image(im, new_w, new_h);
    image boxed = make_image(w, h, im.c);
    fill_image(boxed, .5);
    //int i;
    //for(i = 0; i < boxed.w*boxed.h*boxed.c; ++i) boxed.data[i] = 0;
    embed_image(resized, boxed, (w-new_w)/2, (h-new_h)/2);
    free_image(resized);
    return boxed;
}

// Decode an image with stb_image into floats in [0,1], interleaved HWC -> planar CHW.
image load_image_stb(char *filename, int channels)
{
    int w, h, c;
    unsigned char *data = stbi_load(filename, &w, &h, &c, channels);
    if (!data) {
        fprintf(stderr, "Cannot load image \"%s\"\nSTB Reason: %s\n", filename, stbi_failure_reason());
        exit(0);
    }
    if(channels) c = channels;
    int i,j,k;
    image im = make_image(w, h, c);
    for(k = 0; k < c; ++k){
        for(j = 0; j < h; ++j){
            for(i = 0; i < w; ++i){
                int dst_index = i + w*j + w*h*k;
                int src_index = k + c*i + c*w*j;
                im.data[dst_index] = (float)data[src_index]/255.;
            }
        }
    }
    free(data);
    return im;
}

// Write im to "<name>.png", converting planar CHW floats back to interleaved bytes.
void save_image_png(image im, const char *name)
{
    char buff[256];
    //sprintf(buff, "%s (%d)", name, windows);
    sprintf(buff, "%s.png", name);
    unsigned char *data = (unsigned char *)calloc(im.w*im.h*im.c, sizeof(char));
    int i,k;
    for(k = 0; k < im.c; ++k){
        for(i = 0; i < im.w*im.h; ++i){
            data[i*im.c+k] = (unsigned char) (255*im.data[i + k*im.w*im.h]);
        }
    }
    int
    // (tail of save_image_png; the declaration "int" precedes this chunk)
    success = stbi_write_png(buff, im.w, im.h, im.c, data, im.w*im.c);
    free(data);
    if(!success) fprintf(stderr, "Failed to write image %s\n", buff);
}

// Load the 8 sizes x 95 printable-ASCII label glyphs from labels/<ascii>_<size>.png.
// NOTE(review): the outer calloc uses sizeof(image) but stores image* elements;
// sizeof(image) exceeds sizeof(image*) on common targets so this over-allocates
// rather than overflows, but it should be sizeof(image *).
image **load_alphabet()
{
    int i, j;
    const int nsize = 8;
    image **alphabets = (image **)calloc(nsize, sizeof(image));
    for(j = 0; j < nsize; ++j){
        alphabets[j] = (image *)calloc(128, sizeof(image));
        for(i = 32; i < 127; ++i){
            char buff[256];
            sprintf(buff, "labels/%d_%d.png", i, j);
            //alphabets[j][i] = load_image_color(buff, 0, 0);
            alphabets[j][i] = load_image_stb(buff, 3);
        }
    }
    return alphabets;
}

///////////////////activation begin

// Scalar activation functions, one per ACTIVATION enum value.
static inline float stair_activate(float x)
{
    int n = floor(x);
    if (n%2 == 0) return floor(x/2.);
    else return (x - n) + floor(x/2.);
}
static inline float hardtan_activate(float x)
{
    if (x < -1) return -1;
    if (x > 1) return 1;
    return x;
}
static inline float linear_activate(float x){return x;}
static inline float logistic_activate(float x){return 1./(1. + exp(-x));}
static inline float loggy_activate(float x){return 2./(1. + exp(-x)) - 1;}
static inline float relu_activate(float x){return x*(x>0);}
static inline float elu_activate(float x){return (x >= 0)*x + (x < 0)*(exp(x)-1);}
static inline float relie_activate(float x){return (x>0) ? x : .01*x;}
static inline float ramp_activate(float x){return x*(x>0)+.1*x;}
static inline float leaky_activate(float x){return (x>0) ? x : .1*x;}
static inline float tanh_activate(float x){return (exp(2*x)-1)/(exp(2*x)+1);}
static inline float plse_activate(float x)
{
    if(x < -4) return .01 * (x + 4);
    if(x > 4)  return .01 * (x - 4) + 1;
    return .125*x + .5;
}
static inline float lhtan_activate(float x)
{
    if(x < 0) return .001*x;
    if(x > 1) return .001*(x-1) + 1;
    return x;
}

// Matching derivatives. NOTE: following darknet convention, several of these
// (logistic, loggy, tanh) take the activation OUTPUT as x, not the input.
static inline float lhtan_gradient(float x)
{
    if(x > 0 && x < 1) return 1;
    return .001;
}
static inline float hardtan_gradient(float x)
{
    if (x > -1 && x < 1) return 1;
    return 0;
}
static inline float linear_gradient(float x){return 1;}
static inline float logistic_gradient(float x){return (1-x)*x;}
static inline float loggy_gradient(float x)
{
    float y = (x+1.)/2.;
    return 2*(1-y)*y;
}
static inline float stair_gradient(float x)
{
    if (floor(x) == x) return 0;
    return 1;
}
static inline float relu_gradient(float x){return (x>0);}
static inline float elu_gradient(float x){return (x >= 0) + (x < 0)*(x + 1);}
static inline float relie_gradient(float x){return (x>0) ? 1 : .01;}
static inline float ramp_gradient(float x){return (x>0)+.1;}
static inline float leaky_gradient(float x){return (x>0) ? 1 : .1;}
static inline float tanh_gradient(float x){return 1-x*x;}
static inline float plse_gradient(float x){return (x < 0 || x > 1) ? .01 : .125;}

// Enum -> config-file name (defaults to "relu").
char *get_activation_string(ACTIVATION a)
{
    switch(a){
        case LOGISTIC: return "logistic";
        case LOGGY:    return "loggy";
        case RELU:     return "relu";
        case ELU:      return "elu";
        case RELIE:    return "relie";
        case RAMP:     return "ramp";
        case LINEAR:   return "linear";
        case TANH:     return "tanh";
        case PLSE:     return "plse";
        case LEAKY:    return "leaky";
        case STAIR:    return "stair";
        case HARDTAN:  return "hardtan";
        case LHTAN:    return "lhtan";
        default: break;
    }
    return "relu";
}

// Config-file name -> enum (warns and defaults to RELU on unknown names).
ACTIVATION get_activation(char *s)
{
    if (strcmp(s, "logistic")==0) return LOGISTIC;
    if (strcmp(s, "loggy")==0) return LOGGY;
    if (strcmp(s, "relu")==0) return RELU;
    if (strcmp(s, "elu")==0) return ELU;
    if (strcmp(s, "relie")==0) return RELIE;
    if (strcmp(s, "plse")==0) return PLSE;
    if (strcmp(s, "hardtan")==0) return HARDTAN;
    if (strcmp(s, "lhtan")==0) return LHTAN;
    if (strcmp(s, "linear")==0) return LINEAR;
    if (strcmp(s, "ramp")==0) return RAMP;
    if (strcmp(s, "leaky")==0) return LEAKY;
    if (strcmp(s, "tanh")==0) return TANH;
    if (strcmp(s, "stair")==0) return STAIR;
    fprintf(stderr, "Couldn't find activation function %s, going with ReLU\n", s);
    return RELU;
}

// Dispatch a single activation by enum value.
float activate(float x, ACTIVATION a)
{
    switch(a){
        case LINEAR:   return linear_activate(x);
        case LOGISTIC: return logistic_activate(x);
        case LOGGY:    return loggy_activate(x);
        case RELU:     return relu_activate(x);
        case ELU:      return elu_activate(x);
        case RELIE:    return relie_activate(x);
        case RAMP:     return ramp_activate(x);
        case LEAKY:    return leaky_activate(x);
        case TANH:     return tanh_activate(x);
        case PLSE:     return plse_activate(x);
        case STAIR:    return stair_activate(x);
        case HARDTAN:  return hardtan_activate(x);
        case LHTAN:    return lhtan_activate(x);
    }
    return 0;
}

// Apply activation a to x[0..n) in place.
void activate_array(float *x, const int n, const ACTIVATION a)
{
    int i;
    for(i = 0; i < n; ++i){
        x[i] = activate(x[i], a);
    }
}

// Dispatch a single activation derivative by enum value.
float gradient(float x, ACTIVATION a)
{
    switch(a){
        case LINEAR:   return linear_gradient(x);
        case LOGISTIC: return logistic_gradient(x);
        case LOGGY:    return loggy_gradient(x);
        case RELU:     return relu_gradient(x);
        case ELU:      return elu_gradient(x);
        case RELIE:    return relie_gradient(x);
        case RAMP:     return ramp_gradient(x);
        case LEAKY:    return leaky_gradient(x);
        case TANH:     return tanh_gradient(x);
        case PLSE:     return plse_gradient(x);
        case STAIR:    return stair_gradient(x);
        case HARDTAN:  return hardtan_gradient(x);
        case LHTAN:    return lhtan_gradient(x);
    }
    return 0;
}

///////////////////activation end

// Strided copy: Y[i*INCY] = X[i*INCX] for i in [0,N).
void copy_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for(i = 0; i < N; ++i) Y[i*INCY] = X[i*INCX];
}

// Strided fill: X[i*INCX] = ALPHA for i in [0,N).
void fill_cpu(int N, float ALPHA, float *X, int INCX)
{
    int i;
    for(i = 0; i < N; ++i) X[i*INCX] = ALPHA;
}

// Weighted element-wise sum of two feature maps of possibly different
// spatial sizes: out = s1*out + s2*add over the overlapping region.
void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out)
{
    int stride = w1/w2;
    int sample = w2/w1;
    assert(stride == h1/h2);
    assert(sample == h2/h1);
    //printf("shorcut_layer batch=%d,stride=%d,sample=%d\n",batch,stride,sample);
    if(stride < 1) stride = 1;
    if(sample < 1) sample = 1;
    int minw = (w1 < w2) ? w1 : w2;
    int minh = (h1 < h2) ? h1 : h2;
    int minc = (c1 < c2) ?
    // (tail of shortcut_cpu; "int minc = (c1 < c2) ?" precedes this chunk)
    c1 : c2;
    int i,j,k,b;
    for(b = 0; b < batch; ++b){
        for(k = 0; k < minc; ++k){
            for(j = 0; j < minh; ++j){
                for(i = 0; i < minw; ++i){
                    int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
                    int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
                    out[out_index] = s1*out[out_index] + s2*add[add_index];
                }
            }
        }
    }
}

// Residual connection forward pass, simplified for the equal-size,
// batch-1 case: output = input + output of the layer at l.index.
void forward_shortcut_layer(const layer l, network net)
{
    //copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1);
    //shortcut_cpu(l.batch, l.w, l.h, l.c, net.layers[l.index].output, l.out_w, l.out_h, l.out_c, l.alpha, l.beta, l.output);
    //activate_array(l.output, l.outputs*l.batch, l.activation);
    int w = l.w;
    int h = l.h;
    int c = l.c;
    float *add = net.layers[l.index].output;
    float *out = l.output;
    float *in = net.input;
    int i,j,k;
    for(k = 0; k < c; ++k){
        for(j = 0; j < h; ++j){
            for(i = 0; i < w; ++i){
                int index = i + w*(j + h*k );
                out[index] = in[index] + add[index];
            }
        }
    }
}

// Build a shortcut (residual) layer joining layer `index` to the current one.
layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2)
{
    fprintf(stderr, "res %3d %4d x%4d x%4d -> %4d x%4d x%4d\n",index, w2,h2,c2, w,h,c);
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = SHORTCUT;
    l.batch = batch;
    l.w = w2;
    l.h = h2;
    l.c = c2;
    l.out_w = w;
    l.out_h = h;
    l.out_c = c;
    l.outputs = w*h*c;
    l.inputs = l.outputs;
    l.index = index;
    l.output = (float *)calloc(l.outputs*batch, sizeof(float));;
    l.forward = forward_shortcut_layer;
    return l;
}

// Standard convolution output-size formulas.
int convolutional_out_height(layer l)
{
    return (l.h + 2*l.pad - l.size) / l.stride + 1;
}

int convolutional_out_width(layer l)
{
    return (l.w + 2*l.pad - l.size) / l.stride + 1;
}

// Bytes of scratch space im2col needs for one forward pass of l.
static size_t get_workspace_size(layer l){
    return (size_t)l.out_h*l.out_w*l.size*l.size*l.c/l.groups*sizeof(float);
}

// Add one bias per output channel across all spatial positions.
void add_bias(float *output, float *biases, int batch, int n, int size)
{
    int i,j,b;
    for(b = 0; b < batch; ++b){
        for(i = 0; i < n; ++i){
            for(j = 0; j < size; ++j){
                output[(b*n + i)*size + j] += biases[i];
            }
        }
    }
}

// Multiply each output channel by its batch-norm scale.
void scale_bias(float *output, float *scales, int batch, int n, int size)
{
    int i,j,b;
    for(b = 0; b < batch; ++b){
        for(i = 0; i < n; ++i){
            for(j = 0; j < size; ++j){
                output[(b*n + i)*size + j] *= scales[i];
            }
        }
    }
}

// Fetch a padded pixel; coordinates that fall in the padding return 0.
float im2col_get_pixel(float *im, int height, int width, int channels, int row, int col, int channel, int pad)
{
    row -= pad;
    col -= pad;
    if (row < 0 || col < 0 || row >= height || col >= width) return 0;
    return im[col + width*(row + height*channel)];
}

//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// Unfold image patches into columns so convolution becomes a GEMM.
void im2col_cpu(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col)
{
    int c,h,w;
    int height_col = (height + 2*pad - ksize) / stride + 1;
    int width_col = (width + 2*pad - ksize) / stride + 1;
    int channels_col = channels * ksize * ksize;
    for (c = 0; c < channels_col; ++c) {
        int w_offset = c % ksize;
        int h_offset = (c / ksize) % ksize;
        int c_im = c / ksize / ksize;
        for (h = 0; h < height_col; ++h) {
            for (w = 0; w < width_col; ++w) {
                int im_row = h_offset + h * stride;
                int im_col = w_offset + w * stride;
                int col_index = (c * height_col + h) * width_col + w;
                data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
            }
        }
    }
}

// C += ALPHA * A * B for row-major non-transposed operands.
// NOTE(review): under "#pragma omp parallel for" the loop index i is
// privatized, but j and k are declared outside the loop and are therefore
// shared between threads -- a data race. They should be declared inside the
// loop or listed in a private() clause. Verify before enabling OpenMP.
void gemm_nn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc)
{
    int i,j,k;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        for(k = 0; k < K; ++k){
            register float A_PART = ALPHA*A[i*lda+k];
            for(j = 0; j < N; ++j){
                C[i*ldc+j] += A_PART*B[k*ldb+j];
            }
        }
    }
}

// C = ALPHA*op(A)*op(B) + BETA*C; only the non-transposed path is compiled in.
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc)
{
    //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
    int i, j;
    for(i = 0; i < M; ++i){
        for(j = 0; j < N; ++j){
            C[i*ldc + j] *= BETA;
        }
    }
    if(!TA && !TB)
        gemm_nn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
    //else if(TA && !TB)
    //    gemm_tn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
    //else if(!TA && TB)
    //    gemm_nt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
    //else
    //    gemm_tt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
}

// Public GEMM entry point; forwards to the CPU implementation.
void gemm(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc)
{
    gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}

// x = (x - mean) / (sqrt(variance) + eps), per channel.
void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
    int b, f, i;
    for(b = 0; b < batch; ++b){
        for(f = 0; f < filters; ++f){
            for(i = 0; i < spatial; ++i){
                int index = b*filters*spatial + f*spatial + i;
                x[index] = (x[index] - mean[f])/(sqrt(variance[f]) + .000001f);
            }
        }
    }
}

// Inference-time batch norm for conv layers: normalize with the rolling
// statistics, then scale and shift.
void forward_batchnorm_layer(layer l, network net)//for conv
{
    normalize_cpu(l.output, l.rolling_mean, l.rolling_variance, l.batch, l.out_c, l.out_h*l.out_w);
    scale_bias(l.output, l.scales, l.batch, l.out_c, l.out_h*l.out_w);
    add_bias(l.output, l.biases, l.batch, l.out_c, l.out_h*l.out_w);
}

// Direct (non-im2col) padded convolution reference implementation.
// Despite the name, no ReLU is applied here -- only the raw convolution.
void CONV_Padding_Relu(float *Input,float *Output,float *Weight,const int InFM_num,const int OutFM_num,const int Kernel_size,const int Kernel_stride,const int Input_w,const int Input_h,const int Padding)
{
    // (output_w - 1)*Kernel_stride + Kernel_size = Input_w
    const int output_w = (Input_w - Kernel_size + 2*Padding)/Kernel_stride + 1 ;
    const int output_h = (Input_h - Kernel_size + 2*Padding)/Kernel_stride + 1 ;
    int x, y, of, inf;
    int m,n;
    for( of = 0; of < OutFM_num; of++){
        for( y = 0; y < output_h; y++) {
            for( x = 0; x < output_w; x++){
                float tmp = 0.0;
                for(inf = 0;inf < InFM_num; inf++) {
                    int intput_offset = inf*Input_w*Input_h + (y*Kernel_stride - Padding)*Input_w + x*Kernel_stride - Padding;
                    for(m = 0;m < Kernel_size; m++) {
                        for(n = 0;n < Kernel_size; n++) {
                            int kernel_offset = of*InFM_num*Kernel_size*Kernel_size + inf*Kernel_size*Kernel_size;
                            bool inFM_width = ((x*Kernel_stride + n - Padding) >= 0)&&((x*Kernel_stride + n - Padding) < Input_w);
                            bool inFM_height = ((y*Kernel_stride + m - Padding) >= 0)&&((y*Kernel_stride + m - Padding) < Input_h);
                            if(inFM_width&&inFM_height)
                                tmp += Weight[kernel_offset + m*Kernel_size +
                                // (tail of CONV_Padding_Relu's accumulation expression)
                                n]*Input[intput_offset + m*Input_w + n];
                        }
                    }
                }
                Output[of*output_w*output_h + y*output_w + x] = tmp;
            }
        }
    }
}

// Convolution forward pass via im2col + GEMM (single batch, single group),
// followed by batch norm or bias, then the layer activation.
void forward_convolutional_layer(layer l, network net)
{
    int i, j;
    fill_cpu(l.outputs*l.batch, 0, l.output, 1);
    //printf("c=%d,n=%d,size=%d,stride=%d,w=%d,h=%d,pad=%d\n",l.c,l.n,l.size,l.stride,l.w,l.h,l.pad);
    //int m = l.n/l.groups;
    //int k = l.size*l.size*l.c/l.groups;
    //int n = l.out_w*l.out_h;
    //for(i = 0; i < l.batch; ++i){
    //    for(j = 0; j < l.groups; ++j){
    //        float *a = l.weights + j*l.nweights/l.groups;
    //        float *b = net.workspace;
    //        float *c = l.output + (i*l.groups + j)*n*m;
    //        im2col_cpu(net.input + (i*l.groups + j)*l.c/l.groups*l.h*l.w,
    //            l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b);
    //        gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
    //    }
    //}
    int m = l.n;
    int k = l.size*l.size*l.c;
    int n = l.out_w*l.out_h;
    float *a = l.weights;
    float *b = net.workspace;
    float *c = l.output;
    im2col_cpu(net.input,l.c, l.h, l.w, l.size, l.stride, l.pad, b);
    gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
    //CONV_Padding_Relu(net.input,l.output,l.weights,l.c,l.n,l.size,l.stride,l.w,l.h,l.pad);
    if(l.batch_normalize){
        forward_batchnorm_layer(l, net);
    } else {
        add_bias(l.output, l.biases, l.batch, l.n, l.out_h*l.out_w);
    }
    activate_array(l.output, l.outputs*l.batch, l.activation);
}

// Build a convolutional layer descriptor. NOTE(review): weights, biases and
// output buffers are left unallocated here (the callocs are commented out) --
// presumably the weight loader allocates/points them elsewhere; confirm.
layer make_convolutional_layer(int batch, int h, int w, int c, int n, int groups, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int binary, int xnor, int adam)
{
    int i;
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = CONVOLUTIONAL;
    l.groups = groups;
    l.h = h;
    l.w = w;
    l.c = c;
    l.n = n;
    l.binary = binary;
    l.xnor = xnor;
    l.batch = batch;
    l.stride = stride;
    l.size = size;
    l.pad = padding;
    l.batch_normalize = batch_normalize;
    // l.weights = (float *)calloc(c/groups*n*size*size, sizeof(float));
    // l.biases = (float *)calloc(n, sizeof(float));
    l.nweights = c/groups*n*size*size;
    l.nbiases = n;
    int out_w = convolutional_out_width(l);
    int out_h = convolutional_out_height(l);
    l.out_h = out_h;
    l.out_w = out_w;
    l.out_c = n;
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = l.w * l.h * l.c;
    // l.output = (float *)calloc(l.batch*l.outputs, sizeof(float));
    l.forward = forward_convolutional_layer;
    if(batch_normalize){
        // l.scales = (float *)calloc(n, sizeof(float));
        // l.rolling_mean = (float *)calloc(n, sizeof(float));
        //l.rolling_variance = (float *)calloc(n, sizeof(float));
    }
    l.workspace_size = get_workspace_size(l);
    l.activation = activation;
    fprintf(stderr, "conv %5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BFLOPs\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c, (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.);
    return l;
}

// Nearest-neighbor upsample (forward) or downsample-accumulate (backward).
void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    int i, j, k, b;
    for(b = 0; b < batch; ++b){
        for(k = 0; k < c; ++k){
            for(j = 0; j < h*stride; ++j){
                for(i = 0; i < w*stride; ++i){
                    int in_index = b*w*h*c + k*w*h + (j/stride)*w + i/stride;
                    int out_index = b*w*h*c*stride*stride + k*w*h*stride*stride + j*w*stride + i;
                    if(forward) out[out_index] = scale*in[in_index];
                    else in[in_index] += scale*out[out_index];
                }
            }
        }
    }
}

// Upsample forward pass, simplified for batch 1 and scale 1.
void forward_upsample_layer(const layer l, network net)
{
    //fill_cpu(l.outputs*l.batch, 0, l.output, 1);
    //upsample_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.scale, l.output);
    int c = l.c;
    int h = l.h;
    int w = l.w;
    int stride = l.stride;
    float *in = net.input;
    float *out = l.output;
    int i, j, k;
    for(k = 0; k < c; ++k){
        for(j = 0; j < h*stride; ++j){
            for(i = 0; i < w*stride; ++i){
                int in_index = k*w*h + (j/stride)*w + i/stride;
                int out_index = k*w*h*stride*stride + j*w*stride + i;
                out[out_index] = in[in_index];
            }
        }
    }
}

// Build an upsample layer; a negative stride configures reverse (downsample) mode.
layer make_upsample_layer(int batch, int w, int h, int c, int stride)
{
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = UPSAMPLE;
    l.batch = batch;
    l.w = w;
    l.h = h;
    l.c = c;
    l.out_w = w*stride;
    l.out_h = h*stride;
    l.out_c = c;
    if(stride < 0){
        stride = -stride;
        l.reverse=1;
        l.out_w = w/stride;
        l.out_h = h/stride;
    }
    l.stride = stride;
    l.outputs = l.out_w*l.out_h*l.out_c;
    l.inputs = l.w*l.h*l.c;
    l.output = (float *)calloc(l.outputs*batch, sizeof(float));;
    l.forward = forward_upsample_layer;
    if(l.reverse) fprintf(stderr, "downsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
    else fprintf(stderr, "upsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
    return l;
}

// Concatenate the outputs of the configured input layers into l.output.
void forward_route_layer(const layer l, network net)
{
    int i, j;
    int offset = 0;
    for(i = 0; i < l.n; ++i){
        int index = l.input_layers[i];
        float *input = net.layers[index].output;
        int input_size = l.input_sizes[i];
        copy_cpu(input_size, input, 1, l.output + offset, 1);
        offset += input_size;
    }
}

// Build a route (concatenation) layer. NOTE(review): l.output is NOT
// allocated here (the calloc is commented out) although forward_route_layer
// writes to it -- presumably allocated elsewhere; confirm.
layer make_route_layer(int batch, int n, int *input_layers, int *input_sizes)
{
    fprintf(stderr,"route ");
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = ROUTE;
    l.batch = batch;
    l.n = n;
    l.input_layers = input_layers;
    l.input_sizes = input_sizes;
    int i;
    int outputs = 0;
    for(i = 0; i < n; ++i){
        fprintf(stderr," %d", input_layers[i]);
        outputs += input_sizes[i];
    }
    fprintf(stderr, "\n");
    l.outputs = outputs;
    l.inputs = outputs;
    // l.output = (float *)calloc(outputs*batch, sizeof(float));;
    l.forward = forward_route_layer;
    return l;
}

// Flat index into a YOLO output tensor: per anchor there are
// 4 box coords + 1 objectness + l.classes channels of l.w*l.h each.
static int entry_index(layer l, int batch, int location, int entry)
{
    int n = location / (l.w*l.h);
    int loc = location % (l.w*l.h);
    return batch*l.outputs + n*l.w*l.h*(4+l.classes+1) + entry*l.w*l.h + loc;
}

// YOLO detection head forward pass: copy the input through, then apply
// logistic activation to the x,y channels and to objectness + class scores.
void forward_yolo_layer(const layer l, network net)
{
    int i,j,b,t,n;
    //char line[256];
    //FILE *fp3;
    //char filename[256];
    //sprintf(filename, "yolo_layer_%d.txt", l.outputs);
    //printf("YOLO_layer:outputs=%d,%s\n",l.outputs,filename);
    // if( (fp3 = fopen(filename, "w")) == NULL)fprintf(stderr,"CANNOT OPEN\n");
    //int x;
    // for( x = 0; x < l.outputs; x++)
    //{
    //     sprintf(line, "%f\n", net.input[x]);
    //     if(fputs(line,fp3)<0)fprintf(stderr,"write FILE failed\n");
    // }
    // fclose(fp3);
    memcpy(l.output, net.input,
    // (tail of forward_yolo_layer's memcpy from the previous chunk)
    l.outputs*l.batch*sizeof(float));
    for (b = 0; b < l.batch; ++b){
        for(n = 0; n < l.n; ++n){
            int index = entry_index(l, b, n*l.w*l.h, 0);
            activate_array(l.output + index, 2*l.w*l.h, LOGISTIC);
            index = entry_index(l, b, n*l.w*l.h, 4);
            activate_array(l.output + index, (1+l.classes)*l.w*l.h, LOGISTIC);
        }
    }
    return ;
}

// Build a YOLOv3 detection layer. mask selects which of the `total`
// anchors this head uses; without a mask, anchors 0..n-1 are taken.
layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int classes)
{
    int i;
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = YOLO;
    l.n = n;
    l.total = total;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.c = n*(classes + 4 + 1);
    l.out_w = l.w;
    l.out_h = l.h;
    l.out_c = l.c;
    l.classes = classes;
    //l.cost = (float *)calloc(1, sizeof(float));
    l.biases = (float *)calloc(total*2, sizeof(float));
    if(mask) l.mask = mask;
    else{
        l.mask = (int *)calloc(n, sizeof(int));
        for(i = 0; i < n; ++i){
            l.mask[i] = i;
        }
    }
    //l.bias_updates = (float *)calloc(n*2, sizeof(float));
    l.outputs = h*w*n*(classes + 4 + 1);
    l.inputs = l.outputs;
    //l.truths = 90*(4 + 1);
    //l.delta = (float *)calloc(batch*l.outputs, sizeof(float));
    l.output = (float *)calloc(batch*l.outputs, sizeof(float));
    // Anchor biases default to 0.5 until real anchors are parsed from the cfg.
    for(i = 0; i < total*2; ++i){
        l.biases[i] = .5;
    }
    l.forward = forward_yolo_layer;
    fprintf(stderr, "detection\n");
    srand(0);
    return l;
}

/////////////////praser begin

// One "[type]" section of a cfg file together with its key=value options.
typedef struct{
    char *type;
    list *options;
}section;

list *read_cfg(char *filename);

// Map a "[section]" header string to its LAYER_TYPE (BLANK when unknown).
LAYER_TYPE string_to_layer_type(char * type)
{
    if (strcmp(type, "[shortcut]")==0) return SHORTCUT;
    if (strcmp(type, "[crop]")==0) return CROP;
    if (strcmp(type, "[cost]")==0) return COST;
    if (strcmp(type, "[detection]")==0) return DETECTION;
    if (strcmp(type, "[region]")==0) return REGION;
    if (strcmp(type, "[yolo]")==0) return YOLO;
    if (strcmp(type, "[local]")==0) return LOCAL;
    if (strcmp(type, "[conv]")==0 || strcmp(type, "[convolutional]")==0) return CONVOLUTIONAL;
    if (strcmp(type, "[deconv]")==0 || strcmp(type, "[deconvolutional]")==0) return DECONVOLUTIONAL;
    if (strcmp(type, "[activation]")==0) return ACTIVE;
    if (strcmp(type, "[logistic]")==0) return LOGXENT;
    if (strcmp(type, "[l2norm]")==0) return L2NORM;
    if (strcmp(type, "[net]")==0 || strcmp(type, "[network]")==0) return NETWORK;
    if (strcmp(type, "[crnn]")==0) return CRNN;
    if (strcmp(type, "[gru]")==0) return GRU;
    if (strcmp(type, "[lstm]") == 0) return LSTM;
    if (strcmp(type, "[rnn]")==0) return RNN;
    if (strcmp(type, "[conn]")==0 || strcmp(type, "[connected]")==0) return CONNECTED;
    if (strcmp(type, "[max]")==0 || strcmp(type, "[maxpool]")==0) return MAXPOOL;
    if (strcmp(type, "[reorg]")==0) return REORG;
    if (strcmp(type, "[avg]")==0 || strcmp(type, "[avgpool]")==0) return AVGPOOL;
    if (strcmp(type, "[dropout]")==0) return DROPOUT;
    if (strcmp(type, "[lrn]")==0 || strcmp(type, "[normalization]")==0) return NORMALIZATION;
    if (strcmp(type, "[batchnorm]")==0) return BATCHNORM;
    if (strcmp(type, "[soft]")==0 || strcmp(type, "[softmax]")==0) return SOFTMAX;
    if (strcmp(type, "[route]")==0) return ROUTE;
    if (strcmp(type, "[upsample]")==0) return UPSAMPLE;
    return BLANK;
}

// Free a section, its option nodes and pairs (key strings are freed,
// value strings are pointers into the key buffer).
void free_section(section *s)
{
    free(s->type);
    node *n = s->options->front;
    while(n){
        kvp *pair = (kvp *)n->val;
        free(pair->key);
        free(pair);
        node *next = n->next;
        free(n);
        n = next;
    }
    free(s->options);
    free(s);
}

// Parse up to n comma-separated floats from data into a (in place,
// commas are overwritten with '\0').
void parse_data(char *data, float *a, int n)
{
    int i;
    if(!data) return;
    char *curr = data;
    char *next = data;
    int done = 0;
    for(i = 0; i < n && !done; ++i){
        while(*++next !='\0' && *next != ',');
        if(*next == '\0') done = 1;
        *next = '\0';
        sscanf(curr, "%g", &a[i]);
        curr = next+1;
    }
}

// State threaded through layer parsing: current input geometry and network.
typedef struct size_params{
    int batch;
    int inputs;
    int h;
    int w;
    int c;
    int index;
    int time_steps;
    network *net;
} size_params;

// Build a convolutional layer from its cfg section.
layer parse_convolutional(list *options, size_params params)
{
    int n = option_find_int(options, "filters",1);
    int size = option_find_int(options, "size",1);
    int stride = option_find_int(options, "stride",1);
    int pad = option_find_int_quiet(options, "pad",0);
    int padding = option_find_int_quiet(options, "padding",0);
    int groups = option_find_int_quiet(options, "groups", 1);
    // "pad=1" means same-style padding of size/2, overriding "padding".
    if(pad) padding = size/2;
    char *activation_s = option_find_str(options, "activation", "logistic");
    ACTIVATION activation = get_activation(activation_s);
    int batch,h,w,c;
    h = params.h;
    w = params.w;
    c = params.c;
    batch=params.batch;
    if(!(h && w && c)) error("Layer before convolutional layer must output image.");
    int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
    int binary = option_find_int_quiet(options, "binary", 0);
    int xnor = option_find_int_quiet(options, "xnor", 0);
    layer l = make_convolutional_layer(batch,h,w,c,n,groups,size,stride,padding,activation, batch_normalize, binary, xnor, params.net->adam);
    l.flipped = option_find_int_quiet(options, "flipped", 0);
    l.dot = option_find_float_quiet(options, "dot", 0);
    return l;
}

// Parse the comma-separated "mask" list of a yolo section; sets *num to the
// count and returns a heap array (or NULL when no mask is given).
int *parse_yolo_mask(char *a, int *num)
{
    int *mask = 0;
    if(a){
        int len = strlen(a);
        int n = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (a[i] == ',') ++n;
        }
        mask = (int *)calloc(n, sizeof(int));
        for(i = 0; i < n; ++i){
            int val = atoi(a);
            mask[i] = val;
            a = strchr(a, ',')+1;
        }
        *num = n;
    }
    return mask;
}

// Build a YOLO head from its cfg section, including anchor biases.
layer parse_yolo(list *options, size_params params)
{
    int classes = option_find_int(options, "classes", 20);
    int total = option_find_int(options, "num", 1);
    int num = total;
    char *a = option_find_str(options, "mask", 0);
    int *mask = parse_yolo_mask(a, &num);
    layer l = make_yolo_layer(params.batch, params.w, params.h, num, total, mask, classes);
    assert(l.outputs == params.inputs);
    l.max_boxes = option_find_int_quiet(options, "max",90);
    l.jitter = option_find_float(options, "jitter", .2);
    l.ignore_thresh = option_find_float(options, "ignore_thresh", .5);
    l.truth_thresh = option_find_float(options, "truth_thresh", 1);
    l.random = option_find_int_quiet(options, "random", 0);
    a = option_find_str(options, "anchors", 0);
    if(a){
        int len = strlen(a);
        int n = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (a[i] == ',') ++n;
        }
        for(i = 0; i < n; ++i){
            float bias = atof(a);
            l.biases[i] = bias;
            a = strchr(a, ',')+1;
        }
    }
    return l;
}

layer
/* Continuation of the [shortcut] layer parser; the return type ("layer")
 * appears at the end of the preceding source line. */
parse_shortcut(list *options, size_params params, network *net)
{
    char *l = option_find(options, "from");
    int index = atoi(l);
    if(index < 0) index = params.index + index;   /* negative index is relative to the current layer */
    int batch = params.batch;
    layer from = net->layers[index];
    layer s = make_shortcut_layer(batch, index, params.w, params.h, params.c, from.out_w, from.out_h, from.out_c);
    char *activation_s = option_find_str(options, "activation", "linear");
    ACTIVATION activation = get_activation(activation_s);
    s.activation = activation;
    s.alpha = option_find_float_quiet(options, "alpha", 1);
    s.beta = option_find_float_quiet(options, "beta", 1);
    return s;
}

/* Parse an [upsample] section into an upsample layer. */
layer parse_upsample(list *options, size_params params, network *net)
{
    int stride = option_find_int(options, "stride",2);
    layer l = make_upsample_layer(params.batch, params.w, params.h, params.c, stride);
    l.scale = option_find_float_quiet(options, "scale", 1);
    return l;
}

/* Parse a [route] section: concatenates the outputs of the listed layers.
 * Output shape is taken from the first referenced layer; if a later layer's
 * spatial size differs, the route's out dims are zeroed. */
layer parse_route(list *options, size_params params, network *net)
{
    char *l = option_find(options, "layers");
    /* NOTE(review): strlen(l) runs before the NULL check below — crashes if
     * the "layers" key is missing instead of printing the error. */
    int len = strlen(l);
    if(!l) error("Route Layer must specify input layers");
    int n = 1;
    int i;
    for(i = 0; i < len; ++i){
        if (l[i] == ',') ++n;      /* number of entries = commas + 1 */
    }
    int *layers = (int *)calloc(n, sizeof(int));
    int *sizes = (int *)calloc(n, sizeof(int));
    for(i = 0; i < n; ++i){
        int index = atoi(l);
        l = strchr(l, ',')+1;
        if(index < 0) index = params.index + index;
        layers[i] = index;
        sizes[i] = net->layers[index].outputs;
    }
    int batch = params.batch;
    layer route_layer = make_route_layer(batch, n, layers, sizes);
    layer first = net->layers[layers[0]];
    route_layer.out_w = first.out_w;
    route_layer.out_h = first.out_h;
    route_layer.out_c = first.out_c;
    for(i = 1; i < n; ++i){
        int index = layers[i];
        layer next = net->layers[index];
        if(next.out_w == first.out_w && next.out_h == first.out_h){
            route_layer.out_c += next.out_c;   /* stack along channels */
        }else{
            route_layer.out_h = route_layer.out_w = route_layer.out_c = 0;
        }
    }
    return route_layer;
}

/* Numerically-stable softmax over n strided elements, with temperature. */
void softmax(float *input, int n, float temp, int stride, float *output)
{
    int i;
    float sum = 0;
    float largest = -FLT_MAX;
    for(i = 0; i < n; ++i){
        if(input[i*stride] > largest) largest = input[i*stride];
    }
    for(i = 0; i < n; ++i){
        /* subtract the max before exp to avoid overflow */
        float e = exp(input[i*stride]/temp - largest/temp);
        sum += e;
        output[i*stride] = e;
    }
    for(i = 0; i < n; ++i){
        output[i*stride] /= sum;
    }
}

/* Apply softmax independently over each (batch, group) slice. */
void softmax_cpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
    int g, b;
    for(b = 0; b < batch; ++b){
        for(g = 0; g < groups; ++g){
            softmax(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset);
        }
    }
}

/* Forward pass for a REGION (YOLOv2-style) layer: copies the input, applies
 * logistic activation to the x/y and objectness entries, and (optionally)
 * softmax over the class scores. */
void forward_region_layer(const layer l, network net)
{
    int i,j,b,t,n;
    memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
#ifndef GPU
    for (b = 0; b < l.batch; ++b){
        for(n = 0; n < l.n; ++n){
            int index = entry_index(l, b, n*l.w*l.h, 0);
            activate_array(l.output + index, 2*l.w*l.h, LOGISTIC);   /* x, y */
            index = entry_index(l, b, n*l.w*l.h, l.coords);
            if(!l.background) activate_array(l.output + index, l.w*l.h, LOGISTIC);   /* objectness */
            index = entry_index(l, b, n*l.w*l.h, l.coords + 1);
            //if(!l.softmax) activate_array(l.output + index, l.classes*l.w*l.h, LOGISTIC);
        }
    }
    if (l.softmax){
        int index = entry_index(l, 0, 0, l.coords + !l.background);
        softmax_cpu(net.input + index, l.classes + l.background, l.batch*l.n, l.inputs/l.n, l.w*l.h, 1, l.w*l.h, 1, l.output + index);
    }
    /* Debug dump of the raw layer input, kept disabled:
    // double time1,time2;
    // time1 = what_time_is_it_now();
    // char line[256];
    // FILE *fp3;
    // char filename[256];
    // sprintf(filename, "yolo_region_input_float32_%d.txt", 13*13*425);
    // printf("YOLO_layer:outputs=%d,%s\n",l.outputs,filename);
    // if( (fp3 = fopen(filename, "w")) == NULL)fprintf(stderr,"CANNOT OPEN\n");
    // int x;
    // for( x = 0; x < l.outputs; x++)
    // {
    //     sprintf(line, "%f\n", net.input[x]);
    //     if(fputs(line,fp3)<0)fprintf(stderr,"write FILE failed\n");
    // }
    // fclose(fp3);
    // time2 = what_time_is_it_now();
    // printf("Predicted in %f seconds.\n",time2 - time1);
    */
#endif
    if(!net.train) return;
}

/* Build a REGION layer. Anchors default to 0.5 until overwritten by the
 * parser; output buffer is allocated here (unlike some other layers in this
 * stripped-down port). */
layer make_region_layer(int batch, int w, int h, int n, int classes, int coords)
{
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = REGION;
    l.n = n;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.c = n*(classes + coords + 1);
    l.out_w = l.w;
    l.out_h = l.h;
    l.out_c = l.c;
    l.classes = classes;
    l.coords = coords;
    l.biases = (float *)calloc(n*2, sizeof(float));
    l.outputs = h*w*n*(classes + coords + 1);
    l.inputs = l.outputs;
    l.truths = 30*(l.coords + 1);
    l.output = (float *)calloc(batch*l.outputs, sizeof(float));
    int i;
    for(i = 0; i < n*2; ++i){
        l.biases[i] = .5;
    }
    l.forward = forward_region_layer;
    fprintf(stderr, "detection\n");
    srand(0);
    return l;
}

/* Parse a [region] section (YOLOv2 detection head). Tree/map support from
 * upstream darknet is commented out in this port. */
layer parse_region(list *options, size_params params)
{
    int coords = option_find_int(options, "coords", 4);
    int classes = option_find_int(options, "classes", 20);
    int num = option_find_int(options, "num", 1);
    layer l = make_region_layer(params.batch, params.w, params.h, num, classes, coords);
    assert(l.outputs == params.inputs);
    l.log = option_find_int_quiet(options, "log", 0);
    l.sqrt = option_find_int_quiet(options, "sqrt", 0);
    l.softmax = option_find_int(options, "softmax", 0);
    l.background = option_find_int_quiet(options, "background", 0);
    l.max_boxes = option_find_int_quiet(options, "max",30);
    l.jitter = option_find_float(options, "jitter", .2);
    l.rescore = option_find_int_quiet(options, "rescore",0);
    l.thresh = option_find_float(options, "thresh", .5);
    l.classfix = option_find_int_quiet(options, "classfix", 0);
    l.absolute = option_find_int_quiet(options, "absolute", 0);
    l.random = option_find_int_quiet(options, "random", 0);
    l.coord_scale = option_find_float(options, "coord_scale", 1);
    l.object_scale = option_find_float(options, "object_scale", 1);
    l.noobject_scale = option_find_float(options, "noobject_scale", 1);
    l.mask_scale = option_find_float(options, "mask_scale", 1);
    l.class_scale = option_find_float(options, "class_scale", 1);
    l.bias_match = option_find_int_quiet(options, "bias_match",0);
    char *tree_file = option_find_str(options, "tree", 0);
    // if (tree_file) l.softmax_tree = read_tree(tree_file);
    char *map_file = option_find_str(options, "map", 0);
    // if (map_file) l.map = read_map(map_file);
    char *a = option_find_str(options, "anchors", 0);
    if(a){
        int len = strlen(a);
        int n = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (a[i] == ',') ++n;
        }
        for(i = 0; i < n; ++i){
            float bias = atof(a);
            l.biases[i] = bias;
            a = strchr(a, ',')+1;
        }
    }
    return l;
}

/* Space-to-depth rearrangement used by the reorg layer.
 * forward!=0: scatter input to output; forward==0: gather. */
void reorg_cpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
    int b,i,j,k;
    int out_c = c/(stride*stride);
    for(b = 0; b < batch; ++b){
        for(k = 0; k < c; ++k){
            for(j = 0; j < h; ++j){
                for(i = 0; i < w; ++i){
                    int in_index = i + w*(j + h*(k + c*b));
                    int c2 = k % out_c;
                    int offset = k / out_c;
                    int w2 = i*stride + offset % stride;
                    int h2 = j*stride + offset / stride;
                    int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
                    if(forward) out[out_index] = x[in_index];
                    else out[in_index] = x[out_index];
                }
            }
        }
    }
}

/* Forward pass for the reorg layer. Only the plain (non-flatten,
 * non-extra, non-reverse) path is live in this port. */
void forward_reorg_layer(const layer l, network net)
{
    int i;
    //if(l.flatten){
    //    memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
    //    if(l.reverse){
    //        flatten(l.output, l.w*l.h, l.c, l.batch, 0);
    //    }else{
    //        flatten(l.output, l.w*l.h, l.c, l.batch, 1);
    //    }
    //} else if (l.extra) {
    //    for(i = 0; i < l.batch; ++i){
    //        copy_cpu(l.inputs, net.input + i*l.inputs, 1, l.output + i*l.outputs, 1);
    //    }
    //} else if (l.reverse){
    //    reorg_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.output);
    //} else {
        reorg_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 0, l.output);
    //}
}

/* Build a REORG layer. NOTE(review): l.output allocation is deliberately
 * commented out in this port (output_size is computed but unused) —
 * presumably the buffer is provided elsewhere; confirm before enabling
 * this layer's forward pass on its own. */
layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse, int flatten, int extra)
{
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = REORG;
    l.batch = batch;
    l.stride = stride;
    l.extra = extra;
    l.h = h;
    l.w = w;
    l.c = c;
    l.flatten = flatten;
    if(reverse){
        l.out_w = w*stride;
        l.out_h = h*stride;
        l.out_c = c/(stride*stride);
    }else{
        l.out_w = w/stride;
        l.out_h = h/stride;
        l.out_c = c*(stride*stride);
    }
    l.reverse = reverse;
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = h*w*c;
    if(l.extra){
        l.out_w = l.out_h = l.out_c = 0;
        l.outputs = l.inputs + l.extra;
    }
    if(extra){
        fprintf(stderr, "reorg %4d -> %4d\n", l.inputs, l.outputs);
    } else {
        fprintf(stderr, "reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
    }
    int output_size = l.outputs * batch;
    //l.output = (float *)calloc(output_size, sizeof(float));
    l.forward = forward_reorg_layer;
    return l;
}

/* Parse a [reorg] section. */
layer parse_reorg(list *options, size_params params)
{
    int stride = option_find_int(options, "stride",1);
    int reverse = option_find_int_quiet(options, "reverse",0);
    int flatten = option_find_int_quiet(options, "flatten",0);
    int extra = option_find_int_quiet(options, "extra",0);
    int batch,h,w,c;
    h = params.h;
    w = params.w;
    c = params.c;
    batch=params.batch;
    if(!(h && w && c)) error("Layer before reorg layer must output image.");
    layer layer = make_reorg_layer(batch,w,h,c,stride,reverse, flatten, extra);
    return layer;
}

/* Forward pass for max-pooling: for each output cell take the max over a
 * size x size window, remembering the argmax index in l.indexes. Out-of-range
 * window positions contribute -FLT_MAX (i.e. are ignored). */
void forward_maxpool_layer(layer l, network net)
{
    int b,i,j,k,m,n;
    int w_offset = -l.pad;
    int h_offset = -l.pad;
    int h = l.out_h;
    int w = l.out_w;
    int c = l.c;
    for(b = 0; b < l.batch; ++b){
        for(k = 0; k < c; ++k){
            for(i = 0; i < h; ++i){
                for(j = 0; j < w; ++j){
                    int out_index = j + w*(i + h*(k + c*b));
                    float max = -FLT_MAX;
                    int max_i = -1;
                    for(n = 0; n < l.size; ++n){
                        for(m = 0; m < l.size; ++m){
                            int cur_h = h_offset + i*l.stride + n;
                            int cur_w = w_offset + j*l.stride + m;
                            int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c));
                            int valid = (cur_h >= 0 && cur_h < l.h && cur_w >= 0 && cur_w < l.w);
                            float val = (valid != 0) ? net.input[index] : -FLT_MAX;
                            max_i = (val > max) ? index : max_i;
                            max = (val > max) ? val : max;
                        }
                    }
                    l.output[out_index] = max;
                    l.indexes[out_index] = max_i;
                }
            }
        }
    }
}

/* Build a MAXPOOL layer. NOTE(review): neither l.output/l.indexes nor
 * l.forward is set here (output_size is computed but unused) — presumably
 * assigned elsewhere in this port; verify before calling
 * forward_maxpool_layer directly. */
layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding)
{
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = MAXPOOL;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.c = c;
    l.pad = padding;
    l.out_w = (w + padding - size)/stride + 1;
    l.out_h = (h + padding - size)/stride + 1;
    l.out_c = c;
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = h*w*c;
    l.size = size;
    l.stride = stride;
    int output_size = l.out_h * l.out_w * l.out_c * batch;
    fprintf(stderr, "max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n", size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
    return l;
}

/* Parse a [maxpool] section. */
layer parse_maxpool(list *options, size_params params)
{
    int stride = option_find_int(options, "stride",1);
    int size = option_find_int(options, "size",stride);
    int padding = option_find_int_quiet(options, "padding", size-1);
    int batch,h,w,c;
    h = params.h;
    w = params.w;
    c = params.c;
    batch=params.batch;
    if(!(h && w && c)) error("Layer before maxpool layer must output image.");
    layer maxpool_layer = make_maxpool_layer(batch,h,w,c,size,stride,padding);
    return maxpool_layer;
}

/* Map a policy name from the cfg to the learning-rate policy enum;
 * unknown names fall back to CONSTANT with a warning. */
learning_rate_policy get_policy(char *s)
{
    if (strcmp(s, "random")==0) return RANDOM;
    if (strcmp(s, "poly")==0) return POLY;
    if (strcmp(s, "constant")==0) return CONSTANT;
    if (strcmp(s, "step")==0) return STEP;
    if (strcmp(s, "exp")==0) return EXP;
    if (strcmp(s, "sigmoid")==0) return SIG;
    if (strcmp(s, "steps")==0) return STEPS;
    fprintf(stderr, "Couldn't find policy %s, going with constant\n", s);
    return CONSTANT;
}

/* Fill network-wide settings from the [net]/[network] section. */
void parse_net_options(list *options, network *net)
{
    net->batch = option_find_int(options, "batch",1);
    net->learning_rate = option_find_float(options, "learning_rate", .001);
    net->momentum = option_find_float(options, "momentum", .9);
    net->decay = option_find_float(options, "decay", .0001);
    int subdivs = option_find_int(options, "subdivisions",1);
    net->time_steps = option_find_int_quiet(options, "time_steps",1);
    net->notruth = option_find_int_quiet(options, "notruth",0);
    /* effective batch = batch/subdivisions * time_steps */
    net->batch /= subdivs;
    net->batch *= net->time_steps;
    net->subdivisions = subdivs;
    net->random = option_find_int_quiet(options, "random", 0);
    net->adam = option_find_int_quiet(options, "adam", 0);
    if(net->adam){
        net->B1 = option_find_float(options, "B1", .9);
        net->B2 = option_find_float(options, "B2", .999);
        net->eps = option_find_float(options, "eps", .0000001);
    }
    net->h = option_find_int_quiet(options, "height",0);
    net->w = option_find_int_quiet(options, "width",0);
    net->c = option_find_int_quiet(options, "channels",0);
    net->inputs = option_find_int_quiet(options, "inputs", net->h * net->w * net->c);
    net->max_crop = option_find_int_quiet(options, "max_crop",net->w*2);
    net->min_crop = option_find_int_quiet(options, "min_crop",net->w);
    net->max_ratio = option_find_float_quiet(options, "max_ratio", (float) net->max_crop / net->w);
    net->min_ratio = option_find_float_quiet(options, "min_ratio", (float) net->min_crop / net->w);
    net->center = option_find_int_quiet(options, "center",0);
    net->clip = option_find_float_quiet(options, "clip", 0);
    net->angle = option_find_float_quiet(options, "angle", 0);
    net->aspect = option_find_float_quiet(options, "aspect", 1);
    net->saturation = option_find_float_quiet(options, "saturation", 1);
    net->exposure = option_find_float_quiet(options, "exposure", 1);
    net->hue = option_find_float_quiet(options, "hue", 0);
    if(!net->inputs && !(net->h && net->w && net->c)) error("No input parameters supplied");
    char *policy_s = option_find_str(options, "policy", "constant");
    net->policy = get_policy(policy_s);
    net->burn_in = option_find_int_quiet(options, "burn_in", 0);
    net->power = option_find_float_quiet(options, "power", 4);
    if(net->policy == STEP){
        net->step = option_find_int(options, "step", 1);
        net->scale = option_find_float(options, "scale", 1);
    } else if (net->policy == STEPS){
        /* parallel comma-separated lists of step counts and LR scales */
        char *l = option_find(options, "steps");
        char *p = option_find(options, "scales");
        if(!l || !p) error("STEPS policy must have steps and scales in cfg file");
        int len = strlen(l);
        int n = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (l[i] == ',') ++n;
        }
        int *steps = (int *)calloc(n, sizeof(int));
        float *scales = (float *)calloc(n, sizeof(float));
        for(i = 0; i < n; ++i){
            int step = atoi(l);
            float scale = atof(p);
            l = strchr(l, ',')+1;
            p = strchr(p, ',')+1;
            steps[i] = step;
            scales[i] = scale;
        }
        net->scales = scales;
        net->steps = steps;
        net->num_steps = n;
    } else if (net->policy == EXP){
        net->gamma = option_find_float(options, "gamma", 1);
    } else if (net->policy == SIG){
        net->gamma = option_find_float(options, "gamma", 1);
        net->step = option_find_int(options, "step", 1);
    } else if (net->policy == POLY || net->policy == RANDOM){
    }
    net->max_batches = option_find_int(options, "max_batches", 0);
}

/* True if the section header names the network block. */
int is_network(section *s)
{
    return (strcmp(s->type, "[net]")==0 || strcmp(s->type, "[network]")==0);
}

/* Read a darknet .cfg file and build the network: the first section must be
 * [net]/[network]; every following section becomes one layer, with each
 * layer's output shape feeding the next layer's params. */
network *parse_network_cfg(char *filename)
{
    list *sections = read_cfg(filename);
    node *n = sections->front;
    if(!n) error("Config file has no sections");
    network *net = make_network(sections->size - 1);
    net->gpu_index = -1;
    size_params params;
    section *s = (section *)n->val;
    list *options = s->options;
    if(!is_network(s)) error("First section must be [net] or [network]");
    parse_net_options(options, net);
    params.h = net->h;
    params.w = net->w;
    params.c = net->c;
    params.inputs = net->inputs;
    params.batch = net->batch;
    params.time_steps = net->time_steps;
    params.net = net;
    size_t workspace_size = 0;
    n = n->next;
    int count = 0;
    free_section(s);
    fprintf(stderr, "layer filters size input output\n");
    while(n){
        params.index = count;
        fprintf(stderr, "%5d ", count);
        s = (section *)n->val;
        options = s->options;
        //layer l = {0};
        layer l;
        memset(&l,0,sizeof(layer));
        LAYER_TYPE lt = string_to_layer_type(s->type);
        if(lt == CONVOLUTIONAL){
            l = parse_convolutional(options, params);
        }else if(lt == YOLO){
            l = parse_yolo(options, params);
        }else if(lt == ROUTE){
            l = parse_route(options, params, net);
        }else if(lt == UPSAMPLE){
            l = parse_upsample(options, params, net);
        }else if(lt == SHORTCUT){
            l = parse_shortcut(options, params, net);
        }else if(lt == REGION){
            l = parse_region(options, params);
        }else if(lt == YOLO){
            /* NOTE(review): unreachable — duplicate of the YOLO branch above. */
            l = parse_yolo(options, params);
        }else if(lt == MAXPOOL){
            l = parse_maxpool(options, params);
        }else if(lt == REORG){
            l = parse_reorg(options, params);
        }else{
            fprintf(stderr, "Type not recognized: %s\n", s->type);
        }
        l.clip = net->clip;
        l.truth = option_find_int_quiet(options, "truth", 0);
        l.onlyforward = option_find_int_quiet(options, "onlyforward", 0);
        l.stopbackward = option_find_int_quiet(options, "stopbackward", 0);
        l.dontsave = option_find_int_quiet(options, "dontsave", 0);
        // l.dontload = option_find_int_quiet(options, "dontload", 0);
        // l.dontloadscales = option_find_int_quiet(options, "dontloadscales", 0);
        //l.learning_rate_scale = option_find_float_quiet(options, "learning_rate", 1);
        l.smooth = option_find_float_quiet(options, "smooth", 0);
        option_unused(options);
        net->layers[count] = l;
        if (l.workspace_size > workspace_size) workspace_size = l.workspace_size;
        free_section(s);
        n = n->next;
        ++count;
        if(n){
            /* this layer's output shape is the next layer's input shape */
            params.h = l.out_h;
            params.w = l.out_w;
            params.c = l.out_c;
            params.inputs = l.outputs;
        }
    }
    free_list(sections);
    layer out = get_network_output_layer(net);
    net->outputs = out.outputs;
    net->output = out.output;
    //net->input = (float *)calloc(net->inputs*net->batch, sizeof(float));
    workspace_size = 0;//donot calloc workspace
    //if(workspace_size){
    //    //printf("%ld\n", workspace_size);
    //    net->workspace = (float *)calloc(1, workspace_size);
    //}
    return net;
}

/* Read a cfg file into a list of sections; '[' starts a section,
 * blank/'#'/';' lines are skipped, everything else is a key=value option. */
list *read_cfg(char *filename)
{
    FILE *file = fopen(filename, "r");
    if(file == 0) file_error(filename);
    char *line;
    int nu = 0;   /* current line number, for error messages */
    list *options = make_list();
    section *current = 0;
    while((line=fgetl(file)) != 0){
        ++ nu;
        strip(line);
        switch(line[0]){
            case '[':
                current = (section *)malloc(sizeof(section));
                list_insert(options, current);
                current->options = make_list();
                current->type = line;   /* section keeps ownership of the line */
                break;
            case '\0':
            case '#':
            case ';':
                free(line);
                break;
            default:
                if(!read_option(line, current->options)){
                    fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line);
                    free(line);
                }
                break;
        }
    }
    fclose(file);
    return options;
}

/* Read one convolutional layer's weights (biases, optional batch-norm
 * stats, then the filter weights) from an open weight file. */
void load_convolutional_weights(layer l, FILE *fp)
{
    int num = l.nweights;
    fread(l.biases, sizeof(float), l.n, fp);
    if (l.batch_normalize){
        fread(l.scales, sizeof(float), l.n, fp);
        fread(l.rolling_mean, sizeof(float), l.n, fp);
        fread(l.rolling_variance, sizeof(float), l.n, fp);
    }
    fread(l.weights, sizeof(float), num, fp);
}

/* Load weights for layers [start, cutoff) from a darknet weight file.
 * Header: major, minor, revision, then the 'seen' counter. */
void load_weights_upto(network *net, char *filename, int start, int cutoff)
{
    fprintf(stderr, "Loading weights from %s...", filename);
    fflush(stdout);
    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);
    int major;
    int minor;
    int revision;
    fread(&major, sizeof(int), 1, fp);
    fread(&minor, sizeof(int), 1, fp);
    fread(&revision, sizeof(int), 1, fp);
    printf("major=%d;minor=%d;revision=%d\n",major,minor,revision);// 0 2 0
    printf("if true ro false:%d\n",(major*10 + minor) >= 2 && major < 1000 && minor < 1000);
    if ((major*10 + minor) >= 2 && major < 1000 && minor < 1000){
        /* NOTE(review): 'seen' is read twice; per the note below this target
         * has sizeof(size_t)==4, so two 4-byte reads consume the file's
         * 8-byte seen field — confirm before reusing on a 64-bit host. */
        //fread(net->seen, sizeof(size_t), 1, fp);
        fread(net->seen, sizeof(size_t), 1, fp);
        fread(net->seen, sizeof(size_t), 1, fp);
    }else {
        int iseen = 0;
        fread(&iseen, sizeof(int), 1, fp);
        *net->seen = iseen;
    }
    //printf("sizeof(size_t)=%u\n",sizeof(size_t));// in my PC is 4
    int i;
    for(i = start; i < net->n && i < cutoff; ++i){
        layer l = net->layers[i];
        if(l.type == CONVOLUTIONAL){
            load_convolutional_weights(l, fp);
        }
    }
    fprintf(stderr, "Done!\n");
    fclose(fp);
}

/* Load all layers' weights. */
void load_weights(network *net, char *filename)
{
    load_weights_upto(net, filename, 0, net->n);
}

///////////////// parser end

///////////////// network begin

/* Build the default data-loading arguments from network settings. */
load_args get_base_args(network *net)
{
    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    args.size = net->w;
    args.min = net->min_crop;
    args.max = net->max_crop;
    args.angle = net->angle;
    args.aspect = net->aspect;
    args.exposure = net->exposure;
    args.center = net->center;
    args.saturation = net->saturation;
    args.hue = net->hue;
    return args;
}

/* Parse cfg and (in this port) skip host-side weight loading — the
 * upstream load_weights call is commented out. */
network *load_network(char *cfg, char *weights, int clear)
{
    network *net = parse_network_cfg(cfg);
    //if(weights && weights[0] != 0){
    //    load_weights(net, weights);
    //}
    if(clear) (*net->seen) = 0;
    return net;
}

/* Human-readable name for a layer type enum. */
char *get_layer_string(LAYER_TYPE a)
{
    switch(a){
        case CONVOLUTIONAL:
            return "convolutional";
        case ACTIVE:
            return "activation";
        case LOCAL:
            return "local";
        case DECONVOLUTIONAL:
            return "deconvolutional";
        case CONNECTED:
            return "connected";
        case RNN:
            return "rnn";
        case GRU:
            return "gru";
        case LSTM:
            return "lstm";
        case CRNN:
            return "crnn";
        case MAXPOOL:
            return "maxpool";
        case REORG:
            return "reorg";
        case AVGPOOL:
            return "avgpool";
        case SOFTMAX:
            return "softmax";
        case DETECTION:
            return "detection";
        case REGION:
            return "region";
        case YOLO:
            return "yolo";
        case DROPOUT:
            return "dropout";
        case CROP:
            return "crop";
        case COST:
            return "cost";
        case ROUTE:
            return "route";
        case SHORTCUT:
            return "shortcut";
        case NORMALIZATION:
            return "normalization";
        case BATCHNORM:
            return "batchnorm";
        default:
            break;
    }
    return "none";
}

/* Allocate an empty network with room for n layers. */
network *make_network(int n)
{
    network *net = (network *)calloc(1, sizeof(network));
    net->n = n;
    net->layers = (layer *)calloc(net->n, sizeof(layer));
    net->seen = (size_t *)calloc(1, sizeof(size_t));
    net->t = (int *)calloc(1, sizeof(int));
    net->cost = (float *)calloc(1, sizeof(float));
    return net;
}

/* Run each layer's forward function in order, chaining each layer's
 * output into the next layer's input. Works on a local copy of *netp so
 * the caller's input pointer is not clobbered. */
void forward_network(network *netp)
{
    network net = *netp;
    int i;
    for(i = 0; i < net.n; ++i){
        net.index = i;
        layer l = net.layers[i];
        l.forward(l, net);
        net.input = l.output;
        // printf("layer [%d]\n",i);
    }
}

/* Set the softmax temperature on every layer. */
void set_temp_network(network *net, float t)
{
    int i;
    for(i = 0; i < net->n; ++i){
        net->layers[i].temperature = t;
    }
}

/* Set the batch size on the network and every layer. */
void set_batch_network(network *net, int b)
{
    net->batch = b;
    int i;
    for(i = 0; i < net->n; ++i){
        net->layers[i].batch = b;
    }
}

/* Run inference on one input buffer; returns a pointer to the output
 * layer's buffer. Network state is saved and restored around the call. */
float *network_predict(network *net, float *input)
{
    network orig = *net;
    net->input = input;
    net->truth = 0;
    net->train = 0;
    net->delta = 0;
    forward_network(net);
    float *out = net->output;
    *net = orig;
    return out;
}

/* Count detections in a YOLO layer whose objectness exceeds thresh. */
int yolo_num_detections(layer l, float thresh)
{
    int i, n;
    int count = 0;
    for (i = 0; i < l.w*l.h; ++i){
        for(n = 0; n < l.n; ++n){
            int obj_index = entry_index(l, 0, n*l.w*l.h + i, 4);
            if(l.output[obj_index] > thresh){
                ++count;
            }
        }
    }
    return count;
}

/* Total detections across all detection-type layers in the network. */
int num_detections(network *net, float thresh)
{
    int i;
    int s = 0;
    for(i = 0; i < net->n; ++i){
        layer l = net->layers[i];
        if(l.type == YOLO){
            s += yolo_num_detections(l, thresh);
        }
        if(l.type == DETECTION || l.type == REGION){
            s += l.w*l.h*l.n;   /* region/detection layers report all cells */
        }
    }
    return s;
}

/* Allocate the detection array (and per-detection prob arrays) sized by
 * num_detections; writes the count through *num if non-NULL. */
detection *make_network_boxes(network *net, float thresh, int *num)
{
    layer l = net->layers[net->n - 1];
    int i;
    int nboxes = num_detections(net, thresh);
    //printf("num_detections nboxes = %d\n",nboxes);
    if(num) *num = nboxes;
    detection *dets = (detection *)calloc(nboxes, sizeof(detection));
    for(i = 0; i < nboxes; ++i){
        dets[i].prob = (float *)calloc(l.classes, sizeof(float));
    }
    return dets;
}

/* Decode one YOLO box: x/y offsets within cell (i,j) over the layer grid
 * (lw,lh), w/h from exp() times the anchor, over the network size (w,h). */
box get_yolo_box(float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, int stride)
{
    box b;
    b.x = (i + x[index + 0*stride]) / lw;
    b.y = (j + x[index + 1*stride]) / lh;
    b.w = exp(x[index + 2*stride]) * biases[2*n] / w;
    b.h = exp(x[index + 3*stride]) * biases[2*n+1] / h;
    return b;
}

/* Undo the letterbox scaling so boxes refer to the original w x h image;
 * if !relative, convert to absolute pixel coordinates. */
void correct_yolo_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
    int i;
    int new_w=0;
    int new_h=0;
    if (((float)netw/w) < ((float)neth/h)) {
        new_w = netw;
        new_h = (h * netw)/w;
    } else {
        new_h = neth;
        new_w = (w * neth)/h;
    }
    for (i = 0; i < n; ++i){
        box b = dets[i].bbox;
        b.x = (b.x - (netw - new_w)/2./netw) / ((float)new_w/netw);
        b.y = (b.y - (neth - new_h)/2./neth) / ((float)new_h/neth);
        b.w *= (float)netw/new_w;
        b.h *= (float)neth/new_h;
        if(!relative){
            b.x *= w;
            b.w *= w;
            b.y *= h;
            b.h *= h;
        }
        dets[i].bbox = b;
    }
}

/* Extract above-threshold detections from a YOLO layer into dets;
 * returns how many were written. */
int get_yolo_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, int relative, detection *dets)
{
    int i,j,n;
    float *predictions = l.output;
    // if (l.batch == 2) avg_flipped_yolo(l);
    int count = 0;
    for (i = 0; i < l.w*l.h; ++i){
        int row = i / l.w;
        int col = i % l.w;
        for(n = 0; n < l.n; ++n){
            int obj_index = entry_index(l, 0, n*l.w*l.h + i, 4);
            float objectness = predictions[obj_index];
            if(objectness <= thresh) continue;
            int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
            dets[count].bbox = get_yolo_box(predictions, l.biases, l.mask[n], box_index, col, row, l.w, l.h, netw, neth, l.w*l.h);
            dets[count].objectness = objectness;
            dets[count].classes = l.classes;
            for(j = 0; j < l.classes; ++j){
                int class_index = entry_index(l, 0, n*l.w*l.h + i, 4 + 1 + j);
                float prob = objectness*predictions[class_index];
                dets[count].prob[j] = (prob > thresh) ? prob : 0;
            }
            ++count;
        }
    }
    correct_yolo_boxes(dets, count, w, h, netw, neth, relative);
    return count;
}

/* Decode one REGION box (grid and network share the w,h denominators). */
box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h, int stride)
{
    box b;
    b.x = (i + x[index + 0*stride]) / w;
    b.y = (j + x[index + 1*stride]) / h;
    b.w = exp(x[index + 2*stride]) * biases[2*n] / w;
    b.h = exp(x[index + 3*stride]) * biases[2*n+1] / h;
    return b;
}

/* Same letterbox correction as correct_yolo_boxes, for REGION layers. */
void correct_region_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
    int i;
    int new_w=0;
    int new_h=0;
    if (((float)netw/w) < ((float)neth/h)) {
        new_w = netw;
        new_h = (h * netw)/w;
    } else {
        new_h = neth;
        new_w = (w * neth)/h;
    }
    for (i = 0; i < n; ++i){
        box b = dets[i].bbox;
        b.x = (b.x - (netw - new_w)/2./netw) / ((float)new_w/netw);
        b.y = (b.y - (neth - new_h)/2./neth) / ((float)new_h/neth);
        b.w *= (float)netw/new_w;
        b.h *= (float)neth/new_h;
        if(!relative){
            b.x *= w;
            b.w *= w;
            b.y *= h;
            b.h *= h;
        }
        dets[i].bbox = b;
    }
}

/* Extract all detections from a REGION layer into dets (one slot per
 * anchor per cell). When l.batch == 2 the second image is assumed to be a
 * horizontal flip and is averaged with the first. */
void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets)
{
    int i,j,n,z;
    float *predictions = l.output;
    if (l.batch == 2) {
        float *flip = l.output + l.outputs;
        for (j = 0; j < l.h; ++j) {
            for (i = 0; i < l.w/2; ++i) {
                for (n = 0; n < l.n; ++n) {
                    for(z = 0; z < l.classes + l.coords + 1; ++z){
                        int i1 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + i;
                        int i2 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + (l.w - i - 1);
                        float swap = flip[i1];
                        flip[i1] = flip[i2];
                        flip[i2] = swap;
                        if(z == 0){
                            /* x offsets must be negated when mirroring */
                            flip[i1] = -flip[i1];
                            flip[i2] = -flip[i2];
                        }
                    }
                }
            }
        }
        for(i = 0; i < l.outputs; ++i){
            l.output[i] = (l.output[i] + flip[i])/2.;
        }
    }
    for (i = 0; i < l.w*l.h; ++i){
        int row = i / l.w;
        int col = i % l.w;
        for(n = 0; n < l.n; ++n){
            int index = n*l.w*l.h + i;
            for(j = 0; j < l.classes; ++j){
                dets[index].prob[j] = 0;
            }
            int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords);
            int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
            int mask_index = entry_index(l, 0, n*l.w*l.h + i, 4);
            float scale = l.background ? 1 : predictions[obj_index];
            dets[index].bbox = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h, l.w*l.h);
            dets[index].objectness = scale > thresh ? scale : 0;
            if(dets[index].mask){
                for(j = 0; j < l.coords - 4; ++j){
                    dets[index].mask[j] = l.output[mask_index + j*l.w*l.h];
                }
            }
            /* NOTE(review): this outer class_index is shadowed by the one in
             * the loop below and is otherwise unused. */
            int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + !l.background);
            if(dets[index].objectness){
                for(j = 0; j < l.classes; ++j){
                    int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + j);
                    float prob = scale*predictions[class_index];
                    dets[index].prob[j] = (prob > thresh) ? prob : 0;
                }
            }
        }
    }
    correct_region_boxes(dets, l.w*l.h*l.n, w, h, netw, neth, relative);
}

/* Walk the network and fill dets from every YOLO/REGION layer, advancing
 * the write pointer after each layer. */
void fill_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, detection *dets)
{
    int j;
    for(j = 0; j < net->n; ++j){
        layer l = net->layers[j];
        if(l.type == YOLO){
            int count = get_yolo_detections(l, w, h, net->w, net->h, thresh, map, relative, dets);
            dets += count;
        }
        if(l.type == REGION){
            get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, dets);
            dets += l.w*l.h*l.n;
        }
    }
}

/* Allocate and fill the detection array for the current network output. */
detection *get_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num)
{
    detection *dets = make_network_boxes(net, thresh, num);
    fill_network_boxes(net, w, h, thresh, hier, map, relative, dets);
    return dets;
}

/* Free the detection array created by make_network_boxes/get_network_boxes. */
void free_detections(detection *dets, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        free(dets[i].prob);
        if(dets[i].mask) free(dets[i].mask);
    }
    free(dets);
}

int network_width(network *net){return net->w;}
int network_height(network *net){return net->h;}

/* Last non-COST layer, used as the network's output layer. */
layer get_network_output_layer(network *net)
{
    int i;
    for(i = net->n - 1; i >= 0; --i){
        if(net->layers[i].type != COST) break;
    }
    return net->layers[i];
}

/* Release every layer plus the network's own buffers. */
void free_network(network *net)
{
    int i;
    for(i = 0; i < net->n; ++i){
        free_layer(net->layers[i]);
    }
    free(net->layers);
    if(net->input) free(net->input);
    if(net->truth) free(net->truth);
    free(net);
}

/* Duplicate of get_network_output_layer kept for API compatibility. */
layer network_output_layer(network *net)
{
    int i;
    for(i = net->n - 1; i >= 0; --i){
        if(net->layers[i].type != COST) break;
    }
    return net->layers[i];
}

int network_inputs(network *net)
{
    return net->layers[0].inputs;
}

int network_outputs(network *net)
{
    return network_output_layer(net).outputs;
}

float *network_output(network *net)
{
    return network_output_layer(net).output;
}

////////////////// network end

////////////////////// box begin

/* qsort comparator for detections: descending by prob of b.sort_class
 * (or by objectness when sort_class < 0). */
int nms_comparator(const void *pa, const void *pb)
{
    detection a = *(detection *)pa;
    detection b = *(detection *)pb;
    float diff = 0;
    if(b.sort_class >= 0){
        diff = a.prob[b.sort_class] - b.prob[b.sort_class];
    } else {
        diff = a.objectness - b.objectness;
    }
    if(diff < 0) return 1;
    else if(diff > 0) return -1;
    return 0;
}

/* Length of the overlap of two 1-D segments given centers and widths;
 * negative when they do not overlap. */
float overlap(float x1, float w1, float x2, float w2)
{
    float l1 = x1 - w1/2;
    float l2 = x2 - w2/2;
    float left = l1 > l2 ? l1 : l2;
    float r1 = x1 + w1/2;
    float r2 = x2 + w2/2;
    float right = r1 < r2 ? r1 : r2;
    return right - left;
}

float box_intersection(box a, box b)
{
    float w = overlap(a.x, a.w, b.x, b.w);
    float h = overlap(a.y, a.h, b.y, b.h);
    if(w < 0 || h < 0) return 0;
    float area = w*h;
    return area;
}

float box_union(box a, box b)
{
    float i = box_intersection(a, b);
    float u = a.w*a.h + b.w*b.h - i;
    return u;
}

/* Intersection-over-union of two boxes. */
float box_iou(box a, box b)
{
    return box_intersection(a, b)/box_union(a, b);
}

/* Per-class non-maximum suppression: zero-objectness detections are moved
 * to the tail, then for each class the rest are sorted by that class's
 * probability and overlapping (IoU > thresh) lower-scored boxes are zeroed. */
void do_nms_sort(detection *dets, int total, int classes, float thresh)
{
    int i, j, k;
    k = total-1;
    for(i = 0; i <= k; ++i){
        if(dets[i].objectness == 0){
            detection swap = dets[i];
            dets[i] = dets[k];
            dets[k] = swap;
            --k;
            --i;   /* re-examine the element swapped into position i */
        }
    }
    total = k+1;
    for(k = 0; k < classes; ++k){
        for(i = 0; i < total; ++i){
            dets[i].sort_class = k;
        }
        qsort(dets, total, sizeof(detection), nms_comparator);
        for(i = 0; i < total; ++i){
            if(dets[i].prob[k] == 0) continue;
            box a = dets[i].bbox;
            for(j = i+1; j < total; ++j){
                box b = dets[j].bbox;
                if (box_iou(a, b) > thresh){
                    dets[j].prob[k] = 0;
                }
            }
        }
    }
}

////////////////////// box end

////////////////////// image begin

/* 6-color palette used to derive per-class box colors. */
float colors[6][3] = { {1,0,1}, {0,0,1},{0,1,1},{0,1,0},{1,1,0},{1,0,0} };

/* Interpolate channel c of the palette at position x of max. */
float get_color(int c, int x, int max)
{
    float ratio = ((float)x/max)*5;
    int i = floor(ratio);
    int j = ceil(ratio);
    ratio -= i;
    float r = (1-ratio) * colors[i][c] + ratio*colors[j][c];
    //printf("%f\n", r);
    return r;
}

/* Pixel read that returns 0 for out-of-bounds coordinates/channels. */
static float get_pixel_extend(image m, int x, int y, int c)
{
    if(x < 0 || x >= m.w || y < 0 || y >= m.h) return 0;
    /*
    if(x < 0) x = 0;
    if(x >= m.w) x = m.w-1;
    if(y < 0) y = 0;
    if(y >= m.h) y = m.h-1;
    */
    if(c < 0 || c >= m.c) return 0;
    return get_pixel(m, x, y, c);
}

/* Multiply source into dest at offset (dx,dy) (used for label alpha). */
void composite_image(image source, image dest, int dx, int dy)
{
    int x,y,k;
    for(k = 0; k < source.c; ++k){
        for(y = 0; y < source.h; ++y){
            for(x = 0; x < source.w; ++x){
                float val = get_pixel(source, x, y, k);
                float val2 = get_pixel_extend(dest, dx+x, dy+y, k);
                set_pixel(dest, dx+x, dy+y, k, val * val2);
            }
        }
    }
}

/* Copy image a surrounded by a white (value 1) border of given width. */
image border_image(image a, int border)
{
    image b = make_image(a.w + 2*border, a.h + 2*border, a.c);
    int x,y,k;
    for(k = 0; k < b.c; ++k){
        for(y = 0; y < b.h; ++y){
            for(x = 0; x < b.w; ++x){
                float val = get_pixel_extend(a, x - border, y - border, k);
                if(x - border < 0 || x - border >= a.w || y - border < 0 || y - border >= a.h) val = 1;
                set_pixel(b, x, y, k, val);
            }
        }
    }
    return b;
}

/* Deep copy of an image. */
image copy_image(image p)
{
    image copy = p;
    copy.data = (float *)calloc(p.h*p.w*p.c, sizeof(float));
    memcpy(copy.data, p.data, p.h*p.w*p.c*sizeof(float));
    return copy;
}

/* Place a and b side by side with horizontal gap dx on a white canvas. */
image tile_images(image a, image b, int dx)
{
    if(a.w == 0) return copy_image(b);
    image c = make_image(a.w + b.w + dx, (a.h > b.h) ? a.h : b.h, (a.c > b.c) ? a.c : b.c);
    fill_cpu(c.w*c.h*c.c, 1, c.data, 1);
    embed_image(a, c, 0, 0);
    composite_image(b, c, a.w + dx, 0);
    return c;
}

/* Render a text label from the pre-rendered character glyph atlas at a
 * font size bucket derived from the pixel size (capped at 7). */
image get_label(image **characters, char *string, int size)
{
    size = size/10;
    if(size > 7) size = 7;
    image label = make_empty_image(0,0,0);
    while(*string){
        image l = characters[size][(int)*string];
        image n = tile_images(label, l, -size - 1 + (size+1)/2);
        free_image(label);
        label = n;
        ++string;
    }
    image b = border_image(label, label.h*.25);
    free_image(label);
    return b;
}

/* Blit a rendered label onto image a at row r, column c, tinted by rgb. */
void draw_label(image a, int r, int c, image label, const float *rgb)
{
    int w = label.w;
    int h = label.h;
    if (r - h >= 0) r = r - h;   /* place above the anchor when it fits */
    int i, j, k;
    for(j = 0; j < h && j + r < a.h; ++j){
        for(i = 0; i < w && i + c < a.w; ++i){
            for(k = 0; k < label.c; ++k){
                float val = get_pixel(label, i, j, k);
                set_pixel(a, i+c, j+r, k, rgb[k] * val);
            }
        }
    }
}

/* Draw a 1-pixel rectangle outline in RGB; coordinates are clamped to
 * the image. Assumes a 3-channel planar image. */
void draw_box(image a, int x1, int y1, int x2, int y2, float r, float g, float b)
{
    //normalize_image(a);
    int i;
    if(x1 < 0) x1 = 0;
    if(x1 >= a.w) x1 = a.w-1;
    if(x2 < 0) x2 = 0;
    if(x2 >= a.w) x2 = a.w-1;
    if(y1 < 0) y1 = 0;
    if(y1 >= a.h) y1 = a.h-1;
    if(y2 < 0) y2 = 0;
    if(y2 >= a.h) y2 = a.h-1;
    for(i = x1; i <= x2; ++i){
        a.data[i + y1*a.w + 0*a.w*a.h] = r;
        a.data[i + y2*a.w + 0*a.w*a.h] = r;
        a.data[i + y1*a.w + 1*a.w*a.h] = g;
        a.data[i + y2*a.w + 1*a.w*a.h] = g;
        a.data[i + y1*a.w + 2*a.w*a.h] = b;
        a.data[i + y2*a.w + 2*a.w*a.h] = b;
    }
    for(i = y1; i <= y2; ++i){
        a.data[x1 + i*a.w + 0*a.w*a.h] = r;
        a.data[x2 + i*a.w + 0*a.w*a.h] = r;
        a.data[x1 + i*a.w + 1*a.w*a.h] = g;
        a.data[x2 + i*a.w + 1*a.w*a.h] = g;
        a.data[x1 + i*a.w + 2*a.w*a.h] = b;
        a.data[x2 + i*a.w + 2*a.w*a.h] = b;
    }
}

/* Draw a rectangle outline w pixels thick (w nested 1-pixel boxes). */
void draw_box_width(image a, int x1, int y1, int x2, int y2, int w, float r, float g, float b)
{
    int i;
    for(i = 0; i < w; ++i){
        draw_box(a, x1+i, y1+i, x2-i, y2-i, r, g, b);
    }
}

/* Wrap an existing float buffer as an image (no copy, no ownership). */
image float_to_image(int w, int h, int c, float *data)
{
    image out = make_empty_image(w,h,c);
    out.data = data;
    return out;
}

/* Binarize an image: 1 where pixel > thresh, else 0. */
image threshold_image(image im, float thresh)
{
    int i;
    image t = make_image(im.w, im.h, im.c);
    for(i = 0; i < im.w*im.h*im.c; ++i){
        t.data[i] = im.data[i]>thresh ? 1 : 0;
    }
    return t;
}

/* Draw boxes, labels and (optional) masks for all detections whose class
 * probability exceeds thresh; also prints each hit to stdout. */
void draw_detections(image im, detection *dets, int num, float thresh, char **names, image **alphabet, int classes)
{
    int i,j;
    for(i = 0; i < num; ++i){
        char labelstr[4096] = {0};
        int class_t = -1;   /* first class over threshold; -1 = none */
        for(j = 0; j < classes; ++j){
            if (dets[i].prob[j] > thresh){
                if (class_t < 0) {
                    strcat(labelstr, names[j]);
                    class_t = j;
                } else {
                    strcat(labelstr, ", ");
                    strcat(labelstr, names[j]);
                }
                printf("%s: %.0f%%\n", names[j], dets[i].prob[j]*100);
            }
        }
        if(class_t >= 0){
            int width = im.h * .006;
            //printf("%d %s: %.0f%%\n", i, names[class], prob*100);
            int offset = class_t*123457 % classes;   /* scatter class colors */
            float red = get_color(2,offset,classes);
            float green = get_color(1,offset,classes);
            float blue = get_color(0,offset,classes);
            float rgb[3];
            //width = prob*20+2;
            rgb[0] = red;
            rgb[1] = green;
            rgb[2] = blue;
            box b = dets[i].bbox;
            //printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
            int left = (b.x-b.w/2.)*im.w;
            int right = (b.x+b.w/2.)*im.w;
            int top = (b.y-b.h/2.)*im.h;
            int bot = (b.y+b.h/2.)*im.h;
            if(left < 0) left = 0;
            if(right > im.w-1) right = im.w-1;
            if(top < 0) top = 0;
            if(bot > im.h-1) bot = im.h-1;
            draw_box_width(im, left, top, right, bot, width, red, green, blue);
            if (alphabet) {
                image label = get_label(alphabet, labelstr, (im.h*.03));
                draw_label(im, top + width, left, label, rgb);
                free_image(label);
            }
            if (dets[i].mask){
                image mask = float_to_image(14, 14, 1, dets[i].mask);
                image resized_mask = resize_image(mask, b.w*im.w, b.h*im.h);
                image tmask = threshold_image(resized_mask, .5);
                embed_image(tmask, im, left, top);
                free_image(mask);
                free_image(resized_mask);
                free_image(tmask);
            }
        }
    }
}

////////////////////// image end

////////////////////////// HLS begin
/* Previous (smaller) tiling configuration, kept for reference:
//#define MIN(x,y) ((x)<(y)?(x):(y))
//#define S 2
//#define K 3
//
//#define Tn 1
//#define Tm 16
//#define Tr 13
//#define Tc 13
//#define OnChipIB_Width ((Tc-1)*S+K)
//#define OnChipIB_Height ((Tr-1)*S+K)
*/

#define MAX(x,y) ((x)>(y)?(x):(y))
#define MIN(x,y) ((x)<(y)?(x):(y))
#define S 2
#define K 3

#define Tn 4
#define Tm 32
#define Tr 26 #define Tc 26 #define OnChipIB_Width ((Tc-1)*S+K) #define OnChipIB_Height ((Tr-1)*S+K) #define ALPHA_BETA_MAX_NUM 1024 #define INTERWIDTH 20 void copy_mem2dev(uint8_t *orig,uint32_t byte_num, unsigned long in_buffer) { int fd = open("/dev/mem", O_RDWR); unsigned char *virtual_addr; uint32_t RequestByteNum;// must page if(byte_num%(4*1024)==0) RequestByteNum = byte_num; else { RequestByteNum = (byte_num/(4*1024)+1)*(4*1024); } virtual_addr = (unsigned char *)mmap(NULL, RequestByteNum, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)in_buffer); if(virtual_addr == MAP_FAILED) { perror("Virtual_addr_in mappong for absolute memory access failed!\n"); return; } memcpy(virtual_addr,orig,byte_num); munmap((void *)virtual_addr, byte_num); close(fd); } void copy_dev2mem(uint8_t *dst,uint32_t byte_num, unsigned long in_buffer) { int fd = open("/dev/mem", O_RDWR); unsigned char *virtual_addr; uint32_t RequestByteNum;// must page if(byte_num%(4*1024)==0) RequestByteNum = byte_num; else { RequestByteNum = (byte_num/(4*1024)+1)*(4*1024); } virtual_addr = (unsigned char *)mmap(NULL, RequestByteNum, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)in_buffer); if(virtual_addr == MAP_FAILED) { perror("Virtual_addr_in mappong for absolute memory access failed!\n"); return; } memcpy((uint8_t *)dst,virtual_addr,byte_num); munmap((void *)virtual_addr, byte_num); close(fd); } int copy_file2mem(char *bin_file,uint32_t byte_num,unsigned long in_buffer) { unsigned char *buffer = (unsigned char *)malloc(1024*1024); if(buffer==NULL){ printf("cannot malloc buffer 1024*1024 byte\n"); return -1; } FILE *fp; if( (fp = fopen(bin_file, "rb")) == NULL)fprintf(stderr,"CANNOT OPEN bin_file\n"); int rd_num; unsigned long offset = 0; while(rd_num = fread(buffer, sizeof(unsigned char), 1024*1024, fp)) { copy_mem2dev(buffer,rd_num, in_buffer+offset); // printf("rd_num=%d\n",rd_num); offset += rd_num; } printf("copy_file2mem offset=%d\n",offset); fclose(fp); free(buffer); return 0; } int 
copy_mem2file(char *bin_file,uint32_t byte_num,unsigned long in_buffer) { void *buffer = malloc(1024*1024); if(buffer==NULL){ printf("cannot malloc buffer 1024*1024 byte\n"); return -1; } FILE *fp; if( (fp = fopen(bin_file, "wb")) == NULL)fprintf(stderr,"CANNOT OPEN bin_file\n"); int x = byte_num; int addbyte; unsigned long offset = 0; while(addbyte=((x<1024*1024)?x:(1024*1024))) { copy_dev2mem((uint8_t *)buffer,addbyte, in_buffer+offset); fwrite(buffer , sizeof(unsigned char), addbyte, fp); x -= addbyte; offset += addbyte; } printf("copy_mem2file offset=%d\n",offset); fclose(fp); free(buffer); return 0; } //double what_time_is_it_now() //{ // struct timeval time; // if (gettimeofday(&time,NULL)){ // return 0; // } // return (double)time.tv_sec + (double)time.tv_usec * .000001; //} int YOLO2_FPGA(int In_Address,int Out_Address,int Weight_offset,int Beta_offset,const int InFM_num,const int OutFM_num, const int Kernel_size,const int Kernel_stride, const int Input_w,const int Input_h,const int Output_w,const int Output_h, const int Padding,const bool IsNL,const bool IsBN, const int TM,const int TN,const int TR,const int TC, const int mLoops,const int nLoops,const int rLoops,const int cLoops,const int LayerType, int InputQ,int OutputQ,int WeightQ,int BetaQ,unsigned int WEIGHT_BASE,unsigned int BETA_BASE) { int T2Rate; switch(Input_w) { case 26: T2Rate = 2; break; case 13: T2Rate = 4; break; default: T2Rate = 1; break; } const unsigned char TRow = (TR-1)*Kernel_stride+Kernel_size; int trow_loops = (int)ceil(((float)TRow/T2Rate)); unsigned int ap_idle; unsigned int ap_done; unsigned long int PhysicalAddress = YOLO2_BASEADDR; int map_len = 0x180; int fd = open("/dev/mem", O_RDWR); unsigned char *xbase_address; xbase_address = (unsigned char *)mmap(NULL, map_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)PhysicalAddress); if(xbase_address == MAP_FAILED) { perror("1:Init Mapping memory for absolute memory access failed.\n"); return -1; } while(1) { ap_idle = 
((ReadReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_AP_CTRL) >> 2) && 0x1); if(ap_idle) break; } //#define WEIGHT_BASE (0x10000000) //#define BETA_BASE (0x1C25F000) WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT_R_DATA, In_Address); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT1_DATA, In_Address); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT2_DATA, In_Address); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT3_DATA, In_Address); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT_R_DATA, Out_Address); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT1_DATA, Out_Address); // WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT2_DATA, Out_Address); // WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT3_DATA, Out_Address); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_WEIGHT_DATA, WEIGHT_BASE + Weight_offset*4); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_BETA_DATA, BETA_BASE + Beta_offset*4); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INFM_NUM_DATA, InFM_num); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTFM_NUM_DATA, OutFM_num); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_KERNEL_SIZE_DATA, Kernel_size); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_KERNEL_STRIDE_DATA, Kernel_stride); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT_W_DATA, Input_w); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT_H_DATA, Input_h); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT_W_DATA, Output_w); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT_H_DATA, Output_h); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_PADDING_DATA, Padding); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_ISNL_DATA, IsNL); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_ISBN_DATA, IsBN); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_TM_DATA, TM); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_TN_DATA, TN); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_TR_DATA, TR); 
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_TC_DATA, TC); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_MLOOPS_DATA, mLoops); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_NLOOPS_DATA, nLoops); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_RLOOPS_DATA, rLoops); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_CLOOPS_DATA, cLoops); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_LAYERTYPE_DATA, LayerType); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUTQ_DATA, InputQ); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUTQ_DATA, OutputQ); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_WEIGHTQ_DATA, WeightQ); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_BETAQ_DATA, BetaQ); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_TROW_LOOPS_DATA, trow_loops); // double time1,time2; // time1 = what_time_is_it_now(); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_GIE, 0x0); WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_AP_CTRL, 0x1);//Start while(1) { ap_done = ((ReadReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_AP_CTRL) >> 1) && 0x1); if(ap_done) break; } // time2 = what_time_is_it_now(); // printf("START TO DONE in %f seconds.\n",time2 - time1); munmap((void *)xbase_address, map_len); close(fd); return 0; } ////////////////////////////////////////////////////////PL v3 end void yolov2_hls_ps(network *net, float *input,unsigned int WEIGHT_BASE,unsigned int BETA_BASE,unsigned int MEM_BASE) { int x; network orig = *net; net->input = input; int weight_offset[32] = {864, 18432, 73728, 8192, 73728, 294912, 32768, 294912, 1179648, 131072, 1179648, 131072, 1179648, 4718592, 524288, 4718592, 524288, 4718592, 9437184, 9437184, 32768, 11796480, 435200, 0, 0, 0, 0, 0, 0, 0, 0, 0}; int beta_offset[32] = {32, 64, 128, 64, 128, 256, 128, 256, 512, 256, 512, 256, 512, 1024, 512, 1024, 512, 1024, 1024, 1024, 64, 1024, 425, 0, 0, 0, 0, 0, 0, 0, 0, 0}; int offset_index = 0; double time1,time2; time1 = what_time_is_it_now(); 
copy_file2mem("weightsv2_comb_reorg_ap16.bin",(203767168)/2,WEIGHT_BASE);//->C253D80 printf("yolov2_w copy ok\n"); copy_file2mem("biasv2_comb_ap16.bin",(43044+4)/2,BETA_BASE);//->C268724 203812864 = C25F000 printf("yolov2_b copy ok\n"); time2 = what_time_is_it_now(); printf("Predicted in %f seconds.\n",time2 - time1); float *region_buf = (float *)calloc(13*13*432,sizeof(float)); if(!region_buf) printf("region_buf calloc fail\n"); #define MEM_LEN (416*416*32*2+208*208*32*2) unsigned int Memory_top = MEM_BASE; unsigned int Memory_bottom = MEM_BASE + MEM_LEN; int in_ptr[32]; int out_ptr[32]; ///////////////////// #define QNUM 23 int inputQ[QNUM+1]; int weightQ[QNUM]; int betaQ[QNUM]; FILE *Qin; Qin = fopen("yolov2_ap16_inout_maxQ_24.bin","rb"); if(!Qin) file_error("Qin error 1\n"); fread(inputQ,sizeof(int),QNUM+1,Qin); fclose(Qin); if(inputQ[20] < inputQ[21]) inputQ[21] = inputQ[20]; else inputQ[20] = inputQ[21]; for(x=0;x<QNUM+1;x++) printf("[%2d inputQ]=%2d\n",x,inputQ[x]); Qin = fopen("weightsv2_comb_reorg_ap16_maxQ_23.bin","rb"); if(!Qin) file_error("Qin error 2\n"); fread(weightQ,sizeof(int),QNUM,Qin); fclose(Qin); for(x=0;x<QNUM;x++) printf("[%2d weightQ]=%2d\n",x,weightQ[x]); Qin = fopen("biasv2_comb_ap16_maxQ_23.bin","rb"); if(!Qin) file_error("Qin error 4\n"); fread(betaQ,sizeof(int),QNUM,Qin); fclose(Qin); for(x=0;x<QNUM;x++) printf("[%2d betaQ]=%2d\n",x,betaQ[x]); const double LastLayerOutputPara = pow(2.0,-inputQ[23]); ///////////////////// #define ROUTE16_LEN (26*26*512*4/2) #define CONV27_LEN (13*13*256*4/2) #define CONV24_LEN (13*13*1024*4/2) int *input_tmp_mem = (int *)calloc(416*416*32/2,sizeof(int)); if(!input_tmp_mem) file_error("input_tmp_mem error \n"); int *region_input_buffer = (int *)calloc(13*13*432*4/2,sizeof(int)); if(!region_input_buffer) file_error("region_input_buffer error \n"); int tmp_in; short current_in,next_in; bool NextPixelInFlag = true; int InputPixelOffset = 0; for(x=0;x<416*416*3;x++)//1st Layer input Q14 { if(NextPixelInFlag) 
{ current_in = (short)(input[x]*pow(2.0,14)); NextPixelInFlag = false; } else { next_in = (short)(input[x]*pow(2.0,14)); tmp_in = (next_in<<16) + (current_in); input_tmp_mem[InputPixelOffset] = tmp_in; InputPixelOffset++; NextPixelInFlag = true; } } copy_mem2dev((uint8_t *)input_tmp_mem,416*416*3*4/2, MEM_BASE); free(input_tmp_mem); for(x=0;x<18;x++) { if(x%2==0) { in_ptr[x] = Memory_top; out_ptr[x] = Memory_bottom - net->layers[x].outputs*4/2 ; } else { in_ptr[x] = out_ptr[x-1]; out_ptr[x] = Memory_top; } } for(x=18;x<25;x++) { if(x%2==0) { in_ptr[x] = Memory_top; out_ptr[x] = Memory_bottom - ROUTE16_LEN - net->layers[x].outputs*4/2; }else { in_ptr[x] = out_ptr[x-1]; out_ptr[x] = Memory_top; } } in_ptr[26] = Memory_bottom - ROUTE16_LEN; out_ptr[26] = Memory_top; in_ptr[27] = Memory_top; out_ptr[27] = Memory_bottom - ROUTE16_LEN - CONV24_LEN - CONV27_LEN; in_ptr[29] = out_ptr[27]; out_ptr[29] = Memory_top; in_ptr[30] = Memory_top; out_ptr[30] = Memory_bottom - (net->layers[30].outputs + 13*13*3)*4/2; if(out_ptr[30]%(4*1024)!=0) { out_ptr[30] = (out_ptr[30]/(4*1024)-1)*(4*1024); } in_ptr[31] = out_ptr[30]; network netp = *net; int i; int j; int woffset = 0; int boffset = 0; int TR,TC,TM,TN; int output_w,output_h; int rLoops,cLoops,mLoops,nLoops; double time_sum = 0.0; int INPUTQ; for(i = 0; i < netp.n; ++i) { netp.index = i; layer l = netp.layers[i]; printf("Layer[%2d]: ",i); switch(l.type) { case CONVOLUTIONAL: printf("outputMemory:%8d;BN=%d;Activation=%d;conv %5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BFLOPs\n",l.outputs,l.batch_normalize,l.activation, l.n, l.size, l.size, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c, (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.); output_w = (l.w - l.size + 2*l.pad)/l.stride + 1 ; output_h = (l.h - l.size + 2*l.pad)/l.stride + 1 ; TR = MIN(((OnChipIB_Height-l.size)/l.stride+1),Tr);//keep Kernel_stride>=1 TR = MIN(output_h,TR); TC = MIN(((OnChipIB_Width-l.size)/l.stride+1),Tc); TC = 
MIN(output_w,TC); TM = MIN(l.n,Tm); TN = MIN(l.c,Tn); rLoops = (int)ceil(((float)output_h)/TR); cLoops = (int)ceil(((float)output_w)/TC); mLoops = (int)ceil(((float)l.n)/TM); nLoops = (int)ceil(((float)l.c)/TN); INPUTQ = inputQ[offset_index]; if(i==26) INPUTQ = inputQ[12]; time1 = what_time_is_it_now(); YOLO2_FPGA(in_ptr[i],out_ptr[i],woffset/2,boffset/2, l.c,l.n,l.size, l.stride,l.w,l.h,output_w,output_h, l.pad,l.activation==LEAKY?1:0,l.batch_normalize?1:0, TM,TN,TR,TC, mLoops,nLoops,rLoops,cLoops,0, INPUTQ,inputQ[offset_index+1],weightQ[offset_index],betaQ[offset_index], WEIGHT_BASE,BETA_BASE); time2 = what_time_is_it_now(); printf("Predicted in %f seconds.\n",time2 - time1); time_sum += (time2 - time1); woffset += weight_offset[offset_index]; boffset += beta_offset[offset_index]; offset_index++; break; case MAXPOOL: printf("outputMemory:%8d;max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n",l.outputs, l.size, l.size, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c); output_w = l.out_h; output_h = l.out_w; TR = MIN(((OnChipIB_Height-l.size)/l.stride+1),Tr);//keep Kernel_stride>=1 TC = MIN(((OnChipIB_Width-l.size)/l.stride+1),Tc); TR = MIN(output_h,TR); TC = MIN(output_w,TC); TM = MIN(Tm,Tn); TM = MIN(l.c,TM); TN = TM; rLoops = (int)ceil(((float)output_h)/TR); cLoops = (int)ceil(((float)output_w)/TC); mLoops = (int)ceil(((float)l.c)/TM); time1 = what_time_is_it_now(); YOLO2_FPGA(in_ptr[i],out_ptr[i],NULL,NULL,l.c,l.c, l.size,l.stride,l.w,l.h,output_w,output_h, 0,0,0,TM,TN,TR,TC,mLoops,1,rLoops,cLoops,1, inputQ[offset_index],inputQ[offset_index],INTERWIDTH,INTERWIDTH, WEIGHT_BASE,BETA_BASE); time2 = what_time_is_it_now(); printf("Predicted in %f seconds.\n",time2 - time1); time_sum += (time2 - time1); break; case REORG: printf("outputMemory:%8d;reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n",l.outputs, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c); output_w = 26; output_h = 32*13; TR = MIN(((OnChipIB_Height-l.stride)/l.stride+1),Tr);//keep Kernel_stride>=1 TR 
= MIN(output_h,TR); TC = MIN(((OnChipIB_Width-l.stride)/l.stride+1),Tc); TC = MIN(output_w,TC); TM = 4; TN = TM; rLoops = (int)ceil(((float)output_h)/TR); cLoops = (int)ceil(((float)output_w)/TC); mLoops = 1; time1 = what_time_is_it_now(); YOLO2_FPGA(in_ptr[i],out_ptr[i],NULL,NULL,1,4, l.stride,l.stride,52,32*26,output_w,output_h, 0,0,0,TM,TN,TR,TC,mLoops,1,rLoops,cLoops,2, inputQ[offset_index],inputQ[offset_index],INTERWIDTH,INTERWIDTH, WEIGHT_BASE,BETA_BASE); time2 = what_time_is_it_now(); printf("Predicted in %f seconds.\n",time2 - time1); time_sum += (time2 - time1); break; case ROUTE: printf("outputMemory:%8d;route ",l.outputs); for(j = 0; j < l.n; ++j){ printf(" %d", l.input_layers[j]); } printf("\n"); break; case REGION: // first=time(NULL); time1 = what_time_is_it_now(); printf("outputMemory:%8d;Detection\n",l.outputs); copy_dev2mem((uint8_t *)region_input_buffer,13*13*432*4/2, in_ptr[i]); bool NextPixelFlag = true; int OutputPixelOffset = 0; short current_p,next_p,output_p; int *Output_ptr = (int *)(region_input_buffer); for(j=0;j<l.outputs;j++) { if(NextPixelFlag) { int tmp_p = Output_ptr[OutputPixelOffset]; OutputPixelOffset++; current_p = tmp_p; next_p = tmp_p >> 16; output_p = current_p; NextPixelFlag = false; }else { output_p = next_p; NextPixelFlag = true; } region_buf[j] = output_p*LastLayerOutputPara; } netp.input = region_buf; //netp.input = in_ptr[i]; forward_region_layer(l,netp); time2 = what_time_is_it_now(); printf("Predicted in %f seconds.\n",time2 - time1); time_sum += (time2 - time1); break; } netp.input = l.output; } printf("TIME_SUM Predicted in %f seconds.\n",time_sum); *net = orig; free(region_input_buffer); free(region_buf); // free(Memory_buf); // free(Weight_buf); // free(Alpha_buf); // free(Beta_buf); } //////////////////////////HLS end #endif
pr49640.c
/* PR middle-end/49640 */
/* { dg-do compile } */
/* { dg-options "-O2 -std=gnu99 -fopenmp" } */

/* GCC regression test: both functions combine C99 variably-modified array
   parameters (x[P][Q][R], y[N][M][K]) with an OpenMP parallel loop nest.
   The test only needs to compile cleanly (dg-do compile); do not "clean up"
   the code shape, it is what exercises the middle-end path from the PR.  */

/* Copy the P x Q x R block x into y at offset (i, j, k), with the triple
   loop parallelized and the indices explicitly privatized.  */
void
foo (int N, int M, int K, int P, int Q, int R, int i, int j, int k,
     unsigned char x[P][Q][R], int y[N][M][K])
{
  int ii, jj, kk;
#pragma omp parallel for private(ii,jj,kk)
  for (ii = 0; ii < P; ++ii)
    for (jj = 0; jj < Q; ++jj)
      for (kk = 0; kk < R; ++kk)
        y[i + ii][j + jj][k + kk] = x[ii][jj][kk];
}

/* Same structure as foo, but with a float destination and an affine
   transform (factor * x + zero) applied per element.  */
void
bar (int N, int M, int K, int P, int Q, int R, int i, int j, int k,
     unsigned char x[P][Q][R], float y[N][M][K], float factor, float zero)
{
  int ii, jj, kk;
#pragma omp parallel for private(ii,jj,kk)
  for (ii = 0; ii < P; ++ii)
    for (jj = 0; jj < Q; ++jj)
      for (kk = 0; kk < R; ++kk)
        y[i + ii][j + jj][k + kk] = factor * x[ii][jj][kk] + zero;
}
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H

#include "../InternalHeaderCheck.h"

namespace Eigen {

namespace internal {

template<typename LhsScalar_, typename RhsScalar_> class level3_blocking;

/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
  int ResInnerStride>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor,ResInnerStride>
{
  typedef gebp_traits<RhsScalar,LhsScalar> Traits;

  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resIncr, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major:
    // C = A*B  <=>  C^T = B^T * A^T, so swap lhs/rhs (and rows/cols) and
    // delegate to the col-major specialization below.
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor,ResInnerStride>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resIncr,resStride,alpha,blocking,info);
  }
};

/* Specialization for a col-major destination matrix
 *    => Blocking algorithm following Goto's paper */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
  int ResInnerStride>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor,ResInnerStride>
{
  typedef gebp_traits<LhsScalar,RhsScalar> Traits;

  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static void run(Index rows, Index cols, Index depth,
    const LhsScalar* _lhs, Index lhsStride,
    const RhsScalar* _rhs, Index rhsStride,
    ResScalar* _res, Index resIncr, Index resStride,
    ResScalar alpha,
    level3_blocking<LhsScalar,RhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
    typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
    typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor,Unaligned,ResInnerStride> ResMapper;
    LhsMapper lhs(_lhs, lhsStride);
    RhsMapper rhs(_rhs, rhsStride);
    ResMapper res(_res, resStride, resIncr);

    Index kc = blocking.kc();                   // cache block size along the K direction
    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
    Index nc = (std::min)(cols,blocking.nc());  // cache block size along the N direction

    gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, LhsStorageOrder> pack_lhs;
    gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
    gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
    if(info)
    {
      // this is the parallel version!
      // Threads cooperate through the shared GemmParallelInfo array 'info':
      // each thread owns one horizontal slice of A (lhs_start/lhs_length)
      // and spin-waits on the sync/users counters of its peers.
      int tid = omp_get_thread_num();
      int threads = omp_get_num_threads();

      LhsScalar* blockA = blocking.blockA();
      eigen_internal_assert(blockA!=0);

      std::size_t sizeB = kc*nc;
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);

      // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
      for(Index k=0; k<depth; k+=kc)
      {
        const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'

        // In order to reduce the chance that a thread has to wait for the other,
        // let's start by packing B'.
        pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);

        // Pack A_k to A' in a parallel fashion:
        // each thread packs the sub block A_k,i to A'_i where i is the thread id.

        // However, before copying to A'_i, we have to make sure that no other thread is still using it,
        // i.e., we test that info[tid].users equals 0.
        // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
        while(info[tid].users!=0) {}
        info[tid].users = threads;

        pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);

        // Notify the other threads that the part A'_i is ready to go.
        info[tid].sync = k;

        // Computes C_i += A' * B' per A'_i
        for(int shift=0; shift<threads; ++shift)
        {
          int i = (tid+shift)%threads;

          // At this point we have to make sure that A'_i has been updated by the thread i,
          // we use testAndSetOrdered to mimic a volatile access.
          // However, no need to wait for the B' part which has been updated by the current thread!
          if (shift>0) {
            while(info[i].sync!=k) {
            }
          }

          gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
        }

        // Then keep going as usual with the remaining B'
        for(Index j=nc; j<cols; j+=nc)
        {
          const Index actual_nc = (std::min)(j+nc,cols)-j;

          // pack B_k,j to B'
          pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);

          // C_j += A' * B'
          gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
        }

        // Release all the sub blocks A'_i of A' for the current thread,
        // i.e., we simply decrement the number of users by 1
        for(Index i=0; i<threads; ++i)
#if !EIGEN_HAS_CXX11_ATOMIC
          #pragma omp atomic
#endif
          info[i].users -= 1;
      }
    }
    else
#endif // EIGEN_HAS_OPENMP
    {
      EIGEN_UNUSED_VARIABLE(info);

      // this is the sequential version!
      std::size_t sizeA = kc*mc;
      std::size_t sizeB = kc*nc;

      ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());

      const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;

      // For each horizontal panel of the rhs, and corresponding panel of the lhs...
      for(Index i2=0; i2<rows; i2+=mc)
      {
        const Index actual_mc = (std::min)(i2+mc,rows)-i2;

        for(Index k2=0; k2<depth; k2+=kc)
        {
          const Index actual_kc = (std::min)(k2+kc,depth)-k2;

          // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
          // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
          // Note that this panel will be read as many times as the number of blocks in the rhs's
          // horizontal panel which is, in practice, a very low number.
          pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);

          // For each kc x nc block of the rhs's horizontal panel...
          for(Index j2=0; j2<cols; j2+=nc)
          {
            const Index actual_nc = (std::min)(j2+nc,cols)-j2;

            // We pack the rhs's block into a sequential chunk of memory (L2 caching)
            // Note that this block will be read a very high number of times, which is equal to the number of
            // micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
            if((!pack_rhs_once) || i2==0)
              pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);

            // Everything is packed, we can now call the panel * block kernel:
            gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
          }
        }
      }
    }
  }

};

/*********************************************************************************
*  Specialization of generic_product_impl for "large" GEMM, i.e.,
*  implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/

// Functor object binding lhs/rhs/dest/alpha/blocking so the parallelizer can
// invoke a (row-range, col-range) slice of the product on each thread.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  void initParallelSession(Index num_threads) const
  {
    m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
    m_blocking.allocateA();
  }

  // Run the product restricted to rows [row, row+rows) and cols [col, col+cols);
  // cols==-1 means "all remaining columns".
  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              &m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              &m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.innerStride(), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  typedef typename Gemm::Traits Traits;

  protected:
    const Lhs& m_lhs;
    const Rhs& m_rhs;
    Dest& m_dest;
    Scalar m_actualAlpha;
    BlockingType& m_blocking;
};

template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;

// Base class holding the mc/nc/kc blocking sizes and the (possibly shared)
// packing buffers for the A and B panels.
template<typename LhsScalar_, typename RhsScalar_>
class level3_blocking
{
    typedef LhsScalar_ LhsScalar;
    typedef RhsScalar_ RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;

    Index m_mc;
    Index m_nc;
    Index m_kc;

  public:

    level3_blocking()
      : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline Index mc() const { return m_mc; }
    inline Index nc() const { return m_nc; }
    inline Index kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
};

// Blocking for products whose dimensions are all fixed at compile time:
// the packing buffers are statically sized members (no heap allocation).
template<int StorageOrder, typename LhsScalar_, typename RhsScalar_, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,LhsScalar_,RhsScalar_,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,RhsScalar_,LhsScalar_>::type,
      typename conditional<StorageOrder==RowMajor,LhsScalar_,RhsScalar_>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,RhsScalar_,LhsScalar_>::type LhsScalar;
    typedef typename conditional<Transpose,LhsScalar_,RhsScalar_>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth
    };

#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
    EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
    // Over-allocate raw storage and align the block pointers by hand below.
    EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
    EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif

  public:

    gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
#else
      this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
      this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
    }

    void initParallel(Index, Index, Index, Index)
    {}

    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateAll() {}
};

// Blocking for products with at least one runtime dimension: block sizes are
// computed at runtime and the packing buffers are lazily heap-allocated.
template<int StorageOrder, typename LhsScalar_, typename RhsScalar_, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,LhsScalar_,RhsScalar_,MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,RhsScalar_,LhsScalar_>::type,
      typename conditional<StorageOrder==RowMajor,LhsScalar_,RhsScalar_>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,RhsScalar_,LhsScalar_>::type LhsScalar;
    typedef typename conditional<Transpose,LhsScalar_,RhsScalar_>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    Index m_sizeA;
    Index m_sizeB;

  public:

    gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      if(l3_blocking)
      {
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
      }
      else  // no l3 blocking
      {
        Index n = this->m_nc;
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
      }

      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void initParallel(Index rows, Index cols, Index depth, Index num_threads)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
      Index m = this->m_mc;
      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
    }

    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
    }
};

} // end namespace internal

namespace internal {

template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  typedef typename Lhs::Scalar LhsScalar;
  typedef typename Rhs::Scalar RhsScalar;

  typedef internal::blas_traits<Lhs> LhsBlasTraits;
  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
  typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;

  typedef internal::blas_traits<Rhs> RhsBlasTraits;
  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
  typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;

  enum {
    MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
  };

  typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;

  template<typename Dst>
  static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    // See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=404 for a discussion and helper program
    // to determine the following heuristic.
    // EIGEN_GEMM_TO_COEFFBASED_THRESHOLD is typically defined to 20 in GeneralProduct.h,
    // unless it has been specialized by the user or for a given architecture.
    // Note that the condition rhs.rows()>0 was required because lazy product is (was?) not happy with empty inputs.
    // I'm not sure it is still required.
    if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
      lazyproduct::eval_dynamic(dst, lhs, rhs, internal::assign_op<typename Dst::Scalar,Scalar>());
    else
    {
      dst.setZero();
      scaleAndAddTo(dst, lhs, rhs, Scalar(1));
    }
  }

  template<typename Dst>
  static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
      lazyproduct::eval_dynamic(dst, lhs, rhs, internal::add_assign_op<typename Dst::Scalar,Scalar>());
    else
      scaleAndAddTo(dst,lhs, rhs, Scalar(1));
  }

  template<typename Dst>
  static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
      lazyproduct::eval_dynamic(dst, lhs, rhs, internal::sub_assign_op<typename Dst::Scalar,Scalar>());
    else
      scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
  }

  // dst += alpha * lhs * rhs — the entry point that sets up blocking and
  // dispatches to the blocked (possibly parallel) kernel.
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
  {
    eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
    if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
      return;

    if (dst.cols() == 1)
    {
      // Fallback to GEMV if either the lhs or rhs is a runtime vector
      typename Dest::ColXpr dst_vec(dst.col(0));
      return internal::generic_product_impl<Lhs,typename Rhs::ConstColXpr,DenseShape,DenseShape,GemvProduct>
        ::scaleAndAddTo(dst_vec, a_lhs, a_rhs.col(0), alpha);
    }
    else if (dst.rows() == 1)
    {
      // Fallback to GEMV if either the lhs or rhs is a runtime vector
      typename Dest::RowXpr dst_vec(dst.row(0));
      return internal::generic_product_impl<typename Lhs::ConstRowXpr,Rhs,DenseShape,DenseShape,GemvProduct>
        ::scaleAndAddTo(dst_vec, a_lhs.row(0), a_rhs, alpha);
    }

    // Strip transposes/conjugates/scalar multiples off the operands; the
    // extracted scalar factors are folded into actualAlpha.
    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);

    Scalar actualAlpha = combine_scalar_factors(alpha, a_lhs, a_rhs);

    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
            Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;

    typedef internal::gemm_functor<
      Scalar, Index,
      internal::general_matrix_matrix_product<
        Index,
        LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
        RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
        (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor, Dest::InnerStrideAtCompileTime>,
      ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;

    BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
    internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
        (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit);
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
dataset.h
#ifndef LIGHTGBM_DATASET_H_ #define LIGHTGBM_DATASET_H_ #include <LightGBM/utils/random.h> #include <LightGBM/utils/text_reader.h> #include <LightGBM/utils/openmp_wrapper.h> #include <LightGBM/meta.h> #include <LightGBM/config.h> #include <LightGBM/feature_group.h> #include <vector> #include <utility> #include <functional> #include <string> #include <unordered_set> #include <mutex> namespace LightGBM { /*! \brief forward declaration */ class DatasetLoader; /*! * \brief This class is used to store some meta(non-feature) data for training data, * e.g. labels, weights, initial scores, qurey level informations. * * Some details: * 1. Label, used for traning. * 2. Weights, weighs of records, optional * 3. Query Boundaries, necessary for lambdarank. * The documents of i-th query is in [ query_boundarise[i], query_boundarise[i+1] ) * 4. Query Weights, auto calculate by weights and query_boundarise(if both of them are existed) * the weight for i-th query is sum(query_boundarise[i] , .., query_boundarise[i+1]) / (query_boundarise[i + 1] - query_boundarise[i+1]) * 5. Initial score. optional. if exsitng, the model will boost from this score, otherwise will start from 0. */ class Metadata { public: /*! * \brief Null costructor */ Metadata(); /*! * \brief Initialization will load qurey level informations, since it is need for sampling data * \param data_filename Filename of data * \param init_score_filename Filename of initial score */ void Init(const char* data_filename, const char* initscore_file); /*! * \brief init as subset * \param metadata Filename of data * \param used_indices * \param num_used_indices */ void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices); /*! * \brief Initial with binary memory * \param memory Pointer to memory */ void LoadFromMemory(const void* memory); /*! \brief Destructor */ ~Metadata(); /*! 
* \brief Initial work, will allocate space for label, weight(if exists) and query(if exists) * \param num_data Number of training data * \param weight_idx Index of weight column, < 0 means doesn't exists * \param query_idx Index of query id column, < 0 means doesn't exists */ void Init(data_size_t num_data, int weight_idx, int query_idx); /*! * \brief Partition label by used indices * \param used_indices Indice of local used */ void PartitionLabel(const std::vector<data_size_t>& used_indices); /*! * \brief Partition meta data according to local used indices if need * \param num_all_data Number of total training data, including other machines' data on parallel learning * \param used_data_indices Indices of local used training data */ void CheckOrPartition(data_size_t num_all_data, const std::vector<data_size_t>& used_data_indices); void SetLabel(const label_t* label, data_size_t len); void SetWeights(const label_t* weights, data_size_t len); void SetQuery(const data_size_t* query, data_size_t len); /*! * \brief Set initial scores * \param init_score Initial scores, this class will manage memory for init_score. */ void SetInitScore(const double* init_score, data_size_t len); /*! * \brief Save binary data to file * \param file File want to write */ void SaveBinaryToFile(const VirtualFileWriter* writer) const; /*! * \brief Get sizes in byte of this object */ size_t SizesInByte() const; /*! * \brief Get pointer of label * \return Pointer of label */ inline const label_t* label() const { return label_.data(); } /*! * \brief Set label for one record * \param idx Index of this record * \param value Label value of this record */ inline void SetLabelAt(data_size_t idx, label_t value) { label_[idx] = value; } /*! * \brief Set Weight for one record * \param idx Index of this record * \param value Weight value of this record */ inline void SetWeightAt(data_size_t idx, label_t value) { weights_[idx] = value; } /*! 
* \brief Set Query Id for one record * \param idx Index of this record * \param value Query Id value of this record */ inline void SetQueryAt(data_size_t idx, data_size_t value) { queries_[idx] = static_cast<data_size_t>(value); } /*! * \brief Get weights, if not exists, will return nullptr * \return Pointer of weights */ inline const label_t* weights() const { if (!weights_.empty()) { return weights_.data(); } else { return nullptr; } } /*! * \brief Get data boundaries on queries, if not exists, will return nullptr * we assume data will order by query, * the interval of [query_boundaris[i], query_boundaris[i+1]) * is the data indices for query i. * \return Pointer of data boundaries on queries */ inline const data_size_t* query_boundaries() const { if (!query_boundaries_.empty()) { return query_boundaries_.data(); } else { return nullptr; } } /*! * \brief Get Number of queries * \return Number of queries */ inline data_size_t num_queries() const { return num_queries_; } /*! * \brief Get weights for queries, if not exists, will return nullptr * \return Pointer of weights for queries */ inline const label_t* query_weights() const { if (!query_weights_.empty()) { return query_weights_.data(); } else { return nullptr; } } /*! * \brief Get initial scores, if not exists, will return nullptr * \return Pointer of initial scores */ inline const double* init_score() const { if (!init_score_.empty()) { return init_score_.data(); } else { return nullptr; } } /*! * \brief Get size of initial scores */ inline int64_t num_init_score() const { return num_init_score_; } /*! \brief Disable copy */ Metadata& operator=(const Metadata&) = delete; /*! \brief Disable copy */ Metadata(const Metadata&) = delete; private: /*! \brief Load initial scores from file */ void LoadInitialScore(const char* initscore_file); /*! \brief Load wights from file */ void LoadWeights(); /*! \brief Load query boundaries from file */ void LoadQueryBoundaries(); /*! 
\brief Load query wights */ void LoadQueryWeights(); /*! \brief Filename of current data */ std::string data_filename_; /*! \brief Number of data */ data_size_t num_data_; /*! \brief Number of weights, used to check correct weight file */ data_size_t num_weights_; /*! \brief Label data */ std::vector<label_t> label_; /*! \brief Weights data */ std::vector<label_t> weights_; /*! \brief Query boundaries */ std::vector<data_size_t> query_boundaries_; /*! \brief Query weights */ std::vector<label_t> query_weights_; /*! \brief Number of querys */ data_size_t num_queries_; /*! \brief Number of Initial score, used to check correct weight file */ int64_t num_init_score_; /*! \brief Initial score */ std::vector<double> init_score_; /*! \brief Queries data */ std::vector<data_size_t> queries_; /*! \brief mutex for threading safe call */ std::mutex mutex_; bool weight_load_from_file_; bool query_load_from_file_; bool init_score_load_from_file_; }; /*! \brief Interface for Parser */ class Parser { public: /*! \brief virtual destructor */ virtual ~Parser() {} /*! * \brief Parse one line with label * \param str One line record, string format, should end with '\0' * \param out_features Output columns, store in (column_idx, values) * \param out_label Label will store to this if exists */ virtual void ParseOneLine(const char* str, std::vector<std::pair<int, double>>* out_features, double* out_label) const = 0; virtual int TotalColumns() const = 0; /*! * \brief Create a object of parser, will auto choose the format depend on file * \param filename One Filename of data * \param num_features Pass num_features of this data file if you know, <=0 means don't know * \param label_idx index of label column * \return Object of parser */ static Parser* CreateParser(const char* filename, bool has_header, int num_features, int label_idx); }; /*! 
\brief The main class of data set,
* used for training or validation.
*/
class Dataset {
 public:
  friend DatasetLoader;

  LIGHTGBM_EXPORT Dataset();

  LIGHTGBM_EXPORT Dataset(data_size_t num_data);

  // Build the internal feature groups from per-feature bin mappers and the
  // sampled non-zero statistics collected by DatasetLoader.
  void Construct(
    std::vector<std::unique_ptr<BinMapper>>& bin_mappers,
    int** sample_non_zero_indices,
    const int* num_per_col,
    size_t total_sample_cnt,
    const IOConfig& io_config);

  /*! \brief Destructor */
  LIGHTGBM_EXPORT ~Dataset();

  /*! \brief Check that another dataset uses the same feature layout and bin
  *  mappers as this one (feature counts, label column, per-feature bins). */
  LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const {
    if (num_features_ != other.num_features_) {
      return false;
    }
    if (num_total_features_ != other.num_total_features_) {
      return false;
    }
    if (label_idx_ != other.label_idx_) {
      return false;
    }
    for (int i = 0; i < num_features_; ++i) {
      if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) {
        return false;
      }
    }
    return true;
  }

  /*! \brief Push one dense row into the feature groups; no-op after FinishLoad.
  *  Columns that are not used (used_feature_map_[i] < 0) are skipped. */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) {
    if (is_finish_load_) { return; }
    for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) {
      int feature_idx = used_feature_map_[i];
      if (feature_idx >= 0) {
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]);
      }
    }
  }

  /*! \brief Push one sparse row given as (column_idx, value) pairs; no-op after
  *  FinishLoad. Out-of-range or unused columns are skipped. */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) {
    if (is_finish_load_) { return; }
    for (auto& inner_data : feature_values) {
      if (inner_data.first >= num_total_features_) { continue; }
      int feature_idx = used_feature_map_[inner_data.first];
      if (feature_idx >= 0) {
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second);
      }
    }
  }

  /*! \brief Push one value directly by group/sub-feature address (no mapping, no
  *  finish-load guard — caller must already have resolved the group). */
  inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) {
    feature_groups_[group]->PushData(tid, sub_feature, row_idx, value);
  }

  /*! \brief Map inner (used) feature index to the original column index. */
  inline int RealFeatureIndex(int fidx) const {
    return real_feature_idx_[fidx];
  }

  /*! \brief Map original column index to inner feature index (< 0 if unused). */
  inline int InnerFeatureIndex(int col_idx) const {
    return used_feature_map_[col_idx];
  }
  inline int Feature2Group(int feature_idx) const {
    return feature2group_[feature_idx];
  }
  /*! NOTE(review): identifier typo ("Feture") — renaming would break callers, kept as-is. */
  inline int Feture2SubFeature(int feature_idx) const {
    return feature2subfeature_[feature_idx];
  }
  /*! \brief Cumulative bin offset where the given group's bins start. */
  inline uint64_t GroupBinBoundary(int group_idx) const {
    return group_bin_boundaries_[group_idx];
  }
  inline uint64_t NumTotalBin() const {
    return group_bin_boundaries_.back();
  }

  /*! \brief Original column indices of all features that are actually used. */
  inline std::vector<int> ValidFeatureIndices() const {
    std::vector<int> ret;
    for (int i = 0; i < num_total_features_; ++i) {
      if (used_feature_map_[i] >= 0) {
        ret.push_back(i);
      }
    }
    return ret;
  }
  void ReSize(data_size_t num_data);

  void CopySubset(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data);

  LIGHTGBM_EXPORT void FinishLoad();

  LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr);

  LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr);

  LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr);

  /*!
  * \brief Save current dataset into binary file, will save to "filename.bin"
  */
  LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename);

  LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset);

  LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset);

  void ConstructHistograms(const std::vector<int8_t>& is_feature_used,
    const data_size_t* data_indices, data_size_t num_data,
    int leaf_idx,
    std::vector<std::unique_ptr<OrderedBin>>& ordered_bins,
    const score_t* gradients, const score_t* hessians,
    score_t* ordered_gradients, score_t* ordered_hessians,
    bool is_constant_hessian,
    HistogramBinEntry* histogram_data) const;

  void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, data_size_t num_data,
                    HistogramBinEntry* data) const;

  /*! \brief Partition data_indices by the threshold on one feature; delegates to
  *  the owning feature group's Split. Returns the split point (per FeatureGroup
  *  contract — confirm in feature_group.h). */
  inline data_size_t Split(int feature,
    const uint32_t* threshold,
    int num_threshold,
    bool default_left,
    data_size_t* data_indices, data_size_t num_data,
    data_size_t* lte_indices, data_size_t* gt_indices) const {
    const int group = feature2group_[feature];
    const int sub_feature = feature2subfeature_[feature];
    return feature_groups_[group]->Split(sub_feature, threshold, num_threshold, default_left, data_indices, num_data, lte_indices, gt_indices);
  }

  /*! \brief 1 for the first sub-feature of a group, 0 otherwise. */
  inline int SubFeatureBinOffset(int i) const {
    const int sub_feature = feature2subfeature_[i];
    if (sub_feature == 0) {
      return 1;
    } else {
      return 0;
    }
  }

  inline int FeatureNumBin(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
  }

  /*! \brief Monotone constraint of feature i; 0 when no constraints configured. */
  inline int8_t FeatureMonotone(int i) const {
    if (monotone_types_.empty()) {
      return 0;
    } else {
      return monotone_types_[i];
    }
  }

  /*! \brief True if any feature has a non-zero monotone constraint. */
  bool HasMonotone() const {
    if (monotone_types_.empty()) {
      return false;
    } else {
      for (size_t i = 0; i < monotone_types_.size(); ++i) {
        if (monotone_types_[i] != 0) {
          return true;
        }
      }
      return false;
    }
  }

  inline int FeatureGroupNumBin(int group) const {
    return feature_groups_[group]->num_total_bin_;
  }

  inline const BinMapper* FeatureBinMapper(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature].get();
  }

  /*! \brief Bin data of the group containing feature i (non-owning pointer). */
  inline const Bin* FeatureBin(int i) const {
    const int group = feature2group_[i];
    return feature_groups_[group]->bin_data_.get();
  }

  inline const Bin* FeatureGroupBin(int group) const {
    return feature_groups_[group]->bin_data_.get();
  }

  inline bool FeatureGroupIsSparse(int group) const {
    return feature_groups_[group]->is_sparse_;
  }

  /*! \brief Iterator over one feature's bins; caller owns the returned pointer
  *  (presumably — confirm SubFeatureIterator's contract in feature_group.h). */
  inline BinIterator* FeatureIterator(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->SubFeatureIterator(sub_feature);
  }

  inline BinIterator* FeatureGroupIterator(int group) const {
    return feature_groups_[group]->FeatureGroupIterator();
  }

  /*! \brief Convert a bin index of feature i back to a real feature value. */
  inline double RealThreshold(int i, uint32_t threshold) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
  }

  /*! \brief Create one OrderedBin per feature group, in parallel; exceptions
  *  from worker iterations are rethrown after the loop via OMP_THROW_EX. */
  inline void CreateOrderedBins(std::vector<std::unique_ptr<OrderedBin>>* ordered_bins) const {
    ordered_bins->resize(num_groups_);
    OMP_INIT_EX();
#pragma omp parallel for schedule(guided)
    for (int i = 0; i < num_groups_; ++i) {
      OMP_LOOP_EX_BEGIN();
      ordered_bins->at(i).reset(feature_groups_[i]->bin_data_->CreateOrderedBin());
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
  }

  /*!
  * \brief Get meta data pointer
  * \return Pointer of meta data
  */
  inline const Metadata& metadata() const { return metadata_; }

  /*! \brief Get Number of used features */
  inline int num_features() const { return num_features_; }

  /*! \brief Get Number of feature groups */
  inline int num_feature_groups() const { return num_groups_;}

  /*! \brief Get Number of total features */
  inline int num_total_features() const { return num_total_features_; }

  /*! \brief Get the index of label column */
  inline int label_idx() const { return label_idx_; }

  /*! \brief Get names of current data set */
  inline const std::vector<std::string>& feature_names() const { return feature_names_; }

  /*! \brief Set feature names; size must match total feature count (fatal otherwise).
  *  Spaces in names are replaced with underscores. */
  inline void set_feature_names(const std::vector<std::string>& feature_names) {
    if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
      Log::Fatal("Size of feature_names error, should equal with total number of features");
    }
    feature_names_ = std::vector<std::string>(feature_names);
    // replace ' ' in feature_names with '_'
    bool spaceInFeatureName = false;
    for (auto& feature_name: feature_names_){
      if (feature_name.find(' ') != std::string::npos){
        spaceInFeatureName = true;
        std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
      }
    }
    if (spaceInFeatureName){
      Log::Warning("Find whitespaces in feature_names, replace with underlines");
    }
  }

  /*! \brief Per-column bin info strings; "none" for unused columns. */
  inline std::vector<std::string> feature_infos() const {
    std::vector<std::string> bufs;
    for (int i = 0; i < num_total_features_; i++) {
      int fidx = used_feature_map_[i];
      if (fidx == -1) {
        bufs.push_back("none");
      } else {
        const auto bin_mapper = FeatureBinMapper(fidx);
        bufs.push_back(bin_mapper->bin_info());
      }
    }
    return bufs;
  }

  /*! \brief Get Number of data */
  inline data_size_t num_data() const { return num_data_; }

  /*! \brief Disable copy */
  Dataset& operator=(const Dataset&) = delete;
  /*! \brief Disable copy */
  Dataset(const Dataset&) = delete;

 private:
  std::string data_filename_;
  /*! \brief Store used features */
  std::vector<std::unique_ptr<FeatureGroup>> feature_groups_;
  /*! \brief Mapper from real feature index to used index */
  std::vector<int> used_feature_map_;
  /*! \brief Number of used features */
  int num_features_;
  /*! \brief Number of total features */
  int num_total_features_;
  /*! \brief Number of total data */
  data_size_t num_data_;
  /*! \brief Store some label level data */
  Metadata metadata_;
  /*! \brief index of label column */
  int label_idx_ = 0;
  /*! \brief Threshold for treating a feature as a sparse feature */
  double sparse_threshold_;
  /*! \brief store feature names */
  std::vector<std::string> feature_names_;
  /*! \brief Marker token for the binary dataset file format — NOTE(review):
  *  original comment here was a copy-paste of "store feature names". */
  static const char* binary_file_token;
  int num_groups_;
  std::vector<int> real_feature_idx_;
  std::vector<int> feature2group_;
  std::vector<int> feature2subfeature_;
  std::vector<uint64_t> group_bin_boundaries_;
  std::vector<int> group_feature_start_;
  std::vector<int> group_feature_cnt_;
  std::vector<int8_t> monotone_types_;
  bool is_finish_load_;
};

}  // namespace LightGBM

#endif   // LIGHTGBM_DATASET_H_
lis_matrix_dia.c
/* Copyright (C) 2002-2012 The SSI Project. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#ifdef HAVE_CONFIG_H
	#include "lis_config.h"
#else
#ifdef HAVE_CONFIG_WIN32_H
	#include "lis_config_win32.h"
#endif
#endif

#include <stdio.h>
#include <stdlib.h>
#ifdef HAVE_MALLOC_H
        #include <malloc.h>
#endif
#include <string.h>
#include <stdarg.h>
#include <math.h>
#ifdef _OPENMP
	#include <omp.h>
#endif
#ifdef USE_MPI
	#include <mpi.h>
#endif
#include "lislib.h"

/************************************************
 * function                     | SOM |
 *-----------------------------+-----+
 * lis_matrix_set               | o   |
 * lis_matrix_setDLU            | o   |
 * lis_matrix_malloc            | o   |
 * lis_matrix_elements_copy     | o   |
 * lis_matrix_transpose         | o   |
 * lis_matrix_split             | o   |
 * lis_matrix_merge             | o   |
 *-----------------------------+-----+-----+
 * function                     |merge|split|
 *-----------------------------+-----+-----|
 * lis_matrix_convert           | o   |     |
 * lis_matrix_copy              | o   | o   |
 * lis_matrix_get_diagonal      | o   | o   |
 * lis_matrix_scaling           | o   | o   |
 * lis_matrix_scaling_symm      | o   | o   |
 * lis_matrix_normf             | o   | o   |
 * lis_matrix_sort              | o   | o   |
 * lis_matrix_solve             | xxx | o   |
 * lis_matrix_solvet            | xxx | o   |
 ************************************************/

/* Attach user-supplied DIA (diagonal) storage to matrix A.
 * nnd   : number of stored diagonals
 * index : diagonal offsets (length nnd)
 * value : diagonal values; layout depends on threading (see elements_copy below)
 * A takes no ownership (is_copy = LIS_FALSE).
 * NOTE(review): the negative status appears to mark "set but not yet
 * assembled" — confirm against lis_matrix_assemble. */
#undef __FUNC__
#define __FUNC__ "lis_matrix_set_dia"
LIS_INT lis_matrix_set_dia(LIS_INT nnd, LIS_INT *index, LIS_SCALAR *value, LIS_MATRIX A)
{
	LIS_INT err;

	LIS_DEBUG_FUNC_IN;

#if 0
	err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET);
	if( err ) return err;
#else
	/* already assembled matrices are left untouched */
	if(lis_matrix_is_assembled(A)) return LIS_SUCCESS;
	else {
		err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET);
		if( err ) return err;
	}
#endif

	A->index       = index;
	A->value       = value;
	A->is_copy     = LIS_FALSE;
	A->status      = -LIS_MATRIX_DIA;
	A->nnd         = nnd;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

/* Attach pre-split D/L/U DIA storage to A: diag holds the diagonal, the
 * L (strictly lower) and U (strictly upper) parts come with their own
 * offset/value arrays. On success A owns none of the arrays except that
 * D->value is replaced by diag (the freshly created diag buffer of D is
 * freed first). */
#undef __FUNC__
#define __FUNC__ "lis_matrix_setDLU_dia"
LIS_INT lis_matrix_setDLU_dia(LIS_INT lnnd, LIS_INT unnd, LIS_SCALAR *diag, LIS_INT *lindex, LIS_SCALAR *lvalue, LIS_INT *uindex, LIS_SCALAR *uvalue, LIS_MATRIX A)
{
	LIS_INT	err;
	LIS_MATRIX_DIAG	D;

	LIS_DEBUG_FUNC_IN;

#if 0
	err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET);
	if( err ) return err;
#else
	if(lis_matrix_is_assembled(A)) return LIS_SUCCESS;
	else {
		err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET);
		if( err ) return err;
	}
#endif

	A->L = (LIS_MATRIX_CORE)lis_calloc(sizeof(struct LIS_MATRIX_CORE_STRUCT),"lis_matrix_setDLU_dia::A->L");
	if( A->L==NULL )
	{
		LIS_SETERR_MEM(sizeof(struct LIS_MATRIX_CORE_STRUCT));
		return LIS_OUT_OF_MEMORY;
	}
	A->U = (LIS_MATRIX_CORE)lis_calloc(sizeof(struct LIS_MATRIX_CORE_STRUCT),"lis_matrix_setDLU_dia::A->U");
	if( A->U==NULL )
	{
		LIS_SETERR_MEM(sizeof(struct LIS_MATRIX_CORE_STRUCT));
		lis_matrix_DLU_destroy(A);
		return LIS_OUT_OF_MEMORY;
	}
	err = lis_matrix_diag_create(A->n,0,A->comm,&D);
	if( err )
	{
		lis_matrix_DLU_destroy(A);
		return err;
	}

	/* replace the diag buffer created above with the caller's diag array */
	lis_free(D->value);
	D->value      = diag;
	A->D          = D;
	A->L->nnd     = lnnd;
	A->L->index   = lindex;
	A->L->value   = lvalue;
	A->U->nnd     = unnd;
	A->U->index   = uindex;
	A->U->value   = uvalue;
	A->is_copy    = LIS_FALSE;
	A->status     = -LIS_MATRIX_DIA;
	A->is_splited = LIS_TRUE;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

/* Allocate DIA index (nnd entries of offsets — note: allocated as n*nnd
 * here, which matches the callers' expectations for value but over-allocates
 * index) and value (n*nnd) arrays. On failure everything allocated so far
 * is freed and LIS_OUT_OF_MEMORY is returned. */
#undef __FUNC__
#define __FUNC__ "lis_matrix_malloc_dia"
LIS_INT lis_matrix_malloc_dia(LIS_INT n, LIS_INT nnd, LIS_INT **index, LIS_SCALAR **value)
{
	LIS_DEBUG_FUNC_IN;

	*index   = NULL;
	*value   = NULL;

	*index = (LIS_INT *)lis_malloc( n*nnd*sizeof(LIS_INT),"lis_matrix_malloc_dia::index" );
	if( *index==NULL )
	{
		LIS_SETERR_MEM(n*nnd*sizeof(LIS_INT));
		lis_free2(2,*index,*value);
		return LIS_OUT_OF_MEMORY;
	}
	*value = (LIS_SCALAR *)lis_malloc( n*nnd*sizeof(LIS_SCALAR),"lis_matrix_malloc_dia::value" );
	if( *value==NULL )
	{
		LIS_SETERR_MEM(n*nnd*sizeof(LIS_SCALAR));
		lis_free2(2,*index,*value);
		return LIS_OUT_OF_MEMORY;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

/* Copy DIA offsets and values from (index,value) to (o_index,o_value).
 * Values are copied per-thread over the row ranges handed out by
 * LIS_GET_ISIE, matching the thread-blocked value layout used throughout
 * this file when compiled with OpenMP. */
#undef __FUNC__
#define __FUNC__ "lis_matrix_elements_copy_dia"
LIS_INT lis_matrix_elements_copy_dia(LIS_INT n, LIS_INT nnd, LIS_INT *index, LIS_SCALAR *value, LIS_INT *o_index, LIS_SCALAR *o_value)
{
	LIS_INT	is,ie;
	LIS_INT	nprocs,my_rank;

	LIS_DEBUG_FUNC_IN;

#ifdef _OPENMP
	nprocs  = omp_get_max_threads();
#else
	nprocs  = 1;
#endif
	memcpy(o_index,index,nnd*sizeof(LIS_INT));
#ifdef _OPENMP
#pragma omp parallel private(is,ie,my_rank)
#endif
	{
#ifdef _OPENMP
		my_rank = omp_get_thread_num();
#else
		my_rank = 0;
#endif
		LIS_GET_ISIE(my_rank,nprocs,n,is,ie)
		memcpy(&o_value[is*nnd],&value[is*nnd],(ie-is)*nnd*sizeof(LIS_SCALAR));
	}
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

/* Deep-copy Ain into Aout, duplicating the split (D/L/U) representation
 * and/or the merged representation depending on Ain's state, then assemble
 * Aout. All intermediate buffers are released on any error path. */
#undef __FUNC__
#define __FUNC__ "lis_matrix_copy_dia"
LIS_INT lis_matrix_copy_dia(LIS_MATRIX Ain, LIS_MATRIX Aout)
{
	LIS_INT err;
	LIS_INT i,n,nnd,lnnd,unnd;
	LIS_INT *index;
	LIS_INT *lindex;
	LIS_INT *uindex;
	LIS_SCALAR *value,*lvalue,*uvalue,*diag;

	LIS_DEBUG_FUNC_IN;

	n       = Ain->n;

	if( Ain->is_splited )
	{
		lnnd     = Ain->L->nnd;
		unnd     = Ain->U->nnd;
		lindex   = NULL;
		uindex   = NULL;
		diag     = NULL;

		err = lis_matrix_malloc_dia(n,lnnd,&lindex,&lvalue);
		if( err )
		{
			return err;
		}
		err = lis_matrix_malloc_dia(n,unnd,&uindex,&uvalue);
		if( err )
		{
			lis_free2(5,diag,uindex,lindex,uvalue,lvalue);
			return err;
		}
		diag = (LIS_SCALAR *)lis_malloc(n*sizeof(LIS_SCALAR),"lis_matrix_copy_dia::diag");
		if( diag==NULL )
		{
			lis_free2(5,diag,uindex,lindex,uvalue,lvalue);
			return err;
		}
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
		for(i=0;i<n;i++)
		{
			diag[i] = Ain->D->value[i];
		}
		lis_matrix_elements_copy_dia(n,lnnd,Ain->L->index,Ain->L->value,lindex,lvalue);
		lis_matrix_elements_copy_dia(n,unnd,Ain->U->index,Ain->U->value,uindex,uvalue);

		err = lis_matrix_setDLU_dia(lnnd,unnd,diag,lindex,lvalue,uindex,uvalue,Aout);
		if( err )
		{
			lis_free2(5,diag,uindex,lindex,uvalue,lvalue);
			return err;
		}
	}
	/* also copy the merged representation when present (or when the split
	 * matrix keeps the original around via is_save) */
	if( !Ain->is_splited || (Ain->is_splited && Ain->is_save) )
	{
		index   = NULL;
		value   = NULL;
		nnd     = Ain->nnd;
		err = lis_matrix_malloc_dia(n,nnd,&index,&value);
		if( err )
		{
			return err;
		}
		lis_matrix_elements_copy_dia(n,nnd,Ain->index,Ain->value,index,value);
		err = lis_matrix_set_dia(nnd,index,value,Aout);
		if( err )
		{
			lis_free2(2,index,value);
			return err;
		}
	}
	err = lis_matrix_assemble(Aout);
	if( err )
	{
		lis_matrix_storage_destroy(Aout);
		return err;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

/* Extract the main diagonal of A into d[0..n).
 * Split matrices read D directly; merged matrices first locate the diagonal
 * with offset 0 in index[].
 * NOTE(review): if no offset-0 diagonal exists, j ends up equal to nnd and
 * the subsequent reads index one diagonal past the value array — confirm
 * whether callers guarantee the main diagonal is always stored. */
#undef __FUNC__
#define __FUNC__ "lis_matrix_get_diagonal_dia"
LIS_INT lis_matrix_get_diagonal_dia(LIS_MATRIX A, LIS_SCALAR d[])
{
	LIS_INT i,j;
	LIS_INT n,nnd;
#ifdef _OPENMP
	LIS_INT is,ie,my_rank,nprocs;
#endif

	LIS_DEBUG_FUNC_IN;

	n    = A->n;
	nnd  = A->nnd;
	if( A->is_splited )
	{
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
		for(i=0; i<n; i++)
		{
			d[i] = A->D->value[i];
		}
	}
	else
	{
#ifdef _OPENMP
		nprocs = omp_get_max_threads();
		/* find the stored diagonal with offset 0 (the main diagonal) */
		for(j=0;j<nnd;j++)
		{
			if( A->index[j]==0 ) break;
		}
#pragma omp parallel private(is,ie,my_rank)
		{
			my_rank = omp_get_thread_num();
			LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
			/* thread-blocked layout: diagonal j of thread block [is,ie)
			 * starts at is*nnd + j*(ie-is) */
			memcpy(&d[is],&A->value[is*nnd+j*(ie-is)],(ie-is)*sizeof(LIS_SCALAR));
		}
#else
		for(j=0;j<nnd;j++)
		{
			if( A->index[j]==0 ) break;
		}
		/* serial layout: diagonal j occupies value[j*n .. j*n+n) */
		for(i=0;i<n;i++)
		{
			d[i] = A->value[j*n+i];
		}
#endif
	}
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

/* Row-scale A in place: every stored element of row i is multiplied by d[i];
 * for split matrices the diagonal is set to 1.0 (d is assumed to hold the
 * inverse diagonal — TODO confirm against the callers of lis_matrix_scaling).
 * js/je clip each diagonal to the rows where it actually has entries; the
 * USE_MPI variant additionally clips against the padded column range np. */
#undef __FUNC__
#define __FUNC__ "lis_matrix_scaling_dia"
LIS_INT lis_matrix_scaling_dia(LIS_MATRIX A, LIS_SCALAR d[])
{
	LIS_INT i,j,js,je,jj;
	LIS_INT n,np,nnd;
#ifdef _OPENMP
	LIS_INT k,is,ie,ii;
	LIS_INT my_rank,nprocs;
#endif

	LIS_DEBUG_FUNC_IN;

	n    = A->n;
	np   = A->np;
	if( A->is_splited )
	{
#ifdef _OPENMP
#pragma omp parallel private(i,j,k,is,ie,jj,js,je,ii,my_rank)
		{
			nprocs  = omp_get_max_threads();
			my_rank = omp_get_thread_num();
			LIS_GET_ISIE(my_rank,nprocs,n,is,ie)
			for(i=is;i<ie;i++)
			{
				A->D->value[i] = 1.0;
			}
			for(j=0;j<A->L->nnd;j++)
			{
				jj = A->L->index[j];
				js = _max(is,-jj);
#ifdef USE_MPI
				je = jj<=(np-n)?ie:_min(ie,np-jj);
#else
				je = _min(ie,n-jj);
#endif
				k  = is*A->L->nnd + j*(ie-is);
				ii = js-is;
				for(i=js;i<je;i++)
				{
					A->L->value[k + ii++] *= d[i];
				}
			}
			for(j=0;j<A->U->nnd;j++)
			{
				jj = A->U->index[j];
				js = _max(is,-jj);
#ifdef USE_MPI
				je = jj<=(np-n)?ie:_min(ie,np-jj);
#else
				je = _min(ie,n-jj);
#endif
				k  = is*A->U->nnd + j*(ie-is);
				ii = js-is;
				for(i=js;i<je;i++)
				{
					A->U->value[k + ii++] *= d[i];
				}
			}
		}
#else
		for(i=0;i<n;i++)
		{
			A->D->value[i] = 1.0;
		}
		for(j=0;j<A->L->nnd;j++)
		{
			jj = A->L->index[j];
			js = _max(0,-jj);
#ifdef USE_MPI
			je = jj<=(np-n)?n:_min(n,np-jj);
#else
			je = _min(n,n-jj);
#endif
			for(i=js;i<je;i++)
			{
				A->L->value[j*n + i] *= d[i];
			}
		}
		for(j=0;j<A->U->nnd;j++)
		{
			jj = A->U->index[j];
			js = _max(0,-jj);
#ifdef USE_MPI
			je = jj<=(np-n)?n:_min(n,np-jj);
#else
			je = _min(n,n-jj);
#endif
			for(i=js;i<je;i++)
			{
				A->U->value[j*n + i] *= d[i];
			}
		}
#endif
	}
	else
	{
		nnd = A->nnd;
#ifdef _OPENMP
#pragma omp parallel private(i,j,k,is,ie,jj,js,je,ii,my_rank)
		{
			nprocs  = omp_get_max_threads();
			my_rank = omp_get_thread_num();
			LIS_GET_ISIE(my_rank,nprocs,n,is,ie)
			for(j=0;j<nnd;j++)
			{
				jj = A->index[j];
				js = _max(is,-jj);
#ifdef USE_MPI
				je = jj<=(np-n)?ie:_min(ie,np-jj);
#else
				je = _min(ie,n-jj);
#endif
				k  = is*nnd + j*(ie-is);
				ii = js-is;
				for(i=js;i<je;i++)
				{
					A->value[k + ii] *= d[i];
					ii++;
				}
			}
		}
#else
		for(j=0;j<nnd;j++)
		{
			jj = A->index[j];
			js = _max(0,-jj);
#ifdef USE_MPI
			je = jj<=(np-n)?n:_min(n,np-jj);
#else
			je = _min(n,n-jj);
#endif
			for(i=js;i<je;i++)
			{
				A->value[j*n + i] *= d[i];
			}
		}
#endif
	}
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

/* Symmetric scaling: element (i, i+offset) is multiplied by d[i]*d[i+offset],
 * so A becomes D*A*D for diagonal D = diag(d); split diagonals are set to 1.0
 * (same inverse-diagonal assumption as lis_matrix_scaling_dia). */
#undef __FUNC__
#define __FUNC__ "lis_matrix_scaling_symm_dia"
LIS_INT lis_matrix_scaling_symm_dia(LIS_MATRIX A, LIS_SCALAR d[])
{
	LIS_INT i,j,js,je,jj;
	LIS_INT n,np,nnd;
#ifdef _OPENMP
	LIS_INT k,is,ie,ii;
	LIS_INT my_rank,nprocs;
#endif

	LIS_DEBUG_FUNC_IN;

	n    = A->n;
	np   = A->np;
	nnd  = A->nnd;
	if( A->is_splited )
	{
#ifdef _OPENMP
#pragma omp parallel private(i,j,k,is,ie,jj,js,je,ii,my_rank)
		{
			nprocs  = omp_get_max_threads();
			my_rank = omp_get_thread_num();
			LIS_GET_ISIE(my_rank,nprocs,n,is,ie)
			for(i=is;i<ie;i++)
			{
				A->D->value[i] = 1.0;
			}
			for(j=0;j<A->L->nnd;j++)
			{
				jj = A->L->index[j];
				js = _max(is,-jj);
#ifdef USE_MPI
				je = jj<=(np-n)?ie:_min(ie,np-jj);
#else
				je = _min(ie,n-jj);
#endif
				k  = is*A->L->nnd + j*(ie-is);
				ii = js-is;
				for(i=js;i<je;i++)
				{
					A->L->value[k + ii++] *= d[i]*d[i+A->L->index[j]];;
				}
			}
			for(j=0;j<A->U->nnd;j++)
			{
				jj = A->U->index[j];
				js = _max(is,-jj);
#ifdef USE_MPI
				je = jj<=(np-n)?ie:_min(ie,np-jj);
#else
				je = _min(ie,n-jj);
#endif
				k  = is*A->U->nnd + j*(ie-is);
				ii = js-is;
				for(i=js;i<je;i++)
				{
					A->U->value[k + ii++] *= d[i]*d[i+A->U->index[j]];;
				}
			}
		}
#else
		for(i=0;i<n;i++)
		{
			A->D->value[i] = 1.0;
		}
		for(j=0;j<A->L->nnd;j++)
		{
			jj = A->L->index[j];
			js = _max(0,-jj);
#ifdef USE_MPI
			je = jj<=(np-n)?n:_min(n,np-jj);
#else
			je = _min(n,n-jj);
#endif
			for(i=js;i<je;i++)
			{
				A->L->value[j*n + i] *= d[i]*d[i+A->L->index[j]];;
			}
		}
		for(j=0;j<A->U->nnd;j++)
		{
			jj = A->U->index[j];
			js = _max(0,-jj);
#ifdef USE_MPI
			je = jj<=(np-n)?n:_min(n,np-jj);
#else
			je = _min(n,n-jj);
#endif
			for(i=js;i<je;i++)
			{
				A->U->value[j*n + i] *= d[i]*d[i+A->U->index[j]];;
			}
		}
#endif
	}
	else
	{
		nnd = A->nnd;
#ifdef _OPENMP
#pragma omp parallel private(i,j,k,is,ie,jj,js,je,ii,my_rank)
		{
			nprocs  = omp_get_max_threads();
			my_rank = omp_get_thread_num();
			LIS_GET_ISIE(my_rank,nprocs,n,is,ie)
			for(j=0;j<nnd;j++)
			{
				jj = A->index[j];
				js = _max(is,-jj);
#ifdef USE_MPI
				je = jj<=(np-n)?ie:_min(ie,np-jj);
#else
				je = _min(ie,n-jj);
#endif
				k  = is*nnd + j*(ie-is);
				ii = js-is;
				for(i=js;i<je;i++)
				{
					A->value[k + ii] *= d[i]*d[i+A->index[j]];
					ii++;
				}
			}
		}
#else
		for(j=0;j<nnd;j++)
		{
			jj = A->index[j];
			js = _max(0,-jj);
#ifdef USE_MPI
			je = jj<=(np-n)?n:_min(n,np-jj);
#else
			je = _min(n,n-jj);
#endif
			for(i=js;i<je;i++)
			{
				A->value[j*n + i] *= d[i]*d[i+A->index[j]];
			}
		}
#endif
	}
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

/* Frobenius norm of A.
 * NOTE(review): the loops below read index[i] / index[i+1] as if index[]
 * were CRS-style row pointers, but everywhere else in this file index[]
 * holds nnd diagonal offsets — this looks like a copy-paste from the CRS
 * variant and likely reads out of bounds for i >= nnd; verify before use. */
#undef __FUNC__
#define __FUNC__ "lis_matrix_normf_dia"
LIS_INT lis_matrix_normf_dia(LIS_MATRIX A, LIS_SCALAR *nrm)
{
	LIS_INT i,j;
	LIS_INT n;
	LIS_SCALAR sum;

	LIS_DEBUG_FUNC_IN;

	n    = A->n;
	sum  = (LIS_SCALAR)0;
	if( A->is_splited )
	{
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum) private(i,j)
#endif
		for(i=0; i<n; i++)
		{
			sum += A->D->value[i]*A->D->value[i];
			for(j=A->L->index[i];j<A->L->index[i+1];j++)
			{
				sum += A->L->value[j]*A->L->value[j];
			}
			for(j=A->U->index[i];j<A->U->index[i+1];j++)
			{
				sum += A->U->value[j]*A->U->value[j];
			}
		}
	}
	else
	{
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum) private(i,j)
#endif
		for(i=0; i<n; i++)
		{
			sum += A->value[i]*A->value[i];
			/* NOTE(review): CRS-style use of index[] continued from above —
			 * same copy-paste suspicion as in the first half of normf. */
			for(j=A->index[i];j<A->index[i+1];j++)
			{
				sum += A->value[j]*A->value[j];
			}
		}
	}
	*nrm = sqrt(sum);
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

/* Transpose stub: the actual conversion is commented out; only the type and
 * status fields of *Aout are set. */
#undef __FUNC__
#define __FUNC__ "lis_matrix_transpose_dia"
LIS_INT lis_matrix_transpose_dia(LIS_MATRIX Ain, LIS_MATRIX *Aout)
{
	LIS_DEBUG_FUNC_IN;

/*	err = lis_matrix_convert_dia2ccs(Ain,Aout);*/
	(*Aout)->matrix_type = LIS_MATRIX_DIA;
	(*Aout)->status      = LIS_MATRIX_DIA;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

/* Split the merged DIA storage of A into D (offset 0), L (negative offsets)
 * and U (positive offsets). A first pass counts lower/upper diagonals to size
 * the allocations; the second pass copies each diagonal into its part, using
 * the thread-blocked value layout under OpenMP and the plain j*n+i layout
 * otherwise. On success A->is_splited is set. */
#undef __FUNC__
#define __FUNC__ "lis_matrix_split_dia"
LIS_INT lis_matrix_split_dia(LIS_MATRIX A)
{
	LIS_INT i,j,n,nnd;
	LIS_INT lnnd,unnd;
#ifdef _OPENMP
	LIS_INT kl,ku,nprocs,my_rank,is,ie;
#endif
	LIS_INT err;
	LIS_INT *lindex,*uindex;
	LIS_SCALAR *lvalue,*uvalue;
	LIS_MATRIX_DIAG	D;

	LIS_DEBUG_FUNC_IN;

	n        = A->n;
	nnd      = A->nnd;
	lnnd     = 0;
	unnd     = 0;
	D        = NULL;
	lindex   = NULL;
	lvalue   = NULL;
	uindex   = NULL;
	uvalue   = NULL;

	/* first pass: count strictly-lower and strictly-upper diagonals */
	for(j=0;j<nnd;j++)
	{
		if( A->index[j]<0 )
		{
			lnnd++;
		}
		else if( A->index[j]>0 )
		{
			unnd++;
		}
	}
	err = lis_matrix_LU_create(A);
	if( err )
	{
		return err;
	}
	err = lis_matrix_malloc_dia(n,lnnd,&lindex,&lvalue);
	if( err )
	{
		return err;
	}
	err = lis_matrix_malloc_dia(n,unnd,&uindex,&uvalue);
	if( err )
	{
		lis_free2(4,lindex,lvalue,uindex,uvalue);
		return err;
	}
	err = lis_matrix_diag_duplicateM(A,&D);
	if( err )
	{
		lis_free2(4,lindex,lvalue,uindex,uvalue);
		return err;
	}

#ifdef _OPENMP
	kl = 0;
	ku = 0;
	nprocs = omp_get_max_threads();
	for(j=0;j<nnd;j++)
	{
		if( A->index[j]<0 )
		{
			lindex[kl] = A->index[j];
#pragma omp parallel private(i,is,ie,my_rank)
			{
				my_rank = omp_get_thread_num();
				LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
				memcpy(&lvalue[is*lnnd+kl*(ie-is)],&A->value[is*nnd+j*(ie-is)],(ie-is)*sizeof(LIS_SCALAR));
			}
			kl++;
		}
		else if( A->index[j]>0 )
		{
			uindex[ku] = A->index[j];
#pragma omp parallel private(i,is,ie,my_rank)
			{
				my_rank = omp_get_thread_num();
				LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
				memcpy(&uvalue[is*unnd+ku*(ie-is)],&A->value[is*nnd+j*(ie-is)],(ie-is)*sizeof(LIS_SCALAR));
			}
			ku++;
		}
		else
		{
			/* offset 0: this is the main diagonal, copied into D */
#pragma omp parallel private(i,is,ie,my_rank)
			{
				my_rank = omp_get_thread_num();
				LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
				for(i=is;i<ie;i++)
				{
					D->value[i] = A->value[is*nnd+j*(ie-is)+i-is];
				}
			}
		}
	}
#else
	/* serial second pass: reuse lnnd/unnd as running output positions */
	lnnd = 0;
	unnd = 0;
	for(j=0;j<nnd;j++)
	{
		if( A->index[j]<0 )
		{
			lindex[lnnd] = A->index[j];
			for(i=0;i<n;i++)
			{
				lvalue[lnnd*n+i] = A->value[j*n+i];
			}
			lnnd++;
		}
		else if( A->index[j]>0 )
		{
			uindex[unnd] = A->index[j];
			for(i=0;i<n;i++)
			{
				uvalue[unnd*n+i] = A->value[j*n+i];
			}
			unnd++;
		}
		else
		{
			for(i=0;i<n;i++)
			{
				D->value[i] = A->value[j*n+i];
			}
		}
	}
#endif
	A->L->nnd     = lnnd;
	A->L->index   = lindex;
	A->L->value   = lvalue;
	A->U->nnd     = unnd;
	A->U->index   = uindex;
	A->U->value   = uvalue;
	A->D          = D;
	A->is_splited = LIS_TRUE;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

/* Rebuild the merged DIA representation (index/value) of A from its split
 * D/L/U parts; diagonals are laid out as L parts, then the main diagonal,
 * then U parts. NOTE(review): `is` is loaded from A->is but then reused as
 * the thread range start inside the OpenMP regions — confirm the initial
 * assignment is intentional. */
#undef __FUNC__
#define __FUNC__ "lis_matrix_merge_dia"
LIS_INT lis_matrix_merge_dia(LIS_MATRIX A)
{
	LIS_INT i,j,k,n,is;
#ifdef _OPENMP
	LIS_INT nprocs,my_rank,ie;
#endif
	LIS_INT nnd;
	LIS_INT err;
	LIS_INT *index;
	LIS_SCALAR *value;

	LIS_DEBUG_FUNC_IN;

	n       = A->n;
	is      = A->is;
	index   = NULL;
	value   = NULL;
	nnd     = A->L->nnd + A->U->nnd + 1;
	err = lis_matrix_malloc_dia(n,nnd,&index,&value);
	if( err )
	{
		return err;
	}

#ifdef _OPENMP
	nprocs = omp_get_max_threads();
	k = 0;
	for(j=0;j<A->L->nnd;j++)
	{
		index[k] = A->L->index[j];
#pragma omp parallel private(i,is,ie,my_rank)
		{
			my_rank = omp_get_thread_num();
			LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
			memcpy(&value[is*nnd+k*(ie-is)],&A->L->value[is*A->L->nnd+j*(ie-is)],(ie-is)*sizeof(LIS_SCALAR));
		}
		k++;
	}
	index[k] = 0;
#pragma omp parallel private(i,is,ie,my_rank)
	{
		my_rank = omp_get_thread_num();
		LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
		memcpy(&value[is*nnd+k*(ie-is)],&A->D->value[is],(ie-is)*sizeof(LIS_SCALAR));
	}
	k++;
	for(j=0;j<A->U->nnd;j++)
	{
		index[k] = A->U->index[j];
#pragma omp parallel private(i,is,ie,my_rank)
		{
			my_rank = omp_get_thread_num();
			LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
			memcpy(&value[is*nnd+k*(ie-is)],&A->U->value[is*A->U->nnd+j*(ie-is)],(ie-is)*sizeof(LIS_SCALAR));
		}
		k++;
	}
#else
	k = 0;
	for(j=0;j<A->L->nnd;j++)
	{
		index[k] = A->L->index[j];
		for(i=0;i<n;i++)
		{
			value[k*n+i] = A->L->value[j*n+i];
		}
		k++;
	}
	index[k] = 0;
	for(i=0;i<n;i++)
	{
		value[k*n+i] = A->D->value[i];
	}
	k++;
	for(j=0;j<A->U->nnd;j++)
	{
		index[k] = A->U->index[j];
		for(i=0;i<n;i++)
		{
			value[k*n+i] = A->U->value[j*n+i];
		}
		k++;
	}
#endif
	A->nnd      = nnd;
	A->value    = value;
	A->index    = index;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

/* Sort entries into index order.
 * NOTE(review): this routine dereferences A->ptr / A->L->ptr / A->U->ptr,
 * which are CRS row-pointer fields never set by the DIA routines in this
 * file — it appears to be a copy-paste of the CRS variant; verify it is
 * ever reached for DIA matrices. */
#undef __FUNC__
#define __FUNC__ "lis_matrix_sort_dia"
LIS_INT lis_matrix_sort_dia(LIS_MATRIX A)
{
	LIS_INT i,n;

	LIS_DEBUG_FUNC_IN;

	if( !A->is_sorted )
	{
		n = A->n;
		if( A->is_splited )
		{
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
			for(i=0;i<n;i++)
			{
				lis_sort_id(A->L->ptr[i],A->L->ptr[i+1]-1,A->L->index,A->L->value);
				lis_sort_id(A->U->ptr[i],A->U->ptr[i+1]-1,A->U->index,A->U->value);
			}
		}
		else
		{
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
			for(i=0;i<n;i++)
			{
				lis_sort_id(A->ptr[i],A->ptr[i+1]-1,A->index,A->value);
			}
		}
		A->is_sorted = LIS_TRUE;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

/* Triangular solve with the split matrix (serial value layout j*n+i):
 * LIS_MATRIX_LOWER does forward substitution, LIS_MATRIX_UPPER backward
 * substitution, LIS_MATRIX_SSOR both; WD holds the working diagonal
 * (presumably its inverse — confirm against the precondition setup).
 * (Function is truncated at the end of this chunk.) */
#undef __FUNC__
#define __FUNC__ "lis_matrix_solve_dia"
LIS_INT lis_matrix_solve_dia(LIS_MATRIX A, LIS_VECTOR B, LIS_VECTOR X, LIS_INT flag)
{
	LIS_INT i,j,n;
	LIS_SCALAR t;
	LIS_SCALAR *b,*x;

	LIS_DEBUG_FUNC_IN;

	n  = A->n;
	b  = B->value;
	x  = X->value;

	switch(flag)
	{
	case LIS_MATRIX_LOWER:
		for(i=0;i<n;i++)
		{
			t = b[i];
			for(j=0;j<A->L->nnd;j++)
			{
				if( i+A->L->index[j] >= 0 )
					t -= A->L->value[j*n + i] * x[i + A->L->index[j]];
			}
			x[i] = t * A->WD->value[i];
		}
		break;
	case LIS_MATRIX_UPPER:
		for(i=n-1;i>=0;i--)
		{
			t = b[i];
			for(j=0;j<A->U->nnd;j++)
			{
				if( i+A->U->index[j] < n )
					t -= A->U->value[j*n + i] * x[i + A->U->index[j]];
			}
			x[i] = t * A->WD->value[i];
		}
		break;
	case LIS_MATRIX_SSOR:
		for(i=0;i<n;i++)
		{
			t = b[i];
			for(j=0;j<A->L->nnd;j++)
			{
				if( i+A->L->index[j] >= 0 )
					t -= A->L->value[j*n + i] * x[i + A->L->index[j]];
			}
			x[i] = t * A->WD->value[i];
		}
		for(i=n-1;i>=0;i--)
		{
			t = 0.0;
			for(j=0;j<A->U->nnd;j++)
			{
				if( i+A->U->index[j] < n )
					t += A->U->value[j*n + i] * x[i +
A->U->index[j]]; } x[i] -= t * A->WD->value[i]; } break; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_solvet_dia" LIS_INT lis_matrix_solvet_dia(LIS_MATRIX A, LIS_VECTOR B, LIS_VECTOR X, LIS_INT flag) { LIS_INT i,j,n; LIS_SCALAR t; LIS_SCALAR *b,*x; LIS_DEBUG_FUNC_IN; n = A->n; b = B->value; x = X->value; lis_vector_copy(B,X); switch(flag) { case LIS_MATRIX_LOWER: for(i=0;i<n;i++) { x[i] = x[i] * A->WD->value[i]; for(j=0;j<A->U->nnd;j++) { if( i+A->U->index[j] < n ) x[i + A->U->index[j]] -= A->U->value[j*n + i] * x[i]; } } break; case LIS_MATRIX_UPPER: for(i=n-1;i>=0;i--) { x[i] = x[i] * A->WD->value[i]; for(j=0;j<A->L->nnd;j++) { if( i+A->L->index[j] >= 0 ) x[i + A->L->index[j]] -= A->L->value[j*n + i] * x[i]; } } break; case LIS_MATRIX_SSOR: for(i=0;i<n;i++) { t = x[i] * A->WD->value[i]; for(j=0;j<A->U->nnd;j++) { if( i+A->U->index[j] < n ) x[i + A->U->index[j]] -= A->U->value[j*n + i] * t; } } for(i=n-1;i>=0;i--) { x[i] = x[i] * A->WD->value[i]; t = x[i]; for(j=0;j<A->L->nnd;j++) { if( i+A->L->index[j] >= 0 ) x[i + A->L->index[j]] -= A->L->value[j*n + i] * t; } } break; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_convert_crs2dia" LIS_INT lis_matrix_convert_crs2dia(LIS_MATRIX Ain, LIS_MATRIX Aout) { LIS_INT i,j,jj,k; LIS_INT err; LIS_INT n,nnz,nnd,nprocs,my_rank; LIS_INT is,ie; LIS_INT *iw; LIS_INT *index; LIS_SCALAR *value; LIS_DEBUG_FUNC_IN; n = Ain->n; nnz = Ain->nnz; index = NULL; value = NULL; iw = NULL; iw = (LIS_INT *)lis_malloc( nnz*sizeof(LIS_INT),"lis_matrix_convert_crs2dia::iw" ); if( iw==NULL ) { LIS_SETERR_MEM(nnz*sizeof(LIS_INT)); return LIS_ERR_OUT_OF_MEMORY; } lis_matrix_sort_crs(Ain); #ifdef _OPENMP #pragma omp parallel for private(i,j) #endif for(i=0;i<n;i++) { for(j=Ain->ptr[i];j<Ain->ptr[i+1];j++) { iw[j] = Ain->index[j] - i; } } lis_sort_i(0,nnz-1,iw); nnd = 1; k = iw[0]; for(i=1;i<nnz;i++) { if( k!=iw[i] ) { k = iw[i]; nnd++; } } err = 
lis_matrix_malloc_dia(n,nnd,&index,&value); if( err ) { lis_free(iw); return err; } /* convert dia */ k = iw[0]; index[0] = k; j = 1; for(i=1;i<nnz;i++) { if( k!=iw[i] ) { k = iw[i]; index[j] = k; j++; } } #ifdef _OPENMP #pragma omp parallel private(i,j,k,is,ie,jj,my_rank) #endif { #ifdef _OPENMP my_rank = omp_get_thread_num(); nprocs = omp_get_max_threads(); #else my_rank = 0; nprocs = 1; #endif LIS_GET_ISIE(my_rank,nprocs,n,is,ie); memset(&value[is*nnd],0,(ie-is)*nnd*sizeof(LIS_SCALAR)); for(i=is;i<ie;i++) { k = 0; for(j=Ain->ptr[i];j<Ain->ptr[i+1];j++) { jj=Ain->index[j] - i; while( jj!=index[k] ) k++; value[is*nnd + k*(ie-is) + i-is] = Ain->value[j]; } } } err = lis_matrix_set_dia(nnd,index,value,Aout); if( err ) { lis_free2(3,index,value,iw); return err; } err = lis_matrix_assemble(Aout); if( err ) { lis_free(iw); lis_matrix_storage_destroy(Aout); return err; } lis_free(iw); LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_convert_dia2crs" LIS_INT lis_matrix_convert_dia2crs(LIS_MATRIX Ain, LIS_MATRIX Aout) { LIS_INT i,j,jj,k,l; LIS_INT err,js,je; LIS_INT n,np,nnz,nnd,is,ie,nprocs,my_rank; LIS_INT *iw; LIS_INT *ptr,*index; LIS_SCALAR *value; LIS_DEBUG_FUNC_IN; n = Ain->n; np = Ain->np; nnd = Ain->nnd; is = Ain->is; ie = Ain->ie; ptr = NULL; index = NULL; value = NULL; iw = NULL; iw = (LIS_INT *)lis_malloc( (n+1)*sizeof(LIS_INT),"lis_matrix_convert_dia2crs::iw" ); if( iw==NULL ) { LIS_SETERR_MEM((n+1)*sizeof(LIS_INT)); return LIS_ERR_OUT_OF_MEMORY; } iw[0] = 0; #ifdef _OPENMP #pragma omp parallel private(i,j,k,is,ie,js,je,jj,my_rank) #endif { #ifdef _OPENMP nprocs = omp_get_max_threads(); my_rank = omp_get_thread_num(); #else my_rank = 0; nprocs = 1; #endif LIS_GET_ISIE(my_rank,nprocs,n,is,ie); memset(&iw[is+1],0,(ie-is)*sizeof(LIS_INT)); k = ie-is; for(j=0;j<nnd;j++) { jj = Ain->index[j]; js = _max(is,-jj) -is; #ifdef USE_MPI je = jj<=(np-n)?ie:_min(ie,np-jj); #else je = _min(ie,n-jj); #endif je -= is; for(i=js;i<je;i++) { 
if( Ain->value[is*nnd + j*k + i]!=(LIS_SCALAR)0.0 ) { iw[i+is+1]++; } } } } for(i=0;i<n;i++) { iw[i+1] += iw[i]; } nnz = iw[n]; err = lis_matrix_malloc_crs(n,nnz,&ptr,&index,&value); if( err ) { lis_free2(4,ptr,index,value,iw); return err; } /* convert crs */ ptr[0] = 0; #ifdef _OPENMP #pragma omp parallel private(i,j,k,l,is,ie,js,je,jj,my_rank) #endif { #ifdef _OPENMP nprocs = omp_get_max_threads(); my_rank = omp_get_thread_num(); #else my_rank = 0; nprocs = 1; #endif LIS_GET_ISIE(my_rank,nprocs,n,is,ie); l = ie-is; for(i=is;i<ie;i++) { ptr[i+1] = iw[i+1]; } for(j=0;j<nnd;j++) { jj = Ain->index[j]; js = _max(is,-jj) - is; #ifdef USE_MPI je = jj<=(np-n)?ie:_min(ie,np-jj); #else je = _min(ie,n-jj); #endif je -= is; for(i=js;i<je;i++) { if( Ain->value[is*nnd + j*l + i]!=(LIS_SCALAR)0.0 ) { k = iw[i+is]++; value[k] = Ain->value[is*nnd + j*l + i]; index[k] = i+is + jj; } } } } err = lis_matrix_set_crs(nnz,ptr,index,value,Aout); if( err ) { lis_free2(4,ptr,index,value,iw); return err; } err = lis_matrix_assemble(Aout); if( err ) { lis_free(iw); lis_matrix_storage_destroy(Aout); return err; } lis_free(iw); LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; }
simd-clone-2.h
// Test declarations for OpenMP `#pragma omp declare simd` on non-static C++
// member functions: the implicit `this` pointer participates in the clauses
// (uniform / linear / aligned), alongside the explicit parameter x.

struct S {
  int s;

  // SIMD clone is only called unmasked; no clause on `this` (varies per lane).
  #pragma omp declare simd notinbranch
  int f0 (int x);

  // uniform(this): every SIMD lane operates on the same object.
  #pragma omp declare simd notinbranch uniform(this)
  int f1 (int x);

  // linear(this:step): `this` advances by a constant step per lane; here the
  // step expression sizeof(this)/sizeof(this) evaluates to 1.
  #pragma omp declare simd notinbranch linear(this:sizeof(this)/sizeof(this))
  int f2 (int x);
};

struct T {
  int t[64];

  // aligned(this:32): `this` is promised to be 32-byte aligned;
  // uniform(this): same object in all lanes; linear(x): x increases by 1 per lane.
  #pragma omp declare simd aligned(this:32) uniform(this) linear(x)
  int f3 (int x);
};
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 24; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,8);t1++) { lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16)); ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-2,3)),ceild(16*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(8*t1+Ny+13,24)),floord(16*t2+Ny+12,24)),floord(16*t1-16*t2+Nz+Ny+11,24));t3++) { for (t4=max(max(max(0,ceild(t1-63,64)),ceild(16*t2-Nz-508,512)),ceild(24*t3-Ny-508,512));t4<=min(min(min(min(floord(Nt+Nx-4,512),floord(8*t1+Nx+13,512)),floord(16*t2+Nx+12,512)),floord(24*t3+Nx+20,512)),floord(16*t1-16*t2+Nz+Nx+11,512));t4++) { for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),24*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),24*t3+22),512*t4+510),16*t1-16*t2+Nz+13);t5++) { for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) { lbv=max(512*t4,t5+1); ubv=min(512*t4+511,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, 
"constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
OnDiscMSExperiment.h
// -------------------------------------------------------------------------- // OpenMS -- Open-Source Mass Spectrometry // -------------------------------------------------------------------------- // Copyright The OpenMS Team -- Eberhard Karls University Tuebingen, // ETH Zurich, and Freie Universitaet Berlin 2002-2017. // // This software is released under a three-clause BSD license: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of any author or any participating institution // may be used to endorse or promote products derived from this software // without specific prior written permission. // For a full list of authors, refer to the file AUTHORS. // -------------------------------------------------------------------------- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING // INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// // -------------------------------------------------------------------------- // $Maintainer: Hannes Roest $ // $Authors: Hannes Roest $ // -------------------------------------------------------------------------- #ifndef OPENMS_KERNEL_ONDISCMSEXPERIMENT_H #define OPENMS_KERNEL_ONDISCMSEXPERIMENT_H #include <OpenMS/INTERFACES/DataStructures.h> #include <OpenMS/KERNEL/MSExperiment.h> #include <OpenMS/KERNEL/MSSpectrum.h> #include <OpenMS/KERNEL/MSChromatogram.h> #include <OpenMS/METADATA/ExperimentalSettings.h> #include <OpenMS/FORMAT/IndexedMzMLFile.h> #include <OpenMS/FORMAT/MzMLFile.h> #include <vector> #include <algorithm> #include <limits> #include <boost/shared_ptr.hpp> namespace OpenMS { /** @brief Representation of a mass spectrometry experiment on disk. @ingroup Kernel @note This implementation is @a not thread-safe since it keeps internally a single file access pointer which it moves when accessing a specific data item. Please provide a separate copy to each thread, e.g. @code #pragma omp parallel for firstprivate(ondisc_map) @endcode */ class OnDiscMSExperiment { typedef ChromatogramPeak ChromatogramPeakT; typedef Peak1D PeakT; public: /** @brief Constructor This initializes the object, use openFile to open a file. */ OnDiscMSExperiment() {} /** @brief Open a specific file on disk. This tries to read the indexed mzML by parsing the index and then reading the meta information into memory. 
@return Whether the parsing of the file was successful (if false, the file most likely was not an indexed mzML file) */ bool openFile(const String& filename, bool skipMetaData = false) { filename_ = filename; indexed_mzml_file_.openFile(filename); if (filename != "" && !skipMetaData) { loadMetaData_(filename); } return indexed_mzml_file_.getParsingSuccess(); } /// Copy constructor OnDiscMSExperiment(const OnDiscMSExperiment& source) : filename_(source.filename_), indexed_mzml_file_(source.indexed_mzml_file_), meta_ms_experiment_(source.meta_ms_experiment_) { } /** @brief Equality operator This only checks whether the underlying file is the same and the parsed meta-information is the same. Note that the file reader (e.g. the std::ifstream of the file) might be in a different state. */ bool operator==(const OnDiscMSExperiment& rhs) const { // check if file and meta information is the same return filename_ == rhs.filename_ && (*meta_ms_experiment_) == (*rhs.meta_ms_experiment_); // do not check if indexed_mzml_file_ is equal -> they have the same filename... } /// Inequality operator bool operator!=(const OnDiscMSExperiment& rhs) const { return !(operator==(rhs)); } /** @brief Checks if all spectra are sorted with respect to ascending RT Note that we cannot check whether all spectra are sorted (except if we were to load them all and check). 
*/ bool isSortedByRT() const { return meta_ms_experiment_->isSorted(false); } /// alias for getNrSpectra inline Size size() const { return getNrSpectra(); } /// returns whether spectra are empty inline bool empty() const { return indexed_mzml_file_.getNrSpectra() == 0; } /// get the total number of spectra available inline Size getNrSpectra() const { return indexed_mzml_file_.getNrSpectra(); } /// get the total number of chromatograms available inline Size getNrChromatograms() const { return indexed_mzml_file_.getNrChromatograms(); } /// returns the meta information of this experiment (const access) boost::shared_ptr<const ExperimentalSettings> getExperimentalSettings() const { return boost::static_pointer_cast<const ExperimentalSettings>(meta_ms_experiment_); } /// alias for getSpectrum inline MSSpectrum operator[](Size n) { return getSpectrum(n); } /** @brief returns a single spectrum TODO: make this more efficient by reducing the copying */ MSSpectrum getSpectrum(Size id) { OpenMS::Interfaces::SpectrumPtr sptr = indexed_mzml_file_.getSpectrumById(static_cast<int>(id)); MSSpectrum spectrum(meta_ms_experiment_->operator[](id)); // recreate a spectrum from the data arrays! 
OpenMS::Interfaces::BinaryDataArrayPtr mz_arr = sptr->getMZArray(); OpenMS::Interfaces::BinaryDataArrayPtr int_arr = sptr->getIntensityArray(); spectrum.reserve(mz_arr->data.size()); for (Size i = 0; i < mz_arr->data.size(); i++) { PeakT p; p.setMZ(mz_arr->data[i]); p.setIntensity(int_arr->data[i]); spectrum.push_back(p); } return spectrum; } /** @brief returns a single spectrum */ OpenMS::Interfaces::SpectrumPtr getSpectrumById(Size id) { return indexed_mzml_file_.getSpectrumById(id); } /** @brief returns a single chromatogram TODO: make this more efficient by reducing the copying */ MSChromatogram getChromatogram(Size id) { OpenMS::Interfaces::ChromatogramPtr cptr = indexed_mzml_file_.getChromatogramById(static_cast<int>(id)); MSChromatogram chromatogram(meta_ms_experiment_->getChromatogram(id)); // recreate a chromatogram from the data arrays! OpenMS::Interfaces::BinaryDataArrayPtr rt_arr = cptr->getTimeArray(); OpenMS::Interfaces::BinaryDataArrayPtr int_arr = cptr->getIntensityArray(); chromatogram.reserve(rt_arr->data.size()); for (Size i = 0; i < rt_arr->data.size(); i++) { ChromatogramPeakT p; p.setRT(rt_arr->data[i]); p.setIntensity(int_arr->data[i]); chromatogram.push_back(p); } return chromatogram; } /** @brief returns a single chromatogram */ OpenMS::Interfaces::ChromatogramPtr getChromatogramById(Size id) { return indexed_mzml_file_.getChromatogramById(id); } ///sets whether to skip some XML checks and be fast instead void setSkipXMLChecks(bool skip) { indexed_mzml_file_.setSkipXMLChecks(skip); } private: /// Private Assignment operator -> we cannot copy file streams in IndexedMzMLFile OnDiscMSExperiment& operator=(const OnDiscMSExperiment& /* source */); void loadMetaData_(const String& filename) { meta_ms_experiment_ = boost::shared_ptr< PeakMap >(new PeakMap); MzMLFile f; PeakFileOptions options = f.getOptions(); options.setFillData(false); f.setOptions(options); f.load(filename, *meta_ms_experiment_.get()); } protected: /// The filename of the 
underlying data file String filename_; /// The index of the underlying data file IndexedMzMLFile indexed_mzml_file_; /// The meta-data boost::shared_ptr<PeakMap> meta_ms_experiment_; }; typedef OpenMS::OnDiscMSExperiment OnDiscPeakMap; } // namespace OpenMS #endif // OPENMS_KERNEL_ONDISCMSEXPERIMENT_H
builder.h
// Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details

#ifndef BUILDER_H_
#define BUILDER_H_

#include <algorithm>
#include <cinttypes>
#include <fstream>
#include <functional>
#include <type_traits>
#include <utility>

#include "command_line.h"
#include "generator.h"
#include "graph.h"
#include "platform_atomics.h"
#include "pvector.h"
#include "reader.h"
#include "timer.h"
#include "util.h"


/*
GAP Benchmark Suite
Class:  BuilderBase
Author: Scott Beamer

Given arguements from the command line (cli), returns a built graph
 - MakeGraph() will parse cli and obtain edgelist and call
   MakeGraphFromEL(edgelist) to perform actual graph construction
 - edgelist can be from file (reader) or synthetically generated (generator)
 - Common case: BuilderBase typedef'd (w/ params) to be Builder (benchmark.h)
*/


template <typename NodeID_, typename DestID_ = NodeID_,
          typename WeightT_ = NodeID_, bool invert = true>
class BuilderBase {
  typedef EdgePair<NodeID_, DestID_> Edge;
  typedef pvector<Edge> EdgeList;

  const CLBase &cli_;
  bool symmetrize_;         // if true, every edge is inserted in both directions
  bool needs_weights_;      // true when DestID_ differs from NodeID_ (weighted)
  int64_t num_nodes_ = -1;  // -1 until discovered from the edgelist

 public:
  explicit BuilderBase(const CLBase &cli) : cli_(cli) {
    symmetrize_ = cli_.symmetrize();
    needs_weights_ = !std::is_same<NodeID_, DestID_>::value;
  }

  // Returns the value to store for the reverse edge of e (unweighted case:
  // just the source vertex).
  DestID_ GetSource(EdgePair<NodeID_, NodeID_> e) {
    return e.u;
  }

  // Weighted case: reverse edge keeps the same weight as the forward edge.
  DestID_ GetSource(EdgePair<NodeID_, NodeWeight<NodeID_, WeightT_>> e) {
    return NodeWeight<NodeID_, WeightT_>(e.u, e.v.w);
  }

  // Largest vertex ID appearing at either endpoint of any edge.
  NodeID_ FindMaxNodeID(const EdgeList &el) {
    NodeID_ max_seen = 0;
    #pragma omp parallel for reduction(max : max_seen)
    for (auto it = el.begin(); it < el.end(); it++) {
      Edge e = *it;
      max_seen = std::max(max_seen, e.u);
      max_seen = std::max(max_seen, (NodeID_) e.v);
    }
    return max_seen;
  }

  // Per-vertex degree counts; transpose selects in-degrees vs out-degrees.
  // Counts are bumped with atomic fetch_and_add since edges are scanned in
  // parallel.
  pvector<NodeID_> CountDegrees(const EdgeList &el, bool transpose) {
    pvector<NodeID_> degrees(num_nodes_, 0);
    //exit(0);
    #pragma omp parallel for
    for (auto it = el.begin(); it < el.end(); it++) {
      Edge e = *it;
      if (symmetrize_ || (!symmetrize_ && !transpose))
        fetch_and_add(degrees[e.u], 1);
      if (symmetrize_ || (!symmetrize_ && transpose))
        fetch_and_add(degrees[(NodeID_) e.v], 1);
    }
    // NOTE(review): leftover debug output — presumably temporary; confirm
    // before shipping.
    printf("Degrees capacity: %lu\n", degrees.capacity());
    printf("Degrees size: %lu\n", degrees.size());
    return degrees;
  }

  // Serial exclusive prefix sum; sums[i] = total of degrees[0..i-1],
  // sums[size] = grand total.
  static pvector<SGOffset> PrefixSum(const pvector<NodeID_> &degrees) {
    pvector<SGOffset> sums(degrees.size() + 1);
    SGOffset total = 0;
    for (size_t n=0; n < degrees.size(); n++) {
      sums[n] = total;
      total += degrees[n];
    }
    sums[degrees.size()] = total;
    return sums;
  }

  // Parallel exclusive prefix sum: per-block local sums in parallel, a serial
  // scan over block totals, then parallel fill-in within each block.
  static pvector<SGOffset> ParallelPrefixSum(const pvector<NodeID_> &degrees) {
    const size_t block_size = 1<<20;
    const size_t num_blocks = (degrees.size() + block_size - 1) / block_size;
    pvector<SGOffset> local_sums(num_blocks);
    #pragma omp parallel for
    for (size_t block=0; block < num_blocks; block++) {
      SGOffset lsum = 0;
      size_t block_end = std::min((block + 1) * block_size, degrees.size());
      for (size_t i=block * block_size; i < block_end; i++)
        lsum += degrees[i];
      local_sums[block] = lsum;
    }
    pvector<SGOffset> bulk_prefix(num_blocks+1);
    SGOffset total = 0;
    for (size_t block=0; block < num_blocks; block++) {
      bulk_prefix[block] = total;
      total += local_sums[block];
    }
    bulk_prefix[num_blocks] = total;
    pvector<SGOffset> prefix(degrees.size() + 1);
    #pragma omp parallel for
    for (size_t block=0; block < num_blocks; block++) {
      SGOffset local_total = bulk_prefix[block];
      size_t block_end = std::min((block + 1) * block_size, degrees.size());
      for (size_t i=block * block_size; i < block_end; i++) {
        prefix[i] = local_total;
        local_total += degrees[i];
      }
    }
    prefix[degrees.size()] = bulk_prefix[num_blocks];
    return prefix;
  }

  // Removes self-loops and redundant edges
  // Side effect: neighbor IDs will be sorted
  void SquishCSR(const CSRGraph<NodeID_, DestID_, invert> &g, bool transpose,
                 DestID_*** sq_index, DestID_** sq_neighs) {
    pvector<NodeID_> diffs(g.num_nodes());
    DestID_ *n_start, *n_end;
    // Pass 1: sort each neighbor list, drop duplicates (unique) and
    // self-loops (remove), recording the squished length per vertex.
    #pragma omp parallel for private(n_start, n_end)
    for (NodeID_ n=0; n < g.num_nodes(); n++) {
      if (transpose) {
        n_start = g.in_neigh(n).begin();
        n_end = g.in_neigh(n).end();
      } else {
        n_start = g.out_neigh(n).begin();
        n_end = g.out_neigh(n).end();
      }
      std::sort(n_start, n_end);
      DestID_ *new_end = std::unique(n_start, n_end);
      new_end = std::remove(n_start, new_end, n);
      diffs[n] = new_end - n_start;
    }
    pvector<SGOffset> sq_offsets = ParallelPrefixSum(diffs);
    *sq_neighs = new DestID_[sq_offsets[g.num_nodes()]];
    *sq_index = CSRGraph<NodeID_, DestID_>::GenIndex(sq_offsets, *sq_neighs);
    // Pass 2: copy the surviving prefix of each (now sorted) list into the
    // newly allocated compact storage.
    #pragma omp parallel for private(n_start)
    for (NodeID_ n=0; n < g.num_nodes(); n++) {
      if (transpose)
        n_start = g.in_neigh(n).begin();
      else
        n_start = g.out_neigh(n).begin();
      std::copy(n_start, n_start+diffs[n], (*sq_index)[n]);
    }
  }

  // Squishes out-edges (and in-edges when directed and invert is set) and
  // rebuilds the graph from the compacted CSR arrays.
  CSRGraph<NodeID_, DestID_, invert> SquishGraph(
      const CSRGraph<NodeID_, DestID_, invert> &g) {
    DestID_ **out_index, *out_neighs, **in_index, *in_neighs;
    SquishCSR(g, false, &out_index, &out_neighs);
    if (g.directed()) {
      if (invert)
        SquishCSR(g, true, &in_index, &in_neighs);
      return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index,
                                                out_neighs, in_index,
                                                in_neighs);
    } else {
      return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index,
                                                out_neighs);
    }
  }

  /*
  Graph Bulding Steps (for CSR):
    - Read edgelist once to determine vertex degrees (CountDegrees)
    - Determine vertex offsets by a prefix sum (ParallelPrefixSum)
    - Allocate storage and set points according to offsets (GenIndex)
    - Copy edges into storage
  */
  void MakeCSR(const EdgeList &el, bool transpose, DestID_*** index,
               DestID_** neighs) {
    pvector<NodeID_> degrees = CountDegrees(el, transpose);
    pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
    *neighs = new DestID_[offsets[num_nodes_]];
    *index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, *neighs);
    // Scatter edges into their slots; fetch_and_add hands out unique slots
    // within each vertex's range under the parallel loop.
    #pragma omp parallel for
    for (auto it = el.begin(); it < el.end(); it++) {
      Edge e = *it;
      if (symmetrize_ || (!symmetrize_ && !transpose))
        (*neighs)[fetch_and_add(offsets[e.u], 1)] = e.v;
      if (symmetrize_ || (!symmetrize_ && transpose))
        (*neighs)[fetch_and_add(offsets[static_cast<NodeID_>(e.v)], 1)] =
            GetSource(e);
    }
  }

  // Builds the CSR graph (and its inverse when requested) from an edgelist,
  // inserting weights first if the destination type needs them.
  CSRGraph<NodeID_, DestID_, invert> MakeGraphFromEL(EdgeList &el) {
    DestID_ **index = nullptr, **inv_index = nullptr;
    DestID_ *neighs = nullptr, *inv_neighs = nullptr;
    Timer t;
    t.Start();
    if (num_nodes_ == -1)
      num_nodes_ = FindMaxNodeID(el)+1;
    if (needs_weights_)
      Generator<NodeID_, DestID_, WeightT_>::InsertWeights(el);
    MakeCSR(el, false, &index, &neighs);
    if (!symmetrize_ && invert) {
      // NOTE(review): leftover debug output.
      printf("new flag\n");
      MakeCSR(el, true, &inv_index, &inv_neighs);
    }
    t.Stop();
    PrintTime("Build Time", t.Seconds());
    if (symmetrize_)
      return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs);
    else
      return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs,
                                                inv_index, inv_neighs);
  }

  // Top-level entry point: obtains an edgelist from file or generator as
  // directed by the CLI, builds the graph, and squishes it.
  CSRGraph<NodeID_, DestID_, invert> MakeGraph() {
    CSRGraph<NodeID_, DestID_, invert> g;
    {  // extra scope to trigger earlier deletion of el (save memory)
      EdgeList el;
      if (cli_.filename() != "") {
        Reader<NodeID_, DestID_, WeightT_, invert> r(cli_.filename());
        if ((r.GetSuffix() == ".sg") || (r.GetSuffix() == ".wsg")) {
          // pre-built serialized graphs are returned as-is (no squish)
          return r.ReadSerializedGraph();
        } else {
          el = r.ReadFile(needs_weights_);
        }
      } else if (cli_.scale() != -1) {
        Generator<NodeID_, DestID_> gen(cli_.scale(), cli_.degree());
        el = gen.GenerateEL(cli_.uniform());
      }
      //for (auto wait : el)
      //printf("PE %d | u = %lu v = %lu w = %lu\n", 0, wait.u, wait.v.v, wait.v.w);
      g = MakeGraphFromEL(el);
      //printf("EL Size: %lu | El[0]: (%d, %d)", el.size(), (*(el.begin())).u, (*(el.begin())).v);
    }
    return SquishGraph(g);
  }

  // Relabels (and rebuilds) graph by order of decreasing degree
  static CSRGraph<NodeID_, DestID_, invert> RelabelByDegree(
      const CSRGraph<NodeID_, DestID_, invert> &g) {
    if (g.directed()) {
      std::cout << "Cannot relabel directed graph" << std::endl;
      std::exit(-11);
    }
    Timer t;
    t.Start();
    typedef std::pair<int64_t, NodeID_> degree_node_p;
    pvector<degree_node_p> degree_id_pairs(g.num_nodes());
    #pragma omp parallel for
    for (NodeID_ n=0; n < g.num_nodes(); n++)
      degree_id_pairs[n] = std::make_pair(g.out_degree(n), n);
    // sort descending by degree (pair comparison: degree first, then id)
    std::sort(degree_id_pairs.begin(), degree_id_pairs.end(),
              std::greater<degree_node_p>());
    pvector<NodeID_> degrees(g.num_nodes());
    pvector<NodeID_> new_ids(g.num_nodes());
    #pragma omp parallel for
    for (NodeID_ n=0; n < g.num_nodes(); n++) {
      degrees[n] = degree_id_pairs[n].first;
      new_ids[degree_id_pairs[n].second] = n;
    }
    // NOTE(review): leftover debug output — O(n) prints on every relabel.
    for (int i = 0; i < g.num_nodes(); i++)
      printf("PE %d | degrees[%d] = %d, new_ids[%d] = %d\n", 0, i, degrees[i],
             i, new_ids[i]);
    pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
    DestID_* neighs = new DestID_[offsets[g.num_nodes()]];
    DestID_** index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, neighs);
    #pragma omp parallel for
    for (NodeID_ u=0; u < g.num_nodes(); u++) {
      for (NodeID_ v : g.out_neigh(u))
        neighs[offsets[new_ids[u]]++] = new_ids[v];
      std::sort(index[new_ids[u]], index[new_ids[u]+1]);
    }
    t.Stop();
    PrintTime("Relabel", t.Seconds());
    return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), index, neighs);
  }
};

#endif  // BUILDER_H_
ompatomic.c
#include <stdio.h>
#include <stdlib.h>

/* Count the OpenMP threads in a parallel region: every member of the team
 * increments a shared counter exactly once, with #pragma omp atomic
 * guaranteeing the read-modify-write is not lost under contention. */
int main(int argc, char* argv[]) {
    int num_threads = 0;

    #pragma omp parallel
    {
        /* One atomic increment per thread in the team. */
        #pragma omp atomic
        num_threads += 1;
    }

    printf("Counted: %d threads\n", num_threads);
    return 0;
}
SoaDistanceTableAB.h
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//                    Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_DTDIMPL_AB_H
#define QMCPLUSPLUS_DTDIMPL_AB_H

#include "Utilities/FairDivide.h"

namespace qmcplusplus
{
/**@ingroup nnlist
 * @brief A derived class from DistanceTableData, specialized for AB (source/target)
 * pairs using a transposed storage layout: distances_[target][source].
 */
template<typename T, unsigned D, int SC>
struct SoaDistanceTableAB : public DTD_BConds<T, D, SC>, public DistanceTableData
{
  SoaDistanceTableAB(const ParticleSet& source, ParticleSet& target)
      : DTD_BConds<T, D, SC>(source.Lattice), DistanceTableData(source, target)
  {
    resize(source.getTotalNum(), target.getTotalNum());
  }

  // Allocate per-target rows of distances/displacements plus the temporaries
  // used by move(). ns/nt are the source and target particle counts.
  void resize(int ns, int nt)
  {
    N_sources = ns;
    N_targets = nt;
    // Nothing to allocate when either set is empty.
    if (N_sources * N_targets == 0)
      return;

    // initialize memory containers and views; rows are padded to the SIMD
    // alignment so vectorized kernels can run over full lanes.
    const int Nsources_padded = getAlignedSize<T>(N_sources);
    distances_.resize(N_targets);
    displacements_.resize(N_targets);
    for (int i = 0; i < N_targets; ++i)
    {
      distances_[i].resize(Nsources_padded);
      displacements_[i].resize(Nsources_padded);
    }

    // The padding of temp_r_ and temp_dr_ is necessary for the memory copy in the update function
    // temp_r_ is padded explicitly while temp_dr_ is padded internally
    temp_r_.resize(Nsources_padded);
    temp_dr_.resize(N_sources);
  }

  SoaDistanceTableAB() = delete;
  SoaDistanceTableAB(const SoaDistanceTableAB&) = delete;

  /** evaluate the full table: for every target particle, compute distances and
   * displacements to the [first, last) slice of source particles owned by this
   * thread (the source range is split across the OpenMP team). */
  inline void evaluate(ParticleSet& P)
  {
#pragma omp parallel
    {
      int first, last;
      // Alignment-aware split of the source range so each thread writes whole
      // SIMD lanes and rows never overlap between threads.
      FairDivideAligned(N_sources, getAlignment<T>(), omp_get_num_threads(), omp_get_thread_num(), first, last);

      //be aware of the sign of Displacement
      for (int iat = 0; iat < N_targets; ++iat)
        DTD_BConds<T, D, SC>::computeDistances(P.R[iat], Origin->getCoordinates().getAllParticlePos(),
                                               distances_[iat].data(), displacements_[iat], first, last);
    }
  }

  ///evaluate the temporary pair relations for a proposed position rnew of target iat
  inline void move(const ParticleSet& P, const PosType& rnew, const IndexType iat, bool prepare_old)
  {
    // Distances from the proposed position go into the temporaries.
    DTD_BConds<T, D, SC>::computeDistances(rnew, Origin->getCoordinates().getAllParticlePos(), temp_r_.data(), temp_dr_, 0, N_sources);
    // If the full table is not ready all the time, overwrite the current value.
    // If this step is missing, DT values can be undefined in case a move is rejected.
    if (!need_full_table_)
      DTD_BConds<T, D, SC>::computeDistances(P.R[iat], Origin->getCoordinates().getAllParticlePos(), distances_[iat].data(), displacements_[iat], 0, N_sources);
  }

  ///update the stripe for jat-th particle: commit move()'s temporaries to row iat
  inline void update(IndexType iat, bool partial_update)
  {
    std::copy_n(temp_r_.data(), N_sources, distances_[iat].data());
    // Displacements are stored per-dimension (SoA), so copy each component plane.
    for (int idim = 0; idim < D; ++idim)
      std::copy_n(temp_dr_.data(idim), N_sources, displacements_[iat].data(idim));
  }

  // Compact list of targets within rcut of (presumably) source particle iat —
  // note the transposed access distances_[jat][iat]. Returns the count written
  // into jid/dist/displ. Displacements are sign-flipped on output.
  size_t get_neighbors(int iat, RealType rcut, int* restrict jid, RealType* restrict dist, PosType* restrict displ) const
  {
    constexpr T cminus(-1);
    size_t nn = 0;
    for (int jat = 0; jat < N_targets; ++jat)
    {
      const RealType rij = distances_[jat][iat];
      if (rij < rcut)
      { //make the compact list
        jid[nn]   = jat;
        dist[nn]  = rij;
        displ[nn] = cminus * displacements_[jat][iat];
        nn++;
      }
    }
    return nn;
  }

  // Find the closest source to target iat. With newpos=true the search uses
  // the temporaries filled by move(); otherwise the committed row iat.
  // Returns the source index (-1 if none) and sets r/dr on success.
  int get_first_neighbor(IndexType iat, RealType& r, PosType& dr, bool newpos) const
  {
    RealType min_dist = std::numeric_limits<RealType>::max();
    int index = -1;
    if (newpos)
    {
      for (int jat = 0; jat < N_sources; ++jat)
        if (temp_r_[jat] < min_dist)
        {
          min_dist = temp_r_[jat];
          index = jat;
        }
      if (index >= 0)
      {
        r = min_dist;
        dr = temp_dr_[index];
      }
    }
    else
    {
      for (int jat = 0; jat < N_sources; ++jat)
        if (distances_[iat][jat] < min_dist)
        {
          min_dist = distances_[iat][jat];
          index = jat;
        }
      if (index >= 0)
      {
        r = min_dist;
        dr = displacements_[iat][index];
      }
    }
    return index;
  }

  // Distance-only variant of get_neighbors: fills dist with all pair
  // distances below rcut and returns the count.
  size_t get_neighbors(int iat, RealType rcut, RealType* restrict dist) const
  {
    size_t nn = 0;
    for (int jat = 0; jat < N_targets; ++jat)
    {
      const RealType rij = distances_[jat][iat];
      if (rij < rcut)
      { //make the compact list
        dist[nn] = rij;
        nn++;
      }
    }
    return nn;
  }
};
} // namespace qmcplusplus
#endif
edgelist.h
/****************************************************************************** * ** Copyright (c) 2016, Intel Corporation ** * ** All rights reserved. ** * ** ** * ** Redistribution and use in source and binary forms, with or without ** * ** modification, are permitted provided that the following conditions ** * ** are met: ** * ** 1. Redistributions of source code must retain the above copyright ** * ** notice, this list of conditions and the following disclaimer. ** * ** 2. Redistributions in binary form must reproduce the above copyright ** * ** notice, this list of conditions and the following disclaimer in the ** * ** documentation and/or other materials provided with the distribution. ** * ** 3. Neither the name of the copyright holder nor the names of its ** * ** contributors may be used to endorse or promote products derived ** * ** from this software without specific prior written permission. ** * ** ** * ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** * ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** * ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** * ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** * ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** * ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** * ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** * ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** * ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** * ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** * ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * ******************************************************************************/ /* Michael Anderson (Intel Corp.), Narayanan Sundaram (Intel Corp.) 
* * ******************************************************************************/ #ifndef SRC_EDGELIST_H_ #define SRC_EDGELIST_H_ #include <string> template <typename T> struct edge_t { edge_t() {} edge_t(int _src, int _dst, T _val) { src = _src; dst = _dst; val = _val; } int src; int dst; T val; }; template <typename T> struct edgelist_t { edge_t<T>* edges; int m; int n; int nnz; edgelist_t() : m(0), n(0), nnz(0), edges(nullptr) {} edgelist_t(int _m, int _n, int _nnz) { m = _m; n = _n; nnz = _nnz; if(nnz > 0) { edges = reinterpret_cast<edge_t<T>*>(_mm_malloc((size_t)nnz * sizeof(edge_t<T>), 64)); } } edgelist_t(edge_t<T>* edges, int m, int n, int nnz) : edges(edges), m(m), n(n), nnz(nnz) {} void clear() { if (nnz > 0) { _mm_free(edges); } edges = nullptr; nnz = 0; m = 0; n = 0; } }; template <typename T> struct tedge_t { int src; int dst; int tile_id; T val; }; template<typename T> bool readLine (FILE * ifile, int * src, int * dst, T * val, bool binaryformat=true, bool edgeweights=true) { if(binaryformat) { auto fread_bytes = fread(src, sizeof(int), 1, ifile); if (feof(ifile)) return false; assert(fread_bytes == 1); fread_bytes = fread(dst, sizeof(int), 1, ifile); if (feof(ifile)) return false; assert(fread_bytes == 1); if (edgeweights) { fread_bytes = fread(val, sizeof(T), 1, ifile); if (feof(ifile)) return false; assert(fread_bytes == 1); } else { *val = (T)(1); } } else { if (edgeweights) { int ret; if (std::is_same<T, float>::value) { ret = fscanf(ifile, "%d %d %f", src, dst, val); if (ret != 3) return false; } else if (std::is_same<T, double>::value) { ret = fscanf(ifile, "%d %d %lf", src, dst, val); if (ret != 3) return false; } else if (std::is_same<T, int>::value) { ret = fscanf(ifile, "%d %d %d", src, dst, val); if (ret != 3) return false; } else if (std::is_same<T, unsigned int>::value) { ret = fscanf(ifile, "%d %d %u", src, dst, val); if (ret != 3) return false; }else { std::cout << "Data type not supported (read)" << std::endl; } } else { int ret = 
fscanf(ifile, "%d %d", src, dst); if (ret == 2) { *val = (T)(1); } else return false; } if (feof(ifile)) return false; } return true; } template<typename T> void get_maxid_and_nnz(FILE* fp, int* m, int* n, unsigned long int* nnz, bool* symmetric, bool *pattern, bool binaryformat=true, bool header=true, bool edgeweights=true) { if (header) { int tmp_[3]; if (binaryformat) { auto fread_bytes = fread(tmp_, sizeof(int), 3, fp); assert(fread_bytes == 3); *m = tmp_[0]; *n = tmp_[1]; *nnz = tmp_[2]; } else { unsigned long position; char line[256]; // read matrixmarket header fflush(fp); position = ftell(fp); fgets(line, 256, fp); *symmetric = strstr(line, "symmetric") != NULL; *pattern = strstr(line, "pattern") != NULL; edgeweights = *pattern; // skip all the comments line[0] = '%'; fseek(fp, position, SEEK_SET); while (line[0] == '%') { fflush(fp); position = ftell(fp); fgets(line, 256, fp); } fseek(fp, position, SEEK_SET); int ret = fscanf(fp, "%d %d %u", &(tmp_[0]), &(tmp_[1]), &(tmp_[2])); assert(ret == 3); *m = tmp_[0]; *n = tmp_[1]; *nnz = tmp_[2]; } return; } else { //no header unsigned long nnz_ = 0; int tempsrc, tempdst; int maxm = 0; int maxn = 0; T tempval; while(true) { if(feof(fp)) { break; } if (!readLine<T>(fp, &tempsrc, &tempdst, &tempval, binaryformat, edgeweights)) { break; } maxm = (maxm > tempsrc)?(maxm):(tempsrc); maxn = (maxn > tempdst)?(maxn):(tempdst); nnz_++; } *m = maxm; *n = maxn; *nnz = nnz_; } } template<typename T> void writeLine (FILE* ofile, int src, int dst, T val, bool binaryformat=true, bool edgeweights=true) { if (binaryformat) { auto fwrite_bytes = fwrite(&src, sizeof(int), 1, ofile); assert(fwrite_bytes == 1); fwrite_bytes = fwrite(&dst, sizeof(int), 1, ofile); assert(fwrite_bytes == 1); if (edgeweights) { fwrite_bytes = fwrite(&val, sizeof(T), 1, ofile); assert(fwrite_bytes == 1); } } else { if (edgeweights) { if (std::is_same<T, float>::value) { fprintf(ofile, "%d %d %.8f\n", src, dst, val); } else if (std::is_same<T, 
double>::value) { fprintf(ofile, "%d %d %.15lf\n", src, dst, val); } else if (std::is_same<T, int>::value) { fprintf(ofile, "%d %d %d\n", src, dst, val); } else if (std::is_same<T, unsigned int>::value) { fprintf(ofile, "%d %d %u\n", src, dst, val); } else { std::cout << "Data type not supported (write)\n"; } } else { fprintf(ofile, "%d %d\n", src, dst); } } } template <typename T> void write_edgelist(const char* dir, const edgelist_t<T> & edgelist, bool binaryformat=true, bool header=true, bool edgeweights=true) { int global_nrank = get_global_nrank(); int global_myrank = get_global_myrank(); std::stringstream fname_ss; fname_ss << dir << global_myrank; printf("Writing file: %s\n", fname_ss.str().c_str()); FILE * fp; if (binaryformat) { fp = fopen(fname_ss.str().c_str(), "wb"); if (header) { auto fwrite_bytes = fwrite(&(edgelist.m), sizeof(int), 1, fp); assert(fwrite_bytes == 1); fwrite_bytes = fwrite(&(edgelist.n), sizeof(int), 1, fp); assert(fwrite_bytes == 1); fwrite_bytes = fwrite(&(edgelist.nnz), sizeof(int), 1, fp); assert(fwrite_bytes == 1); } } else { fp = fopen(fname_ss.str().c_str(), "w"); if (header) { fprintf(fp, "%d %d %u\n", edgelist.m, edgelist.n, edgelist.nnz); } } for(auto i = 0 ; i < edgelist.nnz ; i++) { writeLine<T>(fp, edgelist.edges[i].src, edgelist.edges[i].dst, edgelist.edges[i].val, binaryformat, edgeweights); } fclose(fp); } template <typename T> void load_edgelist(const char* dir, edgelist_t<T>* edgelist, bool single=true, bool binaryformat=true, bool header=true, bool edgeweights=true) { int global_nrank = get_global_nrank(); int global_myrank = get_global_myrank(); edgelist->m = 0; edgelist->n = 0; edgelist->nnz = 0; bool symmetric, pattern; for(int i = global_myrank ; ; i += global_nrank) { std::stringstream fname_ss; if (single) fname_ss << dir; else fname_ss << dir << i; FILE* fp; if (binaryformat) { fp = fopen(fname_ss.str().c_str(), "rb"); } else { fp = fopen(fname_ss.str().c_str(), "r"); } if(!fp) { printf("Could not open file: 
%s\n", fname_ss.str().c_str()); break; } else { printf("Reading file: %s\n", fname_ss.str().c_str()); } int m_, n_; unsigned long nnz_; get_maxid_and_nnz<T>(fp, &m_, &n_, &nnz_, &symmetric, &pattern, binaryformat, header, edgeweights); edgelist->m = std::max(m_, edgelist->m); edgelist->n = std::max(n_, edgelist->n); edgelist->nnz += nnz_; if (symmetric) edgelist->nnz += nnz_; fclose(fp); if (single) break; } int local_max_m = edgelist->m; int max_m = edgelist->m; int local_max_n = edgelist->n; int max_n = edgelist->n; MPI_Allreduce(&local_max_m, &max_m, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); MPI_Allreduce(&local_max_n, &max_n, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); edgelist->m = max_m; edgelist->n = max_n; std::cout << (symmetric ? "Undirected " : "Directed ") << (pattern ? "Unweighted" : "Weighted") << std::endl; std::cout << "Got: " << edgelist->m << " by " << edgelist->n << " vertices" << std::endl; std::cout << "Got: " << edgelist->nnz << " edges" << std::endl; edgelist->edges = reinterpret_cast<edge_t<T>*>( _mm_malloc((uint64_t)edgelist->nnz * (uint64_t)sizeof(edge_t<T>), 64)); unsigned long int nnzcnt = 0; for(int i = global_myrank ; ; i += global_nrank) { std::stringstream fname_ss; if (single) fname_ss << dir; else fname_ss << dir << i; //printf("Opening file: %s\n", fname_ss.str().c_str()); FILE* fp; if (binaryformat) { fp = fopen(fname_ss.str().c_str(), "rb"); } else { fp = fopen(fname_ss.str().c_str(), "r"); } if(!fp) break; if (header) { //remove header int m_, n_; unsigned long nnz_; get_maxid_and_nnz<T>(fp, &m_, &n_, &nnz_, &symmetric, &pattern, binaryformat, header, edgeweights); } int j = 0; while(true) { if (feof(fp)) { break; } if (!readLine<T>(fp, &(edgelist->edges[nnzcnt].src), &(edgelist->edges[nnzcnt].dst), &(edgelist->edges[nnzcnt].val), binaryformat, !pattern && edgeweights)) { break; } #ifdef __DEBUG //std::cout <<(edgelist->edges[nnzcnt].src) << " " << (edgelist->edges[nnzcnt].dst) << std::endl; if(edgelist->edges[nnzcnt].src <= 0 || 
edgelist->edges[nnzcnt].dst <= 0 || edgelist->edges[nnzcnt].src > edgelist->m || edgelist->edges[nnzcnt].dst > edgelist->n) { std::cout << "Invalid edge, i, j, nnz: " << i << " , " << j << " , " << nnzcnt << std::endl; exit(0); } j++; #endif nnzcnt++; if (symmetric) { edgelist->edges[nnzcnt].src = edgelist->edges[nnzcnt-1].dst; edgelist->edges[nnzcnt].dst = edgelist->edges[nnzcnt-1].src; edgelist->edges[nnzcnt].val = edgelist->edges[nnzcnt-1].val; nnzcnt++; } } fclose(fp); if (single) break; } } template <typename T> void randomize_edgelist_square(edgelist_t<T>* edgelist) { unsigned int* mapping = new unsigned int[edgelist->m]; unsigned int* rval = new unsigned int[edgelist->m]; int global_myrank = get_global_myrank(); if (global_myrank == 0) { srand(5); // #pragma omp parallel for for (int i = 0; i < edgelist->m; i++) { mapping[i] = i; rval[i] = rand() % edgelist->m; } for (int i = 0; i < edgelist->m; i++) { unsigned int tmp = mapping[i]; mapping[i] = mapping[rval[i]]; mapping[rval[i]] = tmp; } } delete[] rval; MPI_Bcast(mapping, edgelist->m, MPI_INT, 0, MPI_COMM_WORLD); #pragma omp parallel for for (int i = 0; i < edgelist->nnz; i++) { edgelist->edges[i].src = mapping[edgelist->edges[i].src - 1] + 1; edgelist->edges[i].dst = mapping[edgelist->edges[i].dst - 1] + 1; } delete[] mapping; } template<typename T> void remove_empty_columns(edgelist_t<T> * edges, int ** remaining_indices) { // Remove empty columns bool * colexists = new bool[edges->n]; memset(colexists, 0, edges->n * sizeof(bool)); int * new_colids = new int[edges->n+1]; memset(new_colids, 0, (edges->n + 1) * sizeof(int)); int new_ncols = 0; for(int i = 0 ; i < edges->nnz ; i++) { if(!colexists[edges->edges[i].dst-1]) { new_ncols++; } colexists[edges->edges[i].dst-1] = true; } std::cout << "New ncols: " << new_ncols << std::endl; *(remaining_indices) = (int*) _mm_malloc(new_ncols * sizeof(int), 64); int new_colcnt = 0; for(int i = 0 ; i < edges->n; i++) { new_colids[i+1] = (colexists[i] ? 
1 : 0) + new_colids[i]; if(colexists[i]) { assert(new_colcnt < new_ncols); (*(remaining_indices))[new_colcnt] = i+1; new_colcnt++; } } assert(new_colcnt == new_ncols); #pragma omp parallel for for(int i = 0 ; i < edges->nnz ; i++) { edges->edges[i].dst = new_colids[edges->edges[i].dst-1] + 1; assert(edges->edges[i].dst - 1 >= 0); assert(edges->edges[i].dst - 1 < new_ncols); } edges->n = new_ncols; delete [] colexists; delete [] new_colids; } template<typename T> void filter_edges_by_row(edgelist_t<T> * edges, int start_row, int end_row) { int valid_edgecnt = 0; for(int i = 0 ; i < edges->nnz ; i++) { if(edges->edges[i].src-1 < end_row && edges->edges[i].src-1 >= start_row) { edges->edges[valid_edgecnt] = edges->edges[i]; edges->edges[valid_edgecnt].src -= start_row; valid_edgecnt++; } } edges->nnz = valid_edgecnt; edges->m = (end_row-start_row); std::cout << "New edges->m: " << edges->m << std::endl; } template<typename T> void get_dimensions(edge_t<T> * edges, int nnz, int &max_m, int &max_n) { int local_max_m = 0; int local_max_n = 0; #pragma omp parallel for reduction(max:local_max_m, local_max_n) for(int i = 0 ; i < nnz ; i++) { local_max_m = std::max(local_max_m, edges[i].src); local_max_n = std::max(local_max_n, edges[i].dst); } MPI_Allreduce(&local_max_m, &max_m, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); MPI_Allreduce(&local_max_n, &max_n, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); } template <typename T> void ReadEdges(edgelist_t<T>* edgelist, const char* fname_in, bool binaryformat=true, bool header=true, bool edgeweights=true, bool randomize=false) { load_edgelist(fname_in, edgelist, binaryformat, header, edgeweights); if (randomize) { randomize_edgelist_square<T>(edgelist); } } template <typename T> void WriteEdges(const edgelist_t<T>& edgelist, const char* fname_in, bool binaryformat=true, bool header=true, bool edgeweights=true) { write_edgelist(fname_in, edgelist, binaryformat, header, edgeweights); } #endif // SRC_EDGELIST_H_
lis_matrix_jds.c
/* Copyright (C) 2002-2012 The SSI Project. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H #include "lis_config.h" #else #ifdef HAVE_CONFIG_WIN32_H #include "lis_config_win32.h" #endif #endif #include <stdio.h> #include <stdlib.h> #ifdef HAVE_MALLOC_H #include <malloc.h> #endif #include <string.h> #include <stdarg.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif #ifdef USE_MPI #include <mpi.h> #endif #include "lislib.h" /************************************************ * function | SOM | *-----------------------------+-----+ * lis_matrix_set | o | * lis_matrix_setDLU | o | * lis_matrix_malloc | o | * lis_matrix_elements_copy | o | * lis_matrix_transpose | o | * lis_matrix_split | o | * lis_matrix_merge | o | *-----------------------------+-----+-----+ * function |merge|split| *-----------------------------+-----+-----| * lis_matrix_convert | o | | * lis_matrix_copy | o | o | * lis_matrix_get_jdsgonal | o | o | * lis_matrix_scaling | o | o | * lis_matrix_scaling_symm | o | o | * lis_matrix_normf | o | o | * lis_matrix_sort | o | o | * lis_matrix_solve | xxx | o | * lis_matrix_solvet | xxx | o | ************************************************/ #undef __FUNC__ #define __FUNC__ "lis_matrix_set_jds" LIS_INT lis_matrix_set_jds(LIS_INT nnz, LIS_INT maxnzr, LIS_INT *perm, LIS_INT *ptr, LIS_INT *index, LIS_SCALAR *value, LIS_MATRIX A) { LIS_INT i,n; LIS_INT *col; LIS_INT err; LIS_DEBUG_FUNC_IN; #if 0 err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET); if( err ) return err; #else if(lis_matrix_is_assembled(A)) return LIS_SUCCESS; else { err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET); if( err ) return err; } #endif n = A->n; col = (LIS_INT *)lis_malloc(n*sizeof(LIS_INT),"lis_matrix_set_jds::col"); if( col==NULL ) { LIS_SETERR_MEM(n*sizeof(LIS_INT)); return LIS_OUT_OF_MEMORY; } for(i=0;i<n;i++) { col[perm[i]] = i; } A->col = col; A->row = perm; A->ptr = ptr; A->index = index; A->value = value; A->is_copy = LIS_FALSE; A->status = -LIS_MATRIX_JDS; A->nnz = nnz; A->maxnzr = maxnzr; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef 
__FUNC__ #define __FUNC__ "lis_matrix_setDLU_jds" LIS_INT lis_matrix_setDLU_jds(LIS_INT lnnz, LIS_INT unnz, LIS_INT lmaxnzr, LIS_INT umaxnzr, LIS_SCALAR *diag, LIS_INT *lperm, LIS_INT *lptr, LIS_INT *lindex, LIS_SCALAR *lvalue, LIS_INT *uperm, LIS_INT *uptr, LIS_INT *uindex, LIS_SCALAR *uvalue, LIS_MATRIX A) { LIS_INT n,i,err; LIS_INT *lcol,*ucol; LIS_MATRIX_DIAG D; LIS_DEBUG_FUNC_IN; n = A->n; #if 0 err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET); if( err ) return err; #else if(lis_matrix_is_assembled(A)) return LIS_SUCCESS; else { err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET); if( err ) return err; } #endif A->L = (LIS_MATRIX_CORE)lis_calloc(sizeof(struct LIS_MATRIX_CORE_STRUCT),"lis_matrix_setDLU_jds::A->L"); if( A->L==NULL ) { LIS_SETERR_MEM(sizeof(struct LIS_MATRIX_CORE_STRUCT)); return LIS_OUT_OF_MEMORY; } A->U = (LIS_MATRIX_CORE)lis_calloc(sizeof(struct LIS_MATRIX_CORE_STRUCT),"lis_matrix_setDLU_jds::A->U"); if( A->U==NULL ) { LIS_SETERR_MEM(sizeof(struct LIS_MATRIX_CORE_STRUCT)); lis_matrix_DLU_destroy(A); return LIS_OUT_OF_MEMORY; } err = lis_matrix_diag_create(A->n,0,A->comm,&D); if( err ) { lis_matrix_DLU_destroy(A); return err; } lcol = (LIS_INT *)lis_malloc(n*sizeof(LIS_INT),"lis_matrix_setDLU_jds::lcol"); if( lcol==NULL ) { LIS_SETERR_MEM(n*sizeof(LIS_INT)); lis_matrix_DLU_destroy(A); return LIS_OUT_OF_MEMORY; } ucol = (LIS_INT *)lis_malloc(n*sizeof(LIS_INT),"lis_matrix_setDLU_jds::ucol"); if( ucol==NULL ) { LIS_SETERR_MEM(n*sizeof(LIS_INT)); lis_matrix_DLU_destroy(A); lis_free(lcol); return LIS_OUT_OF_MEMORY; } for(i=0;i<n;i++) { lcol[lperm[i]] = i; ucol[uperm[i]] = i; } lis_free(D->value); D->value = diag; A->D = D; A->L->nnz = lnnz; A->L->maxnzr = lmaxnzr; A->L->col = lcol; A->L->row = lperm; A->L->ptr = lptr; A->L->index = lindex; A->L->value = lvalue; A->U->nnz = unnz; A->U->maxnzr = umaxnzr; A->U->col = ucol; A->U->row = uperm; A->U->ptr = uptr; A->U->index = uindex; A->U->value = uvalue; A->is_copy = LIS_FALSE; A->status = -LIS_MATRIX_JDS; 
A->is_splited = LIS_TRUE; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_malloc_jds" LIS_INT lis_matrix_malloc_jds(LIS_INT n, LIS_INT nnz, LIS_INT maxnzr, LIS_INT **perm, LIS_INT **ptr, LIS_INT **index, LIS_SCALAR **value) { LIS_INT nprocs; LIS_DEBUG_FUNC_IN; *perm = NULL; *ptr = NULL; *index = NULL; *value = NULL; #ifdef _OPENMP nprocs = omp_get_max_threads(); #else nprocs = 1; #endif *perm = (LIS_INT *)lis_malloc( n*sizeof(LIS_INT),"lis_matrix_malloc_jds::perm" ); if( *perm==NULL ) { LIS_SETERR_MEM(n*sizeof(LIS_INT)); lis_free2(4,*perm,*ptr,*index,*value); return LIS_OUT_OF_MEMORY; } *ptr = (LIS_INT *)lis_malloc( nprocs*(maxnzr+1)*sizeof(LIS_INT),"lis_matrix_malloc_jds::ptr" ); if( *ptr==NULL ) { LIS_SETERR_MEM(nprocs*(maxnzr+1)*sizeof(LIS_INT)); lis_free2(4,*perm,*ptr,*index,*value); return LIS_OUT_OF_MEMORY; } *index = (LIS_INT *)lis_malloc( nnz*sizeof(LIS_INT),"lis_matrix_malloc_jds::index" ); if( *index==NULL ) { LIS_SETERR_MEM(nnz*sizeof(LIS_INT)); lis_free2(4,*perm,*ptr,*index,*value); return LIS_OUT_OF_MEMORY; } *value = (LIS_SCALAR *)lis_malloc( nnz*sizeof(LIS_SCALAR),"lis_matrix_malloc_jds::value" ); if( *value==NULL ) { LIS_SETERR_MEM(nnz*sizeof(LIS_SCALAR)); lis_free2(4,*perm,*ptr,*index,*value); return LIS_OUT_OF_MEMORY; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_elements_copy_jds" LIS_INT lis_matrix_elements_copy_jds(LIS_INT n, LIS_INT maxnzr, LIS_INT *perm, LIS_INT *ptr, LIS_INT *index, LIS_SCALAR *value, LIS_INT *o_perm, LIS_INT *o_ptr, LIS_INT *o_index, LIS_SCALAR *o_value) { LIS_INT i,j,is,ie; LIS_INT nprocs,my_rank; LIS_DEBUG_FUNC_IN; #ifdef _OPENMP nprocs = omp_get_max_threads(); #else nprocs = 1; #endif #ifdef _OPENMP #pragma omp parallel private(i,j,is,ie,my_rank) #endif { #ifdef _OPENMP my_rank = omp_get_thread_num(); #else my_rank = 0; #endif LIS_GET_ISIE(my_rank,nprocs,n,is,ie); for(j=0;j<maxnzr+1;j++) { o_ptr[my_rank*(maxnzr+1) + j] = 
ptr[my_rank*(maxnzr+1) + j]; } for(i=is;i<ie;i++) { o_perm[i] = perm[i]; } for(j=0;j<maxnzr;j++) { for(i=ptr[my_rank*(maxnzr+1) + j];i<ptr[my_rank*(maxnzr+1) + j+1];i++) { o_value[i] = value[i]; o_index[i] = index[i]; } } } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_copy_jds" LIS_INT lis_matrix_copy_jds(LIS_MATRIX Ain, LIS_MATRIX Aout) { LIS_INT err; LIS_INT i,n,nnz,lnnz,unnz,maxnzr,lmaxnzr,umaxnzr; LIS_INT *perm,*ptr,*index; LIS_INT *lperm,*lptr,*lindex; LIS_INT *uperm,*uptr,*uindex; LIS_SCALAR *value,*lvalue,*uvalue,*diag; LIS_DEBUG_FUNC_IN; n = Ain->n; if( Ain->is_splited ) { lmaxnzr = Ain->L->maxnzr; umaxnzr = Ain->U->maxnzr; lnnz = Ain->L->nnz; unnz = Ain->U->nnz; lperm = NULL; lptr = NULL; lindex = NULL; uperm = NULL; uptr = NULL; uindex = NULL; diag = NULL; err = lis_matrix_malloc_jds(n,lnnz,lmaxnzr,&lperm,&lptr,&lindex,&lvalue); if( err ) { return err; } err = lis_matrix_malloc_jds(n,unnz,umaxnzr,&uperm,&uptr,&uindex,&uvalue); if( err ) { lis_free2(9,diag,uperm,lperm,uptr,lptr,uindex,lindex,uvalue,lvalue); return err; } diag = (LIS_SCALAR *)lis_malloc(n*sizeof(LIS_SCALAR),"lis_matrix_copy_jds::diag"); if( diag==NULL ) { lis_free2(9,diag,uperm,lperm,uptr,lptr,uindex,lindex,uvalue,lvalue); return err; } #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0;i<n;i++) { diag[i] = Ain->D->value[i]; } lis_matrix_elements_copy_jds(n,lmaxnzr,Ain->L->row,Ain->L->ptr,Ain->L->index,Ain->L->value,lperm,lptr,lindex,lvalue); lis_matrix_elements_copy_jds(n,umaxnzr,Ain->U->row,Ain->U->ptr,Ain->U->index,Ain->U->value,uperm,uptr,uindex,uvalue); err = lis_matrix_setDLU_jds(lnnz,unnz,lmaxnzr,umaxnzr,diag,lperm,lptr,lindex,lvalue,uperm,uptr,uindex,uvalue,Aout); if( err ) { lis_free2(9,diag,uperm,lperm,uptr,lptr,uindex,lindex,uvalue,lvalue); return err; } } if( !Ain->is_splited || (Ain->is_splited && Ain->is_save) ) { perm = NULL; ptr = NULL; index = NULL; value = NULL; maxnzr = Ain->maxnzr; nnz = Ain->nnz; err = 
/* --- tail of a JDS conversion routine whose head lies above this chunk:
   allocate the JDS arrays, copy the elements, commit them into Aout and
   assemble.  `err` is presumably assigned by the (not visible) call that
   precedes this fragment -- TODO confirm against the full file. */
lis_matrix_malloc_jds(n,nnz,maxnzr,&perm,&ptr,&index,&value);
if( err )
{
    return err;
}
lis_matrix_elements_copy_jds(n,maxnzr,Ain->row,Ain->ptr,Ain->index,Ain->value,perm,ptr,index,value);
err = lis_matrix_set_jds(nnz,maxnzr,perm,ptr,index,value,Aout);
if( err )
{
    /* set_jds failed: the arrays were never handed over, free all four */
    lis_free2(4,perm,ptr,index,value);
    return err;
}
}
err = lis_matrix_assemble(Aout);
if( err )
{
    lis_matrix_storage_destroy(Aout);
    return err;
}
LIS_DEBUG_FUNC_OUT;
return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_get_diagonal_jds"
/* Extract the main diagonal of JDS matrix A into d[0..n-1].
   Split matrices keep the diagonal separately in A->D; otherwise the
   jagged diagonals are scanned until all n diagonal entries are found. */
LIS_INT lis_matrix_get_diagonal_jds(LIS_MATRIX A, LIS_SCALAR d[])
{
    LIS_INT i,j,k,l;
    LIS_INT n,maxnzr;
#ifdef _OPENMP
    LIS_INT is,ie,js,je;
    LIS_INT nprocs,my_rank;
#endif
    LIS_DEBUG_FUNC_IN;
    n = A->n;
    maxnzr = A->maxnzr;
    /* k counts diagonal entries still to be found (serial path) */
    k = n;
    if( A->is_splited )
    {
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
        for(i=0; i<n; i++)
        {
            d[i] = A->D->value[i];
        }
    }
    else
    {
#ifdef _OPENMP
        nprocs = omp_get_max_threads();
#pragma omp parallel private(i,j,k,l,is,ie,js,je,my_rank)
        {
            my_rank = omp_get_thread_num();
            LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
            /* each thread owns rows [is,ie); its JDS ptr block starts at
               my_rank*(maxnzr+1) */
            k = ie-is;
            for(j=0;j<maxnzr;j++)
            {
                l = is;
                js = A->ptr[my_rank*(maxnzr+1) + j];
                je = A->ptr[my_rank*(maxnzr+1) + j+1];
                for(i=js;i<je;i++)
                {
                    /* diagonal entry: column index equals the permuted row */
                    if( A->row[l]==A->index[i] )
                    {
                        d[A->row[l]] = A->value[i];
                        k--;
                        if( k==0 ) goto get_diag_end;
                    }
                    l++;
                }
            }
get_diag_end:
            ;
        }
#else
        for(j=0;j<maxnzr;j++)
        {
            l = 0;
            for(i=A->ptr[j];i<A->ptr[j+1];i++)
            {
                if( A->row[l]==A->index[i] )
                {
                    d[A->row[l]] = A->value[i];
                    k--;
                    /* early out once every diagonal entry has been copied */
                    if( k==0 ) return LIS_SUCCESS;
                }
                l++;
            }
        }
#endif
    }
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_scaling_jds"
/* Row-scale a JDS matrix in place: value(i,j) *= d[i].  For a split matrix
   the diagonal is set to 1 (it is assumed d holds inverse diagonal scaling);
   L and U parts are scaled by the row factor. */
LIS_INT lis_matrix_scaling_jds(LIS_MATRIX A, LIS_SCALAR d[])
{
    LIS_INT i,j,k,is,ie,js,je;
    LIS_INT n,maxnzr;
    LIS_INT nprocs,my_rank;
    LIS_DEBUG_FUNC_IN;
    n = A->n;
    if( A->is_splited )
    {
#ifdef _OPENMP
        nprocs = omp_get_max_threads();
#else
        nprocs = 1;
#endif
#ifdef _OPENMP
#pragma omp parallel private(i,j,k,is,ie,js,je,my_rank)
#endif
        {
#ifdef _OPENMP
            my_rank = omp_get_thread_num();
#else
            my_rank = 0;
#endif
            LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
            for(i=is;i<ie;i++)
            {
                A->D->value[i] = 1.0;
            }
            /* k walks the permuted row list in step with the diagonal scan */
            for(j=0;j<A->L->maxnzr;j++)
            {
                k = is;
                js = A->L->ptr[my_rank*(A->L->maxnzr+1) + j];
                je = A->L->ptr[my_rank*(A->L->maxnzr+1) + j+1];
                for(i=js;i<je;i++)
                {
                    A->L->value[i] *= d[A->L->row[k]];
                    k++;
                }
            }
            for(j=0;j<A->U->maxnzr;j++)
            {
                k = is;
                js = A->U->ptr[my_rank*(A->U->maxnzr+1) + j];
                je = A->U->ptr[my_rank*(A->U->maxnzr+1) + j+1];
                for(i=js;i<je;i++)
                {
                    A->U->value[i] *= d[A->U->row[k]];
                    k++;
                }
            }
        }
    }
    else
    {
        maxnzr = A->maxnzr;
#ifdef _OPENMP
        nprocs = omp_get_max_threads();
#else
        nprocs = 1;
#endif
#ifdef _OPENMP
#pragma omp parallel private(i,j,k,is,ie,js,je,my_rank)
#endif
        {
#ifdef _OPENMP
            my_rank = omp_get_thread_num();
#else
            my_rank = 0;
#endif
            LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
            for(j=0;j<maxnzr;j++)
            {
                k = is;
                js = A->ptr[my_rank*(maxnzr+1) + j];
                je = A->ptr[my_rank*(maxnzr+1) + j+1];
                for(i=js;i<je;i++)
                {
                    A->value[i] *= d[A->row[k]];
                    k++;
                }
            }
        }
    }
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_scaling_symm_jds"
/* Symmetric scaling in place: value(i,j) *= d[i]*d[j].  Same traversal as
   lis_matrix_scaling_jds but with the additional column factor d[index]. */
LIS_INT lis_matrix_scaling_symm_jds(LIS_MATRIX A, LIS_SCALAR d[])
{
    LIS_INT i,j,k,is,ie,js,je;
    LIS_INT n,maxnzr;
    LIS_INT nprocs,my_rank;
    LIS_DEBUG_FUNC_IN;
    n = A->n;
    if( A->is_splited )
    {
#ifdef _OPENMP
        nprocs = omp_get_max_threads();
#else
        nprocs = 1;
#endif
#ifdef _OPENMP
#pragma omp parallel private(i,j,k,is,ie,js,je,my_rank)
#endif
        {
#ifdef _OPENMP
            my_rank = omp_get_thread_num();
#else
            my_rank = 0;
#endif
            LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
            for(i=is;i<ie;i++)
            {
                A->D->value[i] = 1.0;
            }
            for(j=0;j<A->L->maxnzr;j++)
            {
                k = is;
                js = A->L->ptr[my_rank*(A->L->maxnzr+1) + j];
                je = A->L->ptr[my_rank*(A->L->maxnzr+1) + j+1];
                for(i=js;i<je;i++)
                {
                    /* row factor times column factor */
                    A->L->value[i] *= d[A->L->row[k]]*d[A->L->index[i]];
                    k++;
                }
            }
            for(j=0;j<A->U->maxnzr;j++)
            {
                k = is;
                js = A->U->ptr[my_rank*(A->U->maxnzr+1) + j];
                je = A->U->ptr[my_rank*(A->U->maxnzr+1) + j+1];
                for(i=js;i<je;i++)
                {
                    A->U->value[i] *= d[A->U->row[k]]*d[A->U->index[i]];
                    k++;
                }
            }
        }
    }
    else
    {
        maxnzr = A->maxnzr;
#ifdef _OPENMP
        nprocs = omp_get_max_threads();
#else
        nprocs = 1;
#endif
#ifdef _OPENMP
#pragma omp parallel private(i,j,k,is,ie,js,je,my_rank)
#endif
        {
#ifdef _OPENMP
            my_rank = omp_get_thread_num();
#else
            my_rank = 0;
#endif
            LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
            for(j=0;j<maxnzr;j++)
            {
                k = is;
                js = A->ptr[my_rank*(maxnzr+1) + j];
                je = A->ptr[my_rank*(maxnzr+1) + j+1];
                for(i=js;i<je;i++)
                {
                    A->value[i] *= d[A->row[k]]*d[A->index[i]];
                    k++;
                }
            }
        }
    }
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_normf_jds"
/* Frobenius norm of A into *nrm.
   NOTE(review): both branches use index[] as if it were a row-pointer array
   (e.g. j runs from A->index[i] to A->index[i+1]); in JDS index[] holds
   column indices.  This looks like a copy-paste from the CRS version --
   verify against upstream before relying on this routine. */
LIS_INT lis_matrix_normf_jds(LIS_MATRIX A, LIS_SCALAR *nrm)
{
    LIS_INT i,j;
    LIS_INT n;
    LIS_SCALAR sum;
    LIS_DEBUG_FUNC_IN;
    n = A->n;
    sum = (LIS_SCALAR)0;
    if( A->is_splited )
    {
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum) private(i,j)
#endif
        for(i=0; i<n; i++)
        {
            sum += A->D->value[i]*A->D->value[i];
            for(j=A->L->index[i];j<A->L->index[i+1];j++)
            {
                sum += A->L->value[j]*A->L->value[j];
            }
            for(j=A->U->index[i];j<A->U->index[i+1];j++)
            {
                sum += A->U->value[j]*A->U->value[j];
            }
        }
    }
    else
    {
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum) private(i,j)
#endif
        for(i=0; i<n; i++)
        {
            sum += A->value[i]*A->value[i];
            for(j=A->index[i];j<A->index[i+1];j++)
            {
                sum += A->value[j]*A->value[j];
            }
        }
    }
    *nrm = sqrt(sum);
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_transpose_jds"
/* Transpose stub: only tags *Aout as JDS; the actual conversion call is
   commented out, so no data is transposed here. */
LIS_INT lis_matrix_transpose_jds(LIS_MATRIX Ain, LIS_MATRIX *Aout)
{
    LIS_DEBUG_FUNC_IN;
    /* err = lis_matrix_convert_jds2ccs(Ain,Aout);*/
    (*Aout)->matrix_type = LIS_MATRIX_JDS;
    (*Aout)->status = LIS_MATRIX_JDS;
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_split_jds"
/* Split A into strictly-lower (A->L), strictly-upper (A->U) and diagonal
   (A->D) parts, each of L/U stored in its own JDS structure.
   (declarations here; body continues below) */
LIS_INT lis_matrix_split_jds(LIS_MATRIX A)
{
    LIS_INT i,j,k,kk,n,nnz,maxnzr;
    LIS_INT lnnz,unnz,lmaxnzr,umaxnzr;
    LIS_INT err;
    LIS_INT *liw,*uiw,*liw2,*uiw2;
    LIS_INT *lperm,*lptr,*lindex,*lcol;
    LIS_INT *uperm,*uptr,*uindex,*ucol;
#ifdef _OPENMP
    LIS_INT *iw;
    LIS_INT my_rank,nprocs,is,ie,js,je;
#endif
    LIS_SCALAR *lvalue,*uvalue;
    LIS_MATRIX_DIAG D;
    LIS_DEBUG_FUNC_IN;
    n = A->n;
    nnz = A->nnz;
    maxnzr = A->maxnzr;
    lmaxnzr = 0;
    umaxnzr = 0;
    lnnz = 0;
    unnz = 0;
    liw = NULL;
    uiw = NULL;
    liw2 = NULL;
    uiw2 = NULL;
    D = NULL;
    lperm = NULL;
    lcol = NULL;
    lptr = NULL;
    lindex = NULL;
    lvalue = NULL;
    uperm = NULL;
    ucol = NULL;
    uptr = NULL;
    uindex = NULL;
    uvalue = NULL;
    /* liw/uiw: per-row nonzero counts of the L/U parts;
       liw2/uiw2: later reused as inverse permutations */
    liw = (LIS_INT *)lis_malloc(n*sizeof(LIS_INT),"lis_matrix_split_jds::liw");
    if( liw==NULL )
    {
        LIS_SETERR_MEM(n*sizeof(LIS_INT));
        return LIS_ERR_OUT_OF_MEMORY;
    }
    uiw = (LIS_INT *)lis_malloc(n*sizeof(LIS_INT),"lis_matrix_split_jds::uiw");
    if( uiw==NULL )
    {
        LIS_SETERR_MEM(n*sizeof(LIS_INT));
        lis_free2(12,liw,uiw,liw2,uiw2,lperm,lptr,lindex,lvalue,uperm,uptr,uindex,uvalue);
        return LIS_ERR_OUT_OF_MEMORY;
    }
    liw2 = (LIS_INT *)lis_malloc(n*sizeof(LIS_INT),"lis_matrix_split_jds::liw2");
    if( liw2==NULL )
    {
        LIS_SETERR_MEM(n*sizeof(LIS_INT));
        lis_free2(12,liw,uiw,liw2,uiw2,lperm,lptr,lindex,lvalue,uperm,uptr,uindex,uvalue);
        return LIS_ERR_OUT_OF_MEMORY;
    }
    uiw2 = (LIS_INT *)lis_malloc(n*sizeof(LIS_INT),"lis_matrix_split_jds::uiw2");
    if( uiw2==NULL )
    {
        LIS_SETERR_MEM(n*sizeof(LIS_INT));
        lis_free2(12,liw,uiw,liw2,uiw2,lperm,lptr,lindex,lvalue,uperm,uptr,uindex,uvalue);
        return LIS_ERR_OUT_OF_MEMORY;
    }
#ifdef _OPENMP
    nprocs = omp_get_max_threads();
    /* iw: per-thread scratch; slots 0/1 = running L/U maxnzr,
       2/3 = L/U nonzero counts, 4/5 = prefix-summed offsets */
    iw = (LIS_INT *)lis_malloc((nprocs+1)*LIS_VEC_TMP_PADD*sizeof(LIS_INT),"lis_matrix_split_jds::iw");
    if( iw==NULL )
    {
        LIS_SETERR_MEM((nprocs+1)*LIS_VEC_TMP_PADD*sizeof(LIS_INT));
        return LIS_OUT_OF_MEMORY;
    }
#pragma omp parallel private(i,j,k,is,ie,js,je,my_rank)
    {
        my_rank = omp_get_thread_num();
        LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
        memset(&liw[is],0,(ie-is)*sizeof(LIS_INT));
        memset(&uiw[is],0,(ie-is)*sizeof(LIS_INT));
        iw[my_rank*LIS_VEC_TMP_PADD] = 0;
        iw[my_rank*LIS_VEC_TMP_PADD+1] = 0;
        iw[my_rank*LIS_VEC_TMP_PADD+2] = 0;
        iw[my_rank*LIS_VEC_TMP_PADD+3] = 0;
        /* count how many strictly-lower / strictly-upper entries each row has */
        for(j=0;j<maxnzr;j++)
        {
            k = is;
            js = A->ptr[my_rank*(maxnzr+1) + j];
            je = A->ptr[my_rank*(maxnzr+1) + j+1];
            for(i=js;i<je;i++)
            {
                if( A->index[i]<A->row[k] )
                {
                    iw[my_rank*LIS_VEC_TMP_PADD+2]++;
                    liw[k]++;
                }
                else if( A->index[i]>A->row[k] )
                {
                    iw[my_rank*LIS_VEC_TMP_PADD+3]++;
                    uiw[k]++;
                }
                k++;
            }
        }
        for(i=is;i<ie;i++)
        {
            if( iw[my_rank*LIS_VEC_TMP_PADD]<liw[i] ) iw[my_rank*LIS_VEC_TMP_PADD] = liw[i];
            if( iw[my_rank*LIS_VEC_TMP_PADD+1]<uiw[i] ) iw[my_rank*LIS_VEC_TMP_PADD+1] = uiw[i];
        }
    }
    /* reduce per-thread maxima and prefix-sum the per-thread nnz counts */
    iw[4] = 0;
    iw[5] = 0;
    for(i=0;i<nprocs;i++)
    {
        if( iw[i*LIS_VEC_TMP_PADD]>lmaxnzr ) lmaxnzr = iw[i*LIS_VEC_TMP_PADD];
        if( iw[i*LIS_VEC_TMP_PADD+1]>umaxnzr ) umaxnzr = iw[i*LIS_VEC_TMP_PADD+1];
        iw[(i+1)*LIS_VEC_TMP_PADD+4] = iw[i*LIS_VEC_TMP_PADD+4] + iw[i*LIS_VEC_TMP_PADD+2];
        iw[(i+1)*LIS_VEC_TMP_PADD+5] = iw[i*LIS_VEC_TMP_PADD+5] + iw[i*LIS_VEC_TMP_PADD+3];
    }
    lnnz = iw[nprocs*LIS_VEC_TMP_PADD+4];
    unnz = iw[nprocs*LIS_VEC_TMP_PADD+5];
#else
    memset(liw,0,n*sizeof(LIS_INT));
    memset(uiw,0,n*sizeof(LIS_INT));
    for(j=0;j<maxnzr;j++)
    {
        k = 0;
        for(i=A->ptr[j];i<A->ptr[j+1];i++)
        {
            if( A->index[i]<A->row[k] )
            {
                lnnz++;
                liw[k]++;
            }
            else if( A->index[i]>A->row[k] )
            {
                unnz++;
                uiw[k]++;
            }
            k++;
        }
    }
    for(i=0;i<n;i++)
    {
        if( lmaxnzr<liw[i] ) lmaxnzr = liw[i];
        if( umaxnzr<uiw[i] ) umaxnzr = uiw[i];
    }
#endif
    err = lis_matrix_LU_create(A);
    if( err )
    {
        lis_free2(12,liw,uiw,liw2,uiw2,lperm,lptr,lindex,lvalue,uperm,uptr,uindex,uvalue);
        return err;
    }
    err = lis_matrix_malloc_jds(n,lnnz,lmaxnzr,&lperm,&lptr,&lindex,&lvalue);
    if( err )
    {
        lis_free2(12,liw,uiw,liw2,uiw2,lperm,lptr,lindex,lvalue,uperm,uptr,uindex,uvalue);
        return err;
    }
    err = lis_matrix_malloc_jds(n,unnz,umaxnzr,&uperm,&uptr,&uindex,&uvalue);
    if( err )
    {
        lis_free2(12,liw,uiw,liw2,uiw2,lperm,lptr,lindex,lvalue,uperm,uptr,uindex,uvalue);
        return err;
    }
    err = lis_matrix_diag_duplicateM(A,&D);
    if( err )
    {
        lis_free2(12,liw,uiw,liw2,uiw2,lperm,lptr,lindex,lvalue,uperm,uptr,uindex,uvalue);
        return err;
    }
#ifdef _OPENMP
    nprocs = omp_get_max_threads();
#pragma omp parallel private(i,j,k,is,ie,js,je,my_rank)
    {
        my_rank = omp_get_thread_num();
        LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
        /* build the per-thread JDS row pointers for L and U */
        memset(&lptr[my_rank*(lmaxnzr+1)],0,(lmaxnzr+1)*sizeof(LIS_INT));
        memset(&uptr[my_rank*(umaxnzr+1)],0,(umaxnzr+1)*sizeof(LIS_INT));
        for(i=is;i<ie;i++)
        {
            lperm[i] = A->row[i];
            uperm[i] = A->row[i];
            for(j=0;j<liw[i];j++)
            {
                lptr[my_rank*(lmaxnzr+1) + j+1]++;
            }
            for(j=0;j<uiw[i];j++)
            {
                uptr[my_rank*(umaxnzr+1) + j+1]++;
            }
        }
        /* sort rows by descending nonzero count (JDS invariant) */
        lis_sortr_ii(is,ie-1,liw,lperm);
        lis_sortr_ii(is,ie-1,uiw,uperm);
        lptr[my_rank*(lmaxnzr+1)] = iw[my_rank*LIS_VEC_TMP_PADD+4];
        uptr[my_rank*(umaxnzr+1)] = iw[my_rank*LIS_VEC_TMP_PADD+5];
        for(j=0;j<lmaxnzr;j++)
        {
            lptr[my_rank*(lmaxnzr+1) + j+1] += lptr[my_rank*(lmaxnzr+1) + j];
        }
        for(j=0;j<umaxnzr;j++)
        {
            uptr[my_rank*(umaxnzr+1) + j+1] += uptr[my_rank*(umaxnzr+1) + j];
        }
        /* liw/uiw become per-row fill cursors; liw2/uiw2 the inverse perms */
        for(i=is;i<ie;i++)
        {
            liw[i] = 0;
            uiw[i] = 0;
            liw2[lperm[i]] = i;
            uiw2[uperm[i]] = i;
        }
        /* scatter each entry of A into L, U, or D */
        for(j=0;j<maxnzr;j++)
        {
            k = is;
            for(i=A->ptr[my_rank*(maxnzr+1) + j];i<A->ptr[my_rank*(maxnzr+1) + j+1];i++)
            {
                if( A->index[i]<A->row[k] )
                {
                    kk = lptr[my_rank*(lmaxnzr+1) + liw[A->row[k]]] + liw2[A->row[k]] - is;
                    liw[A->row[k]]++;
                    lindex[kk] = A->index[i];
                    lvalue[kk] = A->value[i];
                }
                else if( A->index[i]>A->row[k] )
                {
                    kk = uptr[my_rank*(umaxnzr+1) + uiw[A->row[k]]] + uiw2[A->row[k]] - is;
                    uiw[A->row[k]]++;
                    uindex[kk] = A->index[i];
                    uvalue[kk] = A->value[i];
                }
                else
                {
                    D->value[A->row[k]] = A->value[i];
                }
                k++;
            }
        }
    }
    lis_free(iw);
#else
    memset(lptr,0,(lmaxnzr+1)*sizeof(LIS_INT));
    memset(uptr,0,(umaxnzr+1)*sizeof(LIS_INT));
    for(i=0;i<n;i++)
    {
        lperm[i] = A->row[i];
        uperm[i] = A->row[i];
        for(j=0;j<liw[i];j++)
        {
            lptr[j+1]++;
        }
        for(j=0;j<uiw[i];j++)
        {
            uptr[j+1]++;
        }
    }
    lis_sortr_ii(0,n-1,liw,lperm);
    lis_sortr_ii(0,n-1,uiw,uperm);
    for(j=0;j<lmaxnzr;j++)
    {
        lptr[j+1] += lptr[j];
    }
    for(j=0;j<umaxnzr;j++)
    {
        uptr[j+1] += uptr[j];
    }
    for(i=0;i<n;i++)
    {
        liw[i] = 0;
        uiw[i] = 0;
        liw2[lperm[i]] = i;
        uiw2[uperm[i]] = i;
    }
    for(j=0;j<maxnzr;j++)
    {
        k = 0;
        for(i=A->ptr[j];i<A->ptr[j+1];i++)
        {
            if( A->index[i]<A->row[k] )
            {
                kk = lptr[liw[A->row[k]]] + liw2[A->row[k]];
                liw[A->row[k]]++;
                lindex[kk] = A->index[i];
                lvalue[kk] = A->value[i];
            }
            else if( A->index[i]>A->row[k] )
            {
                kk = uptr[uiw[A->row[k]]] + uiw2[A->row[k]];
                uiw[A->row[k]]++;
                uindex[kk] = A->index[i];
                uvalue[kk] = A->value[i];
            }
            else
            {
                D->value[A->row[k]] = A->value[i];
            }
            k++;
        }
    }
#endif
    /* hand the built arrays over to A; liw2/uiw2 are kept as the col maps */
    A->L->nnz = lnnz;
    A->L->maxnzr = lmaxnzr;
    A->L->col = liw2;
    A->L->row = lperm;
    A->L->ptr = lptr;
    A->L->index = lindex;
    A->L->value = lvalue;
    A->U->nnz = unnz;
    A->U->maxnzr = umaxnzr;
    A->U->col = uiw2;
    A->U->row = uperm;
    A->U->ptr = uptr;
    A->U->index = uindex;
    A->U->value = uvalue;
    A->D = D;
    A->is_splited = LIS_TRUE;
    lis_free2(2,liw,uiw);
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_merge_jds"
/* Re-merge a split JDS matrix (A->L + A->D + A->U) back into a single JDS
   structure, replacing A->row/ptr/index/value.
   NOTE(review): in the OpenMP path the scratch array iw3 is never freed
   before return -- looks like a memory leak; confirm against upstream. */
LIS_INT lis_matrix_merge_jds(LIS_MATRIX A)
{
    LIS_INT i,j,k,kk,ie;
    LIS_INT n,nnz,maxnzr,nn;
    LIS_INT err;
    LIS_INT *perm,*ptr,*index,*iw,*iw2;
    LIS_SCALAR *value;
#ifdef _OPENMP
    LIS_INT is,js,je,nprocs,my_rank;
    LIS_INT *iw3;
#endif
    LIS_DEBUG_FUNC_IN;
    n = A->n;
    nn = n;
    perm = NULL;
    ptr = NULL;
    index = NULL;
    value = NULL;
    iw = NULL;
    iw2 = NULL;
    /* merged size: every row regains its diagonal entry, hence + n */
    nnz = A->L->nnz + A->U->nnz + n;
    iw = (LIS_INT *)lis_malloc(n*sizeof(LIS_INT),"lis_matrix_merge_jds::iw");
    if( iw==NULL )
    {
        LIS_SETERR_MEM(n*sizeof(LIS_INT));
        return LIS_OUT_OF_MEMORY;
    }
    iw2 = (LIS_INT *)lis_malloc(n*sizeof(LIS_INT),"lis_matrix_merge_jds::iw2");
    if( iw2==NULL )
    {
        LIS_SETERR_MEM(n*sizeof(LIS_INT));
        lis_free2(2,iw,iw2);
        return LIS_OUT_OF_MEMORY;
    }
#ifdef _OPENMP
    nprocs = omp_get_max_threads();
    iw3 = (LIS_INT *)lis_malloc((nprocs+1)*LIS_VEC_TMP_PADD*sizeof(LIS_INT),"lis_matrix_merge_jds::iw3");
    if( iw3==NULL )
    {
        LIS_SETERR_MEM((nprocs+1)*LIS_VEC_TMP_PADD*sizeof(LIS_INT));
        return LIS_OUT_OF_MEMORY;
    }
#pragma omp parallel private(i,j,k,is,ie,js,je,my_rank)
    {
        my_rank = omp_get_thread_num();
        LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
        /* slot 0 = local max row length, slot 1 = local nnz (starts at the
           ie-is diagonal entries) */
        iw3[my_rank*LIS_VEC_TMP_PADD] = 0;
        iw3[my_rank*LIS_VEC_TMP_PADD+1] = (ie-is);
        for(i=is;i<ie;i++)
        {
            iw[i] = 1;
        }
        for(j=0;j<A->L->maxnzr;j++)
        {
            k = is;
            js = A->L->ptr[my_rank*(A->L->maxnzr+1) + j];
            je = A->L->ptr[my_rank*(A->L->maxnzr+1) + j+1];
            for(i=js;i<je;i++)
            {
                iw3[my_rank*LIS_VEC_TMP_PADD+1]++;
                iw[A->L->row[k++]]++;
            }
        }
        for(j=0;j<A->U->maxnzr;j++)
        {
            k = is;
            js = A->U->ptr[my_rank*(A->U->maxnzr+1) + j];
            je = A->U->ptr[my_rank*(A->U->maxnzr+1) + j+1];
            for(i=js;i<je;i++)
            {
                iw3[my_rank*LIS_VEC_TMP_PADD+1]++;
                iw[A->U->row[k++]]++;
            }
        }
        for(i=is;i<ie;i++)
        {
            if( iw3[my_rank*LIS_VEC_TMP_PADD]<iw[i] ) iw3[my_rank*LIS_VEC_TMP_PADD] = iw[i];
        }
    }
    /* reduce per-thread maxima; prefix-sum per-thread nnz into slot 2 */
    maxnzr = 0;
    iw3[2] = 0;
    for(i=0;i<nprocs;i++)
    {
        if( iw3[i*LIS_VEC_TMP_PADD]>maxnzr ) maxnzr = iw3[i*LIS_VEC_TMP_PADD];
        iw3[(i+1)*LIS_VEC_TMP_PADD+2] = iw3[i*LIS_VEC_TMP_PADD+2] + iw3[i*LIS_VEC_TMP_PADD+1];
    }
#else
    for(i=0;i<n;i++)
    {
        iw[i] = 1;
    }
    for(j=0;j<A->L->maxnzr;j++)
    {
        ie = A->L->ptr[j+1] - A->L->ptr[j];
        for(i=0;i<ie;i++)
        {
            iw[A->L->row[i]]++;
        }
    }
    for(j=0;j<A->U->maxnzr;j++)
    {
        ie = A->U->ptr[j+1] - A->U->ptr[j];
        for(i=0;i<ie;i++)
        {
            iw[A->U->row[i]]++;
        }
    }
    maxnzr = 0;
    for(i=0;i<n;i++)
    {
        if( maxnzr<iw[i] ) maxnzr = iw[i];
    }
#endif
    err = lis_matrix_malloc_jds(n,nnz,maxnzr,&perm,&ptr,&index,&value);
    if( err )
    {
        lis_free2(2,iw,iw2);
        return err;
    }
#ifdef _OPENMP
    nprocs = omp_get_max_threads();
#pragma omp parallel private(i,j,k,is,ie,js,je,my_rank)
    {
        my_rank = omp_get_thread_num();
        LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
        memset(&ptr[my_rank*(maxnzr+1)],0,(maxnzr+1)*sizeof(LIS_INT));
        for(i=is;i<ie;i++)
        {
            perm[i] = i;
            for(j=0;j<iw[i];j++)
            {
                ptr[my_rank*(maxnzr+1) + j+1]++;
            }
        }
        /* descending-row-length order required by JDS */
        lis_sortr_ii(is,ie-1,iw,perm);
        ptr[my_rank*(maxnzr+1)] = iw3[my_rank*LIS_VEC_TMP_PADD+2];
        for(j=0;j<maxnzr;j++)
        {
            ptr[my_rank*(maxnzr+1) + j+1] += ptr[my_rank*(maxnzr+1) + j];
        }
        /* iw becomes the per-row fill cursor, iw2 the inverse permutation */
        for(i=is;i<ie;i++)
        {
            iw[i] = 0;
            iw2[perm[i]] = i;
        }
        for(j=0;j<A->L->maxnzr;j++)
        {
            k = is;
            for(i=A->L->ptr[my_rank*(A->L->maxnzr+1) + j];i<A->L->ptr[my_rank*(A->L->maxnzr+1) + j+1];i++)
            {
                kk = ptr[my_rank*(maxnzr+1) + iw[A->L->row[k]]] + iw2[A->L->row[k]] - is;
                iw[A->L->row[k]]++;
                index[kk] = A->L->index[i];
                value[kk] = A->L->value[i];
                k++;
            }
        }
        /* re-insert the diagonal */
        for(i=is;i<ie;i++)
        {
            kk = ptr[my_rank*(maxnzr+1) + iw[i]] + iw2[i] - is;
            iw[i]++;
            index[kk] = i;
            value[kk] = A->D->value[i];
        }
        for(j=0;j<A->U->maxnzr;j++)
        {
            k = is;
            for(i=A->U->ptr[my_rank*(A->U->maxnzr+1) + j];i<A->U->ptr[my_rank*(A->U->maxnzr+1) + j+1];i++)
            {
                kk = ptr[my_rank*(maxnzr+1) + iw[A->U->row[k]]] + iw2[A->U->row[k]] - is;
                iw[A->U->row[k]]++;
                index[kk] = A->U->index[i];
                value[kk] = A->U->value[i];
                k++;
            }
        }
    }
#else
    memset(ptr,0,(maxnzr+1)*sizeof(LIS_INT));
    for(i=0;i<n;i++)
    {
        perm[i] = i;
        for(j=0;j<iw[i];j++)
        {
            ptr[j+1]++;
        }
    }
    lis_sortr_ii(0,n-1,iw,perm);
    for(j=0;j<maxnzr;j++)
    {
        ptr[j+1] += ptr[j];
    }
    for(i=0;i<n;i++)
    {
        iw[i] = 0;
        iw2[perm[i]] = i;
    }
    for(j=0;j<A->L->maxnzr;j++)
    {
        k = 0;
        for(i=A->L->ptr[j];i<A->L->ptr[j+1];i++)
        {
            kk = ptr[iw[A->L->row[k]]] + iw2[A->L->row[k]];
            iw[A->L->row[k]]++;
            index[kk] = A->L->index[i];
            value[kk] = A->L->value[i];
            k++;
        }
    }
    for(i=0;i<n;i++)
    {
        kk = ptr[iw[i]] + iw2[i];
        iw[i]++;
        index[kk] = i;
        value[kk] = A->D->value[i];
    }
    for(j=0;j<A->U->maxnzr;j++)
    {
        k = 0;
        for(i=A->U->ptr[j];i<A->U->ptr[j+1];i++)
        {
            kk = ptr[iw[A->U->row[k]]] + iw2[A->U->row[k]];
            iw[A->U->row[k]]++;
            index[kk] = A->U->index[i];
            value[kk] = A->U->value[i];
            k++;
        }
    }
#endif
    A->nnz = nnz;
    A->row = perm;
    A->ptr = ptr;
    A->value = value;
    A->index = index;
    lis_free2(2,iw,iw2);
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_sort_jds"
/* Sort the column indices (and matching values) inside each row segment.
   NOTE(review): this iterates ptr[i]..ptr[i+1] for i<n, but for JDS ptr is
   indexed per jagged diagonal (length maxnzr+1 per thread), not per row --
   this looks CRS-shaped; verify intent against upstream. */
LIS_INT lis_matrix_sort_jds(LIS_MATRIX A)
{
    LIS_INT i,n;
    LIS_DEBUG_FUNC_IN;
    if( !A->is_sorted )
    {
        n = A->n;
        if( A->is_splited )
        {
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
            for(i=0;i<n;i++)
            {
                lis_sort_id(A->L->ptr[i],A->L->ptr[i+1]-1,A->L->index,A->L->value);
                lis_sort_id(A->U->ptr[i],A->U->ptr[i+1]-1,A->U->index,A->U->value);
            }
        }
        else
        {
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
            for(i=0;i<n;i++)
            {
                lis_sort_id(A->ptr[i],A->ptr[i+1]-1,A->index,A->value);
            }
        }
        A->is_sorted = LIS_TRUE;
    }
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_solve_jds"
/* Triangular solve on a split JDS matrix (signature continues below):
   flag selects lower, upper, or SSOR forward/backward sweeps, using the
   scaled diagonal A->WD. */
LIS_INT lis_matrix_solve_jds(LIS_MATRIX A, LIS_VECTOR B, LIS_VECTOR
X, LIS_INT flag)
{
    LIS_INT i,j,k,l,n;
    LIS_SCALAR t;
    LIS_SCALAR *b,*x;
    LIS_DEBUG_FUNC_IN;
    n = A->n;
    b = B->value;
    x = X->value;
    switch(flag)
    {
    case LIS_MATRIX_LOWER:
        /* forward substitution: walk row i's entries one jagged diagonal
           at a time; col[i] is row i's offset within each diagonal */
        for(i=0;i<n;i++)
        {
            k = A->L->col[i];
            l = 0;
            j = A->L->ptr[l++] + k;
            t = b[i];
            while( j<A->L->ptr[l] && l<=A->L->maxnzr )
            {
                t -= A->L->value[j] * x[A->L->index[j]];
                j = A->L->ptr[l++] + k;
            }
            x[i] = t * A->WD->value[i];
        }
        break;
    case LIS_MATRIX_UPPER:
        /* backward substitution over the upper part */
        for(i=n-1;i>=0;i--)
        {
            k = A->U->col[i];
            l = 0;
            j = A->U->ptr[l++] + k;
            t = b[i];
            while( j<A->U->ptr[l] && l<=A->U->maxnzr )
            {
                t -= A->U->value[j] * x[A->U->index[j]];
                j = A->U->ptr[l++] + k;
            }
            x[i] = t * A->WD->value[i];
        }
        break;
    case LIS_MATRIX_SSOR:
        /* forward sweep with L, then backward sweep with U */
        for(i=0;i<n;i++)
        {
            k = A->L->col[i];
            l = 0;
            j = A->L->ptr[l++] + k;
            t = b[i];
            while( j<A->L->ptr[l] && l<=A->L->maxnzr )
            {
                t -= A->L->value[j] * x[A->L->index[j]];
                j = A->L->ptr[l++] + k;
            }
            x[i] = t * A->WD->value[i];
        }
        for(i=n-1;i>=0;i--)
        {
            k = A->U->col[i];
            l = 0;
            j = A->U->ptr[l++] + k;
            t = 0.0;
            while( j<A->U->ptr[l] && l<=A->U->maxnzr )
            {
                t += A->U->value[j] * x[A->U->index[j]];
                j = A->U->ptr[l++] + k;
            }
            x[i] -= t * A->WD->value[i];
        }
        break;
    }
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_solvet_jds"
/* Transposed triangular solve: x is updated by scattering (column-oriented
   substitution), so the roles of L and U are swapped relative to
   lis_matrix_solve_jds. */
LIS_INT lis_matrix_solvet_jds(LIS_MATRIX A, LIS_VECTOR B, LIS_VECTOR X, LIS_INT flag)
{
    LIS_INT i,j,k,l,n;
    LIS_SCALAR t;
    LIS_SCALAR *b,*x;
    LIS_DEBUG_FUNC_IN;
    n = A->n;
    b = B->value;
    x = X->value;
    lis_vector_copy(B,X);
    switch(flag)
    {
    case LIS_MATRIX_LOWER:
        /* L^T is upper triangular, stored in A->U */
        for(i=0;i<n;i++)
        {
            k = A->U->col[i];
            l = 0;
            j = A->U->ptr[l++] + k;
            x[i] = x[i] * A->WD->value[i];
            while( j<A->U->ptr[l] && l<=A->U->maxnzr )
            {
                x[A->U->index[j]] -= A->U->value[j] * x[i];
                j = A->U->ptr[l++] + k;
            }
        }
        break;
    case LIS_MATRIX_UPPER:
        for(i=n-1;i>=0;i--)
        {
            k = A->L->col[i];
            l = 0;
            j = A->L->ptr[l++] + k;
            x[i] = x[i] * A->WD->value[i];
            while( j<A->L->ptr[l] && l<=A->L->maxnzr )
            {
                x[A->L->index[j]] -= A->L->value[j] * x[i];
                j = A->L->ptr[l++] + k;
            }
        }
        break;
    case LIS_MATRIX_SSOR:
        for(i=0;i<n;i++)
        {
            k = A->U->col[i];
            l = 0;
            j = A->U->ptr[l++] + k;
            t = x[i] * A->WD->value[i];
            while( j<A->U->ptr[l] && l<=A->U->maxnzr )
            {
                x[A->U->index[j]] -= A->U->value[j] * t;
                j = A->U->ptr[l++] + k;
            }
        }
        for(i=n-1;i>=0;i--)
        {
            k = A->L->col[i];
            l = 0;
            j = A->L->ptr[l++] + k;
            x[i] = x[i] * A->WD->value[i];
            t = x[i];
            while( j<A->L->ptr[l] && l<=A->L->maxnzr )
            {
                x[A->L->index[j]] -= A->L->value[j] * t;
                j = A->L->ptr[l++] + k;
            }
        }
        break;
    }
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#ifndef USE_OVERLAP
#undef __FUNC__
#define __FUNC__ "lis_matrix_convert_crs2jds"
/* Convert a CRS matrix to JDS (non-overlap build): count row lengths per
   thread, sort rows by descending length, then transpose the row-major
   data into jagged diagonals.
   NOTE(review): error paths between the scratch allocations and the final
   frees (e.g. lis_matrix_malloc_jds failure) return without freeing
   iw/maxnzrpe/nnzpe -- potential leaks; confirm against upstream. */
LIS_INT lis_matrix_convert_crs2jds(LIS_MATRIX Ain, LIS_MATRIX Aout)
{
    LIS_INT i,j,l,js,je;
    LIS_INT err;
    LIS_INT gn,n,nnz,maxnzr,nprocs,my_rank;
    LIS_INT is,ie;
    LIS_INT *iw,*maxnzrpe,*nnzpe;
    LIS_INT *perm,*ptr,*index;
    LIS_SCALAR *value;
    LIS_DEBUG_FUNC_IN;
    n = Ain->n;
    gn = Ain->gn;
    nnz = Ain->nnz;
    my_rank = Ain->my_rank;
    is = Ain->is;
    ie = Ain->ie;
#ifdef _OPENMP
    nprocs = omp_get_max_threads();
#else
    nprocs = 1;
#endif
    perm = NULL;
    ptr = NULL;
    index = NULL;
    value = NULL;
    iw = NULL;
    maxnzrpe = NULL;
    nnzpe = NULL;
    /* iw[i]: nnz of row i; maxnzrpe/nnzpe: per-thread max length / nnz */
    iw = (LIS_INT *)lis_malloc( n*sizeof(LIS_INT),"lis_matrix_convert_crs2jds::iw" );
    if( iw==NULL )
    {
        LIS_SETERR_MEM(n*sizeof(LIS_INT));
        return LIS_OUT_OF_MEMORY;
    }
    maxnzrpe = (LIS_INT *)lis_malloc( nprocs*sizeof(LIS_INT),"lis_matrix_convert_crs2jds::maxnzrpe" );
    if( maxnzrpe==NULL )
    {
        LIS_SETERR_MEM(nprocs*sizeof(LIS_INT));
        return LIS_OUT_OF_MEMORY;
    }
    nnzpe = (LIS_INT *)lis_malloc( (nprocs+1)*sizeof(LIS_INT),"lis_matrix_convert_crs2jds::nnzpe" );
    if( nnzpe==NULL )
    {
        LIS_SETERR_MEM((nprocs+1)*sizeof(LIS_INT));
        return LIS_OUT_OF_MEMORY;
    }
#ifdef _OPENMP
#pragma omp parallel private(i,is,ie,my_rank)
#endif
    {
#ifdef _OPENMP
        my_rank = omp_get_thread_num();
#else
        my_rank = 0;
#endif
        LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
        maxnzrpe[my_rank] = 0;
#ifdef USE_VEC_COMP
#pragma cdir nodep
#endif
        for(i=is;i<ie;i++)
        {
            iw[i] = Ain->ptr[i+1] - Ain->ptr[i];
            if( iw[i] > maxnzrpe[my_rank] ) maxnzrpe[my_rank] = iw[i];
        }
        nnzpe[my_rank+1] = Ain->ptr[ie] - Ain->ptr[is];
    }
    maxnzr = 0;
    nnzpe[0] = 0;
    for(i=0;i<nprocs;i++)
    {
        if( maxnzrpe[i] > maxnzr ) maxnzr = maxnzrpe[i];
        nnzpe[i+1] += nnzpe[i];
    }
    err = lis_matrix_malloc_jds(n,nnz,maxnzr,&perm,&ptr,&index,&value);
    if( err )
    {
        return err;
    }
    /* convert jds */
#ifdef _OPENMP
#pragma omp parallel private(i,j,is,ie,js,je,l,my_rank)
#endif
    {
#ifdef _OPENMP
        my_rank = omp_get_thread_num();
#else
        my_rank = 0;
#endif
        LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
        memset(&ptr[my_rank*(maxnzr+1)],0,(maxnzr+1)*sizeof(LIS_INT));
#if 1
        /* histogram row lengths into ptr, then sort rows by length */
        for(i=is;i<ie;i++)
        {
            perm[i] = i;
            for(j=0;j<iw[i];j++)
            {
                ptr[my_rank*(maxnzr+1) + j+1]++;
            }
        }
        lis_sortr_ii(is,ie-1,iw,perm);
#else
        lis_sort_jds(is,ie,maxnzr,iw,perm);
        j = 0;
        for(i=ie-1;i>=is;i--)
        {
            for(;j<iw[i];j++)
            {
                ptr[my_rank*(maxnzr+1) + j+1] = i-is+1;
            }
            if( iw[i]==maxnzr ) break;
        }
#endif
        ptr[my_rank*(maxnzr+1)] = nnzpe[my_rank];
        for(j=0;j<maxnzr;j++)
        {
            ptr[my_rank*(maxnzr+1) + j+1] += ptr[my_rank*(maxnzr+1) + j];
        }
#ifndef USE_VEC_COMP
        /* row-oriented copy into the jagged diagonals */
        for(i=is;i<ie;i++)
        {
            js = Ain->ptr[perm[i]];
            je = Ain->ptr[perm[i]+1];
            for(j=js;j<je;j++)
            {
                l = ptr[my_rank*(maxnzr+1) + j-js]+i-is;
                value[l] = Ain->value[j];
                index[l] = Ain->index[j];
            }
        }
#else
        /* diagonal-oriented copy, vectorizable (no dependences) */
        for(j=0;j<maxnzr;j++)
        {
            js = ptr[my_rank*(maxnzr+1) + j];
            je = ptr[my_rank*(maxnzr+1) + j+1];
#pragma cdir nodep
            for(i=js;i<je;i++)
            {
                l = Ain->ptr[perm[is+(i-js)]] + j;
                value[i] = Ain->value[l];
                index[i] = Ain->index[l];
            }
        }
#endif
    }
    err = lis_matrix_set_jds(nnz,maxnzr,perm,ptr,index,value,Aout);
    if( err )
    {
        lis_free2(7,perm,ptr,index,value,iw,maxnzrpe,nnzpe);
        return err;
    }
    err = lis_matrix_assemble(Aout);
    if( err )
    {
        lis_free2(2,iw,nnzpe);
        lis_matrix_storage_destroy(Aout);
        return err;
    }
    lis_free2(3,iw,nnzpe,maxnzrpe);
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}
#else
#undef __FUNC__
#define __FUNC__ "lis_matrix_convert_crs2jds"
/* Overlap build of the CRS->JDS conversion: splits Ain into L/U first and
   converts each part into its own JDS structure (declarations continue
   below). */
LIS_INT lis_matrix_convert_crs2jds(LIS_MATRIX Ain, LIS_MATRIX Aout)
{
    LIS_INT i,j,jj,k,kk,l,js,je;
    LIS_INT err;
    LIS_INT np,n,nnz,nnz2,maxnzr,maxnzr2,nprocs,my_rank;
    LIS_INT is,ie,pe;
    LIS_INT
    *iw,*maxnzrpe,*nnzpe;
    LIS_INT *iw2,*maxnzrpe2,*nnzpe2;
    LIS_INT *perm,*ptr,*index;
    LIS_INT *perm2,*ptr2,*index2;
    LIS_SCALAR *value;
    LIS_SCALAR *value2;
    LIS_DEBUG_FUNC_IN;
    n = Ain->n;
    np = Ain->np;
    nnz = Ain->nnz;
    my_rank = Ain->my_rank;
    is = Ain->is;
    ie = Ain->ie;
#ifdef _OPENMP
    nprocs = omp_get_max_threads();
#else
    nprocs = 1;
#endif
    perm = NULL;
    ptr = NULL;
    index = NULL;
    value = NULL;
    iw = NULL;
    iw2 = NULL;
    maxnzrpe = NULL;
    nnzpe = NULL;
    /* split Ain into its L and U CRS halves before converting each */
    lis_matrix_split2_crs(Ain);
    /* unsuffixed scratch arrays describe L, the "2" ones describe U.
       NOTE(review): failure paths below return without freeing the earlier
       allocations -- potential leaks; confirm against upstream. */
    iw = (LIS_INT *)lis_malloc( n*sizeof(LIS_INT),"lis_matrix_convert_crs2jds::iw" );
    if( iw==NULL )
    {
        LIS_SETERR_MEM(n*sizeof(LIS_INT));
        return LIS_OUT_OF_MEMORY;
    }
    iw2 = (LIS_INT *)lis_malloc( n*sizeof(LIS_INT),"lis_matrix_convert_crs2jds::iw2" );
    if( iw2==NULL )
    {
        LIS_SETERR_MEM(n*sizeof(LIS_INT));
        return LIS_OUT_OF_MEMORY;
    }
    maxnzrpe = (LIS_INT *)lis_malloc( nprocs*sizeof(LIS_INT),"lis_matrix_convert_crs2jds::maxnzrpe" );
    if( maxnzrpe==NULL )
    {
        LIS_SETERR_MEM(nprocs*sizeof(LIS_INT));
        return LIS_OUT_OF_MEMORY;
    }
    maxnzrpe2 = (LIS_INT *)lis_malloc( nprocs*sizeof(LIS_INT),"lis_matrix_convert_crs2jds::maxnzrpe2" );
    if( maxnzrpe2==NULL )
    {
        LIS_SETERR_MEM(nprocs*sizeof(LIS_INT));
        return LIS_OUT_OF_MEMORY;
    }
    nnzpe = (LIS_INT *)lis_malloc( (nprocs+1)*sizeof(LIS_INT),"lis_matrix_convert_crs2jds::nnzpe" );
    if( nnzpe==NULL )
    {
        LIS_SETERR_MEM((nprocs+1)*sizeof(LIS_INT));
        return LIS_OUT_OF_MEMORY;
    }
    nnzpe2 = (LIS_INT *)lis_malloc( (nprocs+1)*sizeof(LIS_INT),"lis_matrix_convert_crs2jds::nnzpe2" );
    if( nnzpe2==NULL )
    {
        LIS_SETERR_MEM((nprocs+1)*sizeof(LIS_INT));
        return LIS_OUT_OF_MEMORY;
    }
#ifdef _OPENMP
#pragma omp parallel private(i,j,k,is,ie,jj,my_rank)
#endif
    {
#ifdef _OPENMP
        my_rank = omp_get_thread_num();
#else
        my_rank = 0;
#endif
        LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
        maxnzrpe[my_rank] = 0;
        maxnzrpe2[my_rank] = 0;
        /* per-row lengths of L and U; track per-thread maxima */
        for(i=is;i<ie;i++)
        {
            iw[i] = Ain->L->ptr[i+1] - Ain->L->ptr[i];
            iw2[i] = Ain->U->ptr[i+1] - Ain->U->ptr[i];
            if( iw[i] > maxnzrpe[my_rank] ) maxnzrpe[my_rank] = iw[i];
            if( iw2[i] > maxnzrpe2[my_rank] ) maxnzrpe2[my_rank] = iw2[i];
        }
        nnzpe[my_rank+1] = Ain->L->ptr[ie] - Ain->L->ptr[is];
        nnzpe2[my_rank+1] = Ain->U->ptr[ie] - Ain->U->ptr[is];
    }
    maxnzr = 0;
    maxnzr2 = 0;
    nnzpe[0] = 0;
    nnzpe2[0] = 0;
    for(i=0;i<nprocs;i++)
    {
        if( maxnzrpe[i] > maxnzr ) maxnzr = maxnzrpe[i];
        if( maxnzrpe2[i] > maxnzr2 ) maxnzr2 = maxnzrpe2[i];
        nnzpe[i+1] += nnzpe[i];
        nnzpe2[i+1] += nnzpe2[i];
    }
    nnz = nnzpe[nprocs];
    nnz2 = nnzpe2[nprocs];
    err = lis_matrix_malloc_jds(n,nnz,maxnzr,&perm,&ptr,&index,&value);
    if( err )
    {
        return err;
    }
    err = lis_matrix_malloc_jds(n,nnz2,maxnzr2,&perm2,&ptr2,&index2,&value2);
    if( err )
    {
        return err;
    }
    err = lis_matrix_LU_create(Aout);
    if( err )
    {
        lis_free2(7,perm,ptr,index,value,iw,maxnzrpe,nnzpe);
        return err;
    }
    /* convert jds */
#ifdef _OPENMP
#pragma omp parallel private(i,j,k,is,ie,jj,kk,js,je,l,my_rank)
#endif
    {
#ifdef _OPENMP
        my_rank = omp_get_thread_num();
#else
        my_rank = 0;
#endif
        LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
        memset(&ptr[my_rank*(maxnzr+1)],0,(maxnzr+1)*sizeof(LIS_INT));
        memset(&ptr2[my_rank*(maxnzr2+1)],0,(maxnzr2+1)*sizeof(LIS_INT));
#if 0
        for(i=is;i<ie;i++)
        {
            perm[i] = i;
            perm2[i] = i;
            for(j=0;j<iw[i];j++)
            {
                ptr[my_rank*(maxnzr+1) + j+1]++;
            }
            for(j=0;j<iw2[i];j++)
            {
                ptr2[my_rank*(maxnzr2+1) + j+1]++;
            }
        }
        lis_sortr_ii(is,ie-1,iw,perm);
        lis_sortr_ii(is,ie-1,iw2,perm2);
#else
        /* active variant: sort, then derive diagonal lengths from iw */
        lis_sort_jds(is,ie,maxnzr,iw,perm);
        lis_sort_jds(is,ie,maxnzr2,iw2,perm2);
        j = 0;
        for(i=ie-1;i>=is;i--)
        {
            for(;j<iw[i];j++)
            {
                ptr[my_rank*(maxnzr+1) + j+1] = i-is+1;
            }
            if( iw[i]==maxnzr ) break;
        }
        j = 0;
        for(i=ie-1;i>=is;i--)
        {
            for(;j<iw2[i];j++)
            {
                ptr2[my_rank*(maxnzr2+1) + j+1] = i-is+1;
            }
            if( iw2[i]==maxnzr2 ) break;
        }
#endif
        ptr[my_rank*(maxnzr+1)] = nnzpe[my_rank];
        ptr2[my_rank*(maxnzr2+1)] = nnzpe2[my_rank];
        for(j=0;j<maxnzr;j++)
        {
            ptr[my_rank*(maxnzr+1) + j+1] += ptr[my_rank*(maxnzr+1) + j];
        }
        for(j=0;j<maxnzr2;j++)
        {
            ptr2[my_rank*(maxnzr2+1) + j+1] += ptr2[my_rank*(maxnzr2+1) + j];
        }
#if 0
        for(i=is;i<ie;i++)
        {
            kk = 0;
            js = Ain->L->ptr[perm[i]];
            je = Ain->L->ptr[perm[i]+1];
            for(j=js;j<je;j++)
            {
                l = ptr[my_rank*(maxnzr+1) + kk]+i-is;
                value[l] = Ain->L->value[j];
                index[l] = Ain->L->index[j];
                kk++;
            }
            kk = 0;
            js = Ain->U->ptr[perm2[i]];
            je = Ain->U->ptr[perm2[i]+1];
            for(j=js;j<je;j++)
            {
                l = ptr2[my_rank*(maxnzr2+1) + kk]+i-is;
                value2[l] = Ain->U->value[j];
                index2[l] = Ain->U->index[j];
                kk++;
            }
        }
#else
        /* diagonal-oriented, vectorizable copy for L then U */
        for(j=0;j<maxnzr;j++)
        {
            js = ptr[my_rank*(maxnzr+1) + j];
            je = ptr[my_rank*(maxnzr+1) + j+1];
#pragma cdir nodep
            for(i=js;i<je;i++)
            {
                l = Ain->L->ptr[perm[i-js]] + j;
                value[i] = Ain->L->value[l];
                index[i] = Ain->L->index[l];
            }
        }
        for(j=0;j<maxnzr2;j++)
        {
            js = ptr2[my_rank*(maxnzr2+1) + j];
            je = ptr2[my_rank*(maxnzr2+1) + j+1];
#pragma cdir nodep
            for(i=js;i<je;i++)
            {
                l = Ain->U->ptr[perm2[i-js]] + j;
                value2[i] = Ain->U->value[l];
                index2[i] = Ain->U->index[l];
            }
        }
#endif
    }
    /* the L part becomes Aout's main storage; the U part is attached below */
    err = lis_matrix_set_jds(nnz,maxnzr,perm,ptr,index,value,Aout);
    if( err )
    {
        lis_free2(7,perm,ptr,index,value,iw,maxnzrpe,nnzpe);
        return err;
    }
    Aout->U->maxnzr = maxnzr2;
    Aout->U->row = perm2;
    Aout->U->ptr = ptr2;
    Aout->U->index = index2;
    Aout->U->value = value2;
    err = lis_matrix_assemble(Aout);
    if( err )
    {
        lis_free2(2,iw,nnzpe);
        lis_matrix_storage_destroy(Aout);
        return err;
    }
    Aout->work = (LIS_SCALAR *)lis_malloc(np*sizeof(LIS_SCALAR));
    lis_free2(6,iw,nnzpe,maxnzrpe,iw2,nnzpe2,maxnzrpe2);
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}
#endif

#undef __FUNC__
#define __FUNC__ "lis_matrix_convert_jds2crs"
/* Convert a JDS matrix back to CRS: histogram entries per original row,
   prefix-sum into CRS row pointers, then scatter (body continues below). */
LIS_INT lis_matrix_convert_jds2crs(LIS_MATRIX Ain, LIS_MATRIX Aout)
{
    LIS_INT i,j,jj,k,is,ie;
    LIS_INT err;
    LIS_INT n,nnz,maxnzr;
    LIS_INT *iw;
    LIS_INT *ptr,*index;
    LIS_SCALAR *value;
#ifdef _OPENMP
    LIS_INT nprocs,my_rank;
#endif
    LIS_DEBUG_FUNC_IN;
    n = Ain->n;
    nnz = Ain->nnz;
    maxnzr = Ain->maxnzr;
    is = Ain->is;
    ie = Ain->ie;
    ptr = NULL;
    index = NULL;
    value = NULL;
    iw = NULL;
    iw = (LIS_INT *)lis_malloc( n*sizeof(LIS_INT),"lis_matrix_convert_jds2crs::iw" );
    if( iw==NULL )
    {
        LIS_SETERR_MEM(n*sizeof(LIS_INT));
        return LIS_OUT_OF_MEMORY;
    }
    err =
lis_matrix_malloc_crs(n,nnz,&ptr,&index,&value);
    if( err )
    {
        lis_free2(4,ptr,index,value,iw);
        return err;
    }
    /* convert crs */
#ifdef _OPENMP
    nprocs = omp_get_max_threads();
    ptr[0] = 0;
#pragma omp parallel private(i,j,k,is,ie,jj,my_rank)
    {
        my_rank = omp_get_thread_num();
        LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
        for(i=is;i<ie;i++)
        {
            ptr[i+1] = 0;
        }
        /* count entries per original row (row[] maps JDS order to row id) */
        for(j=0;j<maxnzr;j++)
        {
            k = is;
            for(i=Ain->ptr[my_rank*(maxnzr+1) + j];i<Ain->ptr[my_rank*(maxnzr+1) + j+1];i++)
            {
                ptr[Ain->row[k]+1]++;
                k++;
            }
        }
#pragma omp barrier
        /* prefix sum must see all threads' counts, hence barrier + single */
#pragma omp single
        for(i=0;i<n;i++)
        {
            ptr[i+1] += ptr[i];
        }
        for(i=is;i<ie;i++)
        {
            iw[i] = ptr[i];
        }
        /* scatter values into CRS positions; iw[] is the per-row cursor */
        for(j=0;j<maxnzr;j++)
        {
            jj = is;
            for(i=Ain->ptr[my_rank*(maxnzr+1) + j];i<Ain->ptr[my_rank*(maxnzr+1) + j+1];i++)
            {
                k = iw[Ain->row[jj]]++;
                value[k] = Ain->value[i];
                index[k] = Ain->index[i];
                jj++;
            }
        }
    }
#else
    for(i=0;i<n+1;i++)
    {
        ptr[i] = 0;
    }
    for(j=0;j<maxnzr;j++)
    {
        k = 0;
        for(i=Ain->ptr[j];i<Ain->ptr[j+1];i++)
        {
            ptr[Ain->row[k]+1]++;
            k++;
        }
    }
    for(i=0;i<n;i++)
    {
        ptr[i+1] += ptr[i];
    }
    for(i=0;i<n;i++)
    {
        iw[i] = ptr[i];
    }
    for(j=0;j<maxnzr;j++)
    {
        jj = 0;
        for(i=Ain->ptr[j];i<Ain->ptr[j+1];i++)
        {
            k = iw[Ain->row[jj]]++;
            value[k] = Ain->value[i];
            index[k] = Ain->index[i];
            jj++;
        }
    }
#endif
    err = lis_matrix_set_crs(nnz,ptr,index,value,Aout);
    if( err )
    {
        lis_free2(4,ptr,index,value,iw);
        return err;
    }
    err = lis_matrix_assemble(Aout);
    if( err )
    {
        lis_free(iw);
        lis_matrix_storage_destroy(Aout);
        return err;
    }
    lis_free(iw);
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_vector_sort_jds_order"
/* Permute vector v into JDS row order in place: t[i] = v[row[i]].
   The old value buffer is freed and replaced by the permuted one. */
LIS_INT lis_vector_sort_jds_order(LIS_MATRIX A, LIS_VECTOR v)
{
    LIS_INT i,n,np;
    LIS_SCALAR *t;
    LIS_DEBUG_FUNC_IN;
    n = A->n;
    np = A->np;
    /* allocate np (includes ghost entries), though only n are permuted */
    t = (LIS_SCALAR *)lis_malloc(np*sizeof(LIS_SCALAR),"lis_vector_sort_jds_order::t");
    if( t==NULL )
    {
        LIS_SETERR_MEM(np*sizeof(LIS_SCALAR));
        return LIS_OUT_OF_MEMORY;
    }
#ifdef USE_VEC_COMP
#pragma cdir nodep
#endif
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
    for(i=0;i<n;i++)
    {
        t[i] = v->value[A->row[i]];
    }
    lis_free(v->value);
    v->value = t;
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_vector_sort_ascending_order"
/* Inverse of lis_vector_sort_jds_order: scatter v back to natural row
   order, t[row[i]] = v[i], replacing v's value buffer. */
LIS_INT lis_vector_sort_ascending_order(LIS_MATRIX A, LIS_VECTOR v)
{
    LIS_INT i,n,np;
    LIS_SCALAR *t;
    LIS_DEBUG_FUNC_IN;
    n = A->n;
    np = A->np;
    t = (LIS_SCALAR *)lis_malloc(np*sizeof(LIS_SCALAR),"lis_vector_sort_ascending_order::t");
    if( t==NULL )
    {
        LIS_SETERR_MEM(np*sizeof(LIS_SCALAR));
        return LIS_OUT_OF_MEMORY;
    }
#ifdef USE_VEC_COMP
#pragma cdir nodep
#endif
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
    for(i=0;i<n;i++)
    {
        t[A->row[i]] = v->value[i];
    }
    lis_free(v->value);
    v->value = t;
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}
GB_unop.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// NOTE: this is the *template* form of the generated file: GB_atype, GB_ctype,
// GB_geta, GB_cast, GB_unaryop, etc. are placeholders that the code generator
// substitutes with concrete types and operator bodies.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply)
// op(A') function: GB (_unop_tran)

// C type: GB_ctype
// A type: GB_atype
// cast: GB_cast(cij,aij)
// unaryop: GB_unaryop(cij,aij)

#define GB_ATYPE \
    GB_atype

#define GB_CTYPE \
    GB_ctype

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GB_geta(aij,Ax,pA)

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    GB_unaryop(z, x) ;

// casting
#define GB_CAST(z, aij) \
    GB_cast(z, aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_geta(aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_cast(z, aij) ; \
    GB_unaryop(Cx [pC], z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    GB_op_is_identity_with_no_typecast

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    GB_disable

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply)
(
    GB_ctype *Cx,               // Cx and Ax may be aliased
    const GB_atype *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense/sparse case: every entry of Ax is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op, same types: a plain parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (GB_atype), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GB_geta(aij, Ax, p) ;
            GB_cast(z, aij) ;
            GB_unaryop(Cx [p], z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            GB_geta(aij, Ax, p) ;
            GB_cast(z, aij) ;
            GB_unaryop(Cx [p], z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template does all the work
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
tree_extrap_functor.h
// ************************************************************************* // Copyright (C) 2016 by Arash Bakhtiari // You may not use this file except in compliance with the License. // You obtain a copy of the License in the LICENSE file. // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // ************************************************************************* #ifndef SRC_TREE_EXTRAP_FUNCTOR_H_ #define SRC_TREE_EXTRAP_FUNCTOR_H_ #include <vector> #include <cheb_node.hpp> #include <profile.hpp> #include <pvfmm_common.hpp> #include "utils/common.h" #include "utils/cubic.h" namespace tbslas { template <typename Real_t, class Tree_t> class FieldExtrapFunctor { public: explicit FieldExtrapFunctor(Tree_t *tp, Tree_t *tc) : tp_(tp), tc_(tc) { typedef typename Tree_t::Node_t Node_t; tbslas::SimConfig *sim_config = tbslas::SimConfigSingleton::Instance(); ////////////////////////////////////////////////////////////////////// // GET THE TREES PARAMETERS ////////////////////////////////////////////////////////////////////// Node_t *n_curr = tp_->PostorderFirst(); while (n_curr != NULL) { if (!n_curr->IsGhost() && n_curr->IsLeaf()) break; n_curr = tp_->PostorderNxt(n_curr); } data_dof_ = n_curr->DataDOF(); } virtual ~FieldExtrapFunctor() {} void operator()(const Real_t *query_points_pos, int num_points, Real_t *out) { //////////////////////////////////////////////////////////////////////// // EXTRAPOLATE IN TIME FOR ALL QUERY POINTS //////////////////////////////////////////////////////////////////////// // =================================== // CONSTRUCT THE EVALUATORS // =================================== tbslas::NodeFieldFunctor<Real_t, Tree_t> tp_evaluator(tp_); tbslas::NodeFieldFunctor<Real_t, 
Tree_t> tc_evaluator(tc_); // =================================== // EVALUATE AT T^N // =================================== std::vector<Real_t> tnc_pnts_val; tnc_pnts_val.resize(num_points * data_dof_); tc_evaluator(query_points_pos, num_points, tnc_pnts_val.data()); // =================================== // EVALUATE AT T^(N-1) // =================================== std::vector<Real_t> tnp_pnts_val; tnp_pnts_val.resize(num_points * data_dof_); tp_evaluator(query_points_pos, num_points, tnp_pnts_val.data()); // =================================== // COMBINE AND STORE THE VALUES // =================================== Real_t ccoeff = 3.0 / 2; Real_t pcoeff = 0.5; #pragma omp parallel for for (int i = 0; i < tnc_pnts_val.size(); i++) { out[i] = ccoeff * tnc_pnts_val[i] - pcoeff * tnp_pnts_val[i]; } } void update(Tree_t *new_tree, Real_t time) { // POP FRONT (DEALLOCATE AND REMOVE) THE FIRST TREE IN THE QUEUE delete tp_; tp_ = tc_; tc_ = new_tree; } private: Tree_t *tc_; Tree_t *tp_; int data_dof_; }; } // namespace tbslas #endif // SRC_TREE_EXTRAP_FUNCTOR_H_
f3f625.c
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"
#include <stdio.h>

/* NOTE(review): this file looks auto-generated (Devito-style stencil code)
   with hand-added time-tiling and sparse source injection; variable names
   (r17..r48, t0/t1/t2, sf) follow the generator's conventions. */

#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))

/* Generic array descriptor: raw data plus size/padding metadata. */
struct dataobj
{
  void *restrict data;
  int *size;
  int *npsize;
  int *dsize;
  int *hsize;
  int *hofs;
  int *oofs;
};

/* Wall-clock accumulators for the timed code sections. */
struct profiler
{
  double section0;
  double section1;
  double section2;
};

/* Inner compute kernel for one time iteration (declared here, defined below). */
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, float **restrict r47_vec, float **restrict r48_vec, const int time, const int tw);

/* Driver: precomputes trigonometric/anisotropy fields (section0), then runs
   the time-blocked wavefield update loop calling bf0 per iteration
   (section1).  u and v are the two coupled wavefields; t0/t1/t2 rotate over
   the 3 time buffers.  Always returns 0. */
int ForwardTTI(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, struct dataobj *restrict delta_vec, const float dt, struct dataobj *restrict epsilon_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict phi_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict theta_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x_size, const int y_size, const int z_size, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine)
{
  /* Re-interpret the flat dataobj buffers as multi-dimensional arrays. */
  int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
  float(*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[delta_vec->size[1]][delta_vec->size[2]])delta_vec->data;
  int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
  float(*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__((aligned(64))) = (float(*)[phi_vec->size[1]][phi_vec->size[2]])phi_vec->data;
  float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
  float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
  int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
  int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
  int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
  float(*restrict theta)[theta_vec->size[1]][theta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[theta_vec->size[1]][theta_vec->size[2]])theta_vec->data;
  float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
  float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
  /* Scratch arrays for the precomputed trig fields (one extra plane in
     each dimension).  NOTE(review): posix_memalign return values are not
     checked anywhere below — verify allocation failure handling upstream. */
  float(*r17)[y_size + 1][z_size + 1];
  posix_memalign((void **)&r17, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
  float(*r18)[y_size + 1][z_size + 1];
  posix_memalign((void **)&r18, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
  float(*r19)[y_size + 1][z_size + 1];
  posix_memalign((void **)&r19, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
  float(*r20)[y_size + 1][z_size + 1];
  posix_memalign((void **)&r20, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
  float(*r21)[y_size + 1][z_size + 1];
  posix_memalign((void **)&r21, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
  /* Per-thread scratch buffers for bf0 (one pointer per OpenMP thread). */
  float **r47;
  posix_memalign((void **)&r47, 64, sizeof(float *) * nthreads);
  float **r48;
  posix_memalign((void **)&r48, 64, sizeof(float *) * nthreads);
  /* Tiling parameters come packed in block_sizes: [xb, yb, x0_blk0, y0_blk0]. */
  int y0_blk0_size = block_sizes[3];
  int x0_blk0_size = block_sizes[2];
  int yb_size = block_sizes[1];
  int xb_size = block_sizes[0];
  int sf = 2;  /* skew factor of the time tiling */
  int t_blk_size = 2 * sf * (time_M - time_m);
#pragma omp parallel num_threads(nthreads)
  {
    const int tid = omp_get_thread_num();
    posix_memalign((void **)&r47[tid], 64, sizeof(float[x0_blk0_size + 1][y0_blk0_size + 1][z_size + 1]));
    posix_memalign((void **)&r48[tid], 64, sizeof(float[x0_blk0_size + 1][y0_blk0_size + 1][z_size + 1]));
  }
  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
  struct timeval start_section0, end_section0;
  gettimeofday(&start_section0, NULL);
  /* Begin section0: precompute cos/sin of the tilt/azimuth fields and the
     delta-derived scaling, shifted by the halo offset (+4 -> +1). */
#pragma omp parallel num_threads(nthreads)
  {
#pragma omp for collapse(2) schedule(static, 1)
    for (int x = x_m - 1; x <= x_M; x += 1)
    {
      for (int y = y_m - 1; y <= y_M; y += 1)
      {
#pragma omp simd aligned(delta, phi, theta : 64)
        for (int z = z_m - 1; z <= z_M; z += 1)
        {
          r21[x + 1][y + 1][z + 1] = cos(phi[x + 4][y + 4][z + 4]);
          r20[x + 1][y + 1][z + 1] = sin(theta[x + 4][y + 4][z + 4]);
          r19[x + 1][y + 1][z + 1] = sin(phi[x + 4][y + 4][z + 4]);
          r18[x + 1][y + 1][z + 1] = cos(theta[x + 4][y + 4][z + 4]);
          r17[x + 1][y + 1][z + 1] = sqrt(2 * delta[x + 4][y + 4][z + 4] + 1);
        }
      }
    }
  }
  /* End section0 */
  gettimeofday(&end_section0, NULL);
  timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;
  printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);
  /* Skewed time-tiled sweep: spatial block origins are shifted by the time
     index (x/y loops run to x_M + sf*(time_M - time_m)); bf0 un-shifts via
     its "x - time" indexing. */
  for (int t_blk = time_m; t_blk <= 1 + sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
  {
    for (int xb = x_m - 1; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size)
    {
      //printf(" Change of outer xblock %d \n", xb);
      for (int yb = y_m - 1; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size)
      {
        /* t0/t1/t2 rotate the three time buffers of u and v. */
        for (int time = t_blk, t0 = (time) % (3), t1 = (time + 2) % (3), t2 = (time + 1) % (3); time <= 2 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1))) % (3), t1 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3))
        {
          int tw = ((time / sf) % (time_M - time_m + 1));  /* wrapped timestep index */
          struct timeval start_section1, end_section1;
          gettimeofday(&start_section1, NULL);
          /* Begin section1 */
          bf0(damp_vec, dt, epsilon_vec, (float *)r17, (float *)r18, (float *)r19, (float *)r20, (float *)r21, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x0_blk0_size, x_size, y0_blk0_size, y_size, z_size, t0, t1, t2, x_M, x_m, y_M, y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, (float **)r47, (float **)r48, time, tw);
          // x_M - (x_M - x_m + 1)%(x0_blk0_size), x_m, y_M - (y_M - y_m + 1)%(y0_blk0_size), y_m,
          /* End section1 */
          gettimeofday(&end_section1, NULL);
          timers->section1 += (double)(end_section1.tv_sec - start_section1.tv_sec) + (double)(end_section1.tv_usec - start_section1.tv_usec) / 1000000;
        }
      }
    }
  }
  /* Release per-thread scratch, then the shared buffers. */
#pragma omp parallel num_threads(nthreads)
  {
    const int tid = omp_get_thread_num();
    free(r47[tid]);
    free(r48[tid]);
  }
  free(r17);
  free(r18);
  free(r19);
  free(r20);
  free(r21);
  free(r47);
  free(r48);
  return 0;
}

/* One skewed time iteration over the current spatial tile: first pass fills
   the per-thread rotated-gradient buffers r34/r35 from u/v at t0, second
   pass computes the updated wavefields at t2 and injects the sparse sources
   recorded in save_src_u/save_src_v. */
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, float **restrict r47_vec, float **restrict r48_vec, const int time, const int tw)
{
  /* Cast the flat buffers back to the shapes used by the stencil. */
  float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
  float(*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__((aligned(64))) = (float(*)[epsilon_vec->size[1]][epsilon_vec->size[2]])epsilon_vec->data;
  float(*restrict r17)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r17_vec;
  float(*restrict r18)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r18_vec;
  float(*restrict r19)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r19_vec;
  float(*restrict r20)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r20_vec;
  float(*restrict r21)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r21_vec;
  float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
  float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
  float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
  float **r47 = (float **)r47_vec;
  float **r48 = (float **)r48_vec;
  int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
  float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
  float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
  int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
  int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
  int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
#pragma omp parallel num_threads(nthreads)
  {
    const int tid = omp_get_thread_num();
    /* r34/r35: this thread's scratch blocks (allocated in ForwardTTI). */
    float(*restrict r34)[y0_blk0_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y0_blk0_size + 1][z_size + 1]) r47[tid];
    float(*restrict r35)[y0_blk0_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y0_blk0_size + 1][z_size + 1]) r48[tid];
#pragma omp for collapse(2) schedule(dynamic, 1)
    for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
    {
      for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
      {
        /* Pass 1: rotated first-derivative terms of u (r34) and v (r35),
           one plane wider than the update region (x/y start at blk-1). */
        for (int x = x0_blk0 - 1, xs = 0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++, xs++)
        {
          for (int y = y0_blk0 - 1, ys = 0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++, ys++)
          {
            //printf(" bf0 Timestep tw: %d, Updating x: %d y: %d , Updating xs: %d ys: %d \n", tw, x - time + 4, y - time + 4, xs, ys);
#pragma omp simd aligned(u, v : 32)
            for (int z = z_m - 1; z <= z_M; z += 1)
            {
              float r39 = -u[t0][x - time + 4][y - time + 4][z + 4];
              r34[xs][ys][z + 1] = 1.0e-1F * (-(r39 + u[t0][x - time + 4][y - time + 4][z + 5]) * r18[x - time + 1][y - time + 1][z + 1] - (r39 + u[t0][x - time + 4][y - time + 5][z + 4]) * r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] - (r39 + u[t0][x - time + 5][y - time + 4][z + 4]) * r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1]);
              float r40 = -v[t0][x - time + 4][y - time + 4][z + 4];
              r35[xs][ys][z + 1] = 1.0e-1F * (-(r40 + v[t0][x - time + 4][y - time + 4][z + 5]) * r18[x - time + 1][y - time + 1][z + 1] - (r40 + v[t0][x - time + 4][y - time + 5][z + 4]) * r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] - (r40 + v[t0][x - time + 5][y - time + 4][z + 4]) * r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1]);
            }
          }
        }
        /* Pass 2: combine r34/r35 with the Laplacian terms and damping to
           advance u and v to time level t2, then inject sources. */
        for (int x = x0_blk0, xs = 0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++, xs++)
        {
          for (int y = y0_blk0, ys = 0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++, ys++)
          {
            //printf(" bf1 Timestep tw: %d, Updating x: %d y: %d , Updating xs: %d ys: %d \n", tw, x - time + 4, y - time + 4, xs, ys);
#pragma omp simd aligned(damp, epsilon, u, v, vp : 32)
            for (int z = z_m; z <= z_M; z += 1)
            {
              float r46 = 1.0 / dt;
              float r45 = 1.0 / (dt * dt);
              float r44 = r18[x - time + 1][y - time + 1][z] * r35[xs + 1][ys + 1][z] - r18[x - time + 1][y - time + 1][z + 1] * r35[xs + 1][ys + 1][z + 1] + r19[x - time + 1][y - time][z + 1] * r20[x - time + 1][y - time][z + 1] * r35[xs + 1][ys][z + 1] - r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] * r35[xs + 1][ys + 1][z + 1] + r20[x - time][y - time + 1][z + 1] * r21[x - time][y - time + 1][z + 1] * r35[xs][ys + 1][z + 1] - r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1] * r35[xs + 1][ys + 1][z + 1];
              float r43 = 1.0 / (vp[x - time + 4][y - time + 4][z + 4] * vp[x - time + 4][y - time + 4][z + 4]);
              float r42 = 1.0e-1F * (-r18[x - time + 1][y - time + 1][z] * r34[xs + 1][ys + 1][z] + r18[x - time + 1][y - time + 1][z + 1] * r34[xs + 1][ys + 1][z + 1] - r19[x - time + 1][y - time][z + 1] * r20[x - time + 1][y - time][z + 1] * r34[xs + 1][ys][z + 1] + r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] * r34[xs + 1][ys + 1][z + 1] - r20[x - time][y - time + 1][z + 1] * r21[x - time][y - time + 1][z + 1] * r34[xs][ys + 1][z + 1] + r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1] * r34[xs + 1][ys + 1][z + 1]) - 8.33333315e-4F * (u[t0][x - time + 2][y - time + 4][z + 4] + u[t0][x - time + 4][y - time + 2][z + 4] + u[t0][x - time + 4][y - time + 4][z + 2] + u[t0][x - time + 4][y - time + 4][z + 6] + u[t0][x - time + 4][y - time + 6][z + 4] + u[t0][x - time + 6][y - time + 4][z + 4]) + 1.3333333e-2F * (u[t0][x - time + 3][y - time + 4][z + 4] + u[t0][x - time + 4][y - time + 3][z + 4] + u[t0][x - time + 4][y - time + 4][z + 3] + u[t0][x - time + 4][y - time + 4][z + 5] + u[t0][x - time + 4][y - time + 5][z + 4] + u[t0][x - time + 5][y - time + 4][z + 4]) - 7.49999983e-2F * u[t0][x - time + 4][y - time + 4][z + 4];
              float r41 = 1.0 / (r43 * r45 + r46 * damp[x - time + 1][y - time + 1][z + 1]);
              float r32 = r45 * (-2.0F * u[t0][x - time + 4][y - time + 4][z + 4] + u[t1][x - time + 4][y - time + 4][z + 4]);
              float r33 = r45 * (-2.0F * v[t0][x - time + 4][y - time + 4][z + 4] + v[t1][x - time + 4][y - time + 4][z + 4]);
              u[t2][x - time + 4][y - time + 4][z + 4] = r41 * ((-r32) * r43 + r42 * (2 * epsilon[x - time + 4][y - time + 4][z + 4] + 1) + 1.0e-1F * r44 * r17[x - time + 1][y - time + 1][z + 1] + r46 * (damp[x - time + 1][y - time + 1][z + 1] * u[t0][x - time + 4][y - time + 4][z + 4]));
              v[t2][x - time + 4][y - time + 4][z + 4] = r41 * ((-r33) * r43 + r42 * r17[x - time + 1][y - time + 1][z + 1] + 1.0e-1F * r44 + r46 * (damp[x - time + 1][y - time + 1][z + 1] * v[t0][x - time + 4][y - time + 4][z + 4]));
            }
            /* Sparse source injection: nnz_sp_source_mask gives the count of
               active z-positions at this (x,y); sp_source_mask lists them. */
            int sp_zi_M = nnz_sp_source_mask[x - time][y - time] - 1;
            for (int sp_zi = sp_zi_m; sp_zi <= sp_zi_M; sp_zi += 1)
            {
              int zind = sp_source_mask[x - time][y - time][sp_zi];
              float r22 = save_src_u[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
              u[t2][x - time + 4][y - time + 4][zind + 4] += r22;
              float r23 = save_src_v[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
              v[t2][x - time + 4][y - time + 4][zind + 4] += r23;
              //printf("Source injection at time %d , at : x: %d, y: %d, %d, %f, %f \n", tw, x - time + 4, y - time + 4, zind + 4, r22, r23);
            }
          }
        }
      }
    }
  }
}
if_clause_modificado.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * OpenMP reduction example: sums a[0..n-1] with per-thread partial sums
 * combined through an atomic update.
 *
 * argv[1]: number of iterations (clamped to at most 20, the array size)
 * argv[2]: number of threads to request
 *
 * The parallel region is only actually parallel when n > 4 (if clause).
 */
int main(int argc, char **argv){
  int i, n = 20, tid, x;
  int a[n], suma = 0, sumalocal;

  if (argc < 3) {
    fprintf(stderr,"[ERROR]-Falta iteraciones\n");
    exit(-1);
  }
  n = atoi(argv[1]);
  if (n > 20) n = 20;   /* a[] has at most 20 elements */
  for (i = 0; i < n; i++) {
    a[i] = i;
  }
  x = atoi(argv[2]);
  /* BUGFIX: the OpenMP spec requires the num_threads() argument to be a
     positive integer; atoi() yields 0 for non-numeric input and may yield
     a negative value, so clamp to at least one thread. */
  if (x < 1) x = 1;
  #pragma omp parallel num_threads(x) if(n>4) default(none)\
      private(sumalocal,tid) shared(a,suma,n)
  {
    sumalocal = 0;
    tid = omp_get_thread_num();
    /* Each thread sums its static chunk; nowait because the atomic below
       is followed by an explicit barrier anyway. */
    #pragma omp for private(i) schedule(static) nowait
    for (i = 0; i < n; i++) {
      sumalocal += a[i];
      printf( "thread %d suma de a[%d]=%d sumalocal=%d \n", tid,i,a[i],sumalocal);
    }
    /* Combine the per-thread partial sums safely. */
    #pragma omp atomic
    suma += sumalocal;
    /* Ensure all contributions are in before the master prints. */
    #pragma omp barrier
    #pragma omp master
    printf("thread master=%d imprime suma=%d\n",tid,suma);
  }
}
threshold.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD % % T H H R R E SS H H O O L D D % % T HHHHH RRRR EEE SSS HHHHH O O L D D % % T H H R R E SS H H O O L D D % % T H H R R EEEEE SSSSS H H OOO LLLLL DDDD % % % % % % MagickCore Image Threshold Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/property.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/xml-tree.h" #include "MagickCore/xml-tree-private.h" /* Define declarations. */ #define ThresholdsFilename "thresholds.xml" /* Typedef declarations. */ struct _ThresholdMap { char *map_id, *description; size_t width, height; ssize_t divisor, *levels; }; /* Static declarations. 
*/ static const char *MinimalThresholdMap = "<?xml version=\"1.0\"?>" "<thresholds>" " <threshold map=\"threshold\" alias=\"1x1\">" " <description>Threshold 1x1 (non-dither)</description>" " <levels width=\"1\" height=\"1\" divisor=\"2\">" " 1" " </levels>" " </threshold>" " <threshold map=\"checks\" alias=\"2x1\">" " <description>Checkerboard 2x1 (dither)</description>" " <levels width=\"2\" height=\"2\" divisor=\"3\">" " 1 2" " 2 1" " </levels>" " </threshold>" "</thresholds>"; /* Forward declarations. */ static ThresholdMap *GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveThresholdImage() selects an individual threshold for each pixel % based on the range of intensity values in its local neighborhood. This % allows for thresholding of an image whose global intensity histogram % doesn't contain distinctive peaks. % % The format of the AdaptiveThresholdImage method is: % % Image *AdaptiveThresholdImage(const Image *image,const size_t width, % const size_t height,const double bias,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the local neighborhood. % % o height: the height of the local neighborhood. % % o bias: the mean bias. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveThresholdImage(const Image *image, const size_t width,const size_t height,const double bias, ExceptionInfo *exception) { #define AdaptiveThresholdImageTag "AdaptiveThreshold/Image" CacheView *image_view, *threshold_view; Image *threshold_image; MagickBooleanType status; MagickOffsetType progress; MagickSizeType number_pixels; ssize_t y; /* Initialize threshold image attributes. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); threshold_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (threshold_image == (Image *) NULL) return((Image *) NULL); status=SetImageStorageClass(threshold_image,DirectClass,exception); if (status == MagickFalse) { threshold_image=DestroyImage(threshold_image); return((Image *) NULL); } /* Threshold image. */ status=MagickTrue; progress=0; number_pixels=(MagickSizeType) width*height; image_view=AcquireVirtualCacheView(image,exception); threshold_view=AcquireAuthenticCacheView(threshold_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,threshold_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_bias[MaxPixelChannels], channel_sum[MaxPixelChannels]; register const Quantum *magick_restrict p, *magick_restrict pixels; register Quantum *magick_restrict q; register ssize_t i, x; ssize_t center, u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (height/2L),image->columns+width,height,exception); q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+ GetPixelChannels(image)*(width/2); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image, channel); if ((traits == 
UndefinedPixelTrait) || (threshold_traits == UndefinedPixelTrait)) continue; if (((threshold_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p) <= (QuantumRange/2))) { SetPixelChannel(threshold_image,channel,p[center+i],q); continue; } pixels=p; channel_bias[channel]=0.0; channel_sum[channel]=0.0; for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { if (u == (ssize_t) (width-1)) channel_bias[channel]+=pixels[i]; channel_sum[channel]+=pixels[i]; pixels+=GetPixelChannels(image); } pixels+=GetPixelChannels(image)*image->columns; } } for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double mean; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image, channel); if ((traits == UndefinedPixelTrait) || (threshold_traits == UndefinedPixelTrait)) continue; if (((threshold_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p) <= (QuantumRange/2))) { SetPixelChannel(threshold_image,channel,p[center+i],q); continue; } channel_sum[channel]-=channel_bias[channel]; channel_bias[channel]=0.0; pixels=p; for (v=0; v < (ssize_t) height; v++) { channel_bias[channel]+=pixels[i]; pixels+=(width-1)*GetPixelChannels(image); channel_sum[channel]+=pixels[i]; pixels+=GetPixelChannels(image)*(image->columns+1); } mean=(double) (channel_sum[channel]/number_pixels+bias); SetPixelChannel(threshold_image,channel,(Quantum) ((double) p[center+i] <= mean ? 
        0 : QuantumRange),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(threshold_image);
    }
    if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          Progress counter is shared across OpenMP threads; serialize update.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveThresholdImage)
#endif
        proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  threshold_image->type=image->type;
  threshold_view=DestroyCacheView(threshold_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    threshold_image=DestroyImage(threshold_image);  /* failed: release result */
  return(threshold_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o T h r e s h o l d I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoThresholdImage() automatically selects a threshold and replaces each
%  pixel in the image with a black pixel if the image intensity is less than
%  the selected threshold otherwise white.
%
%  The format of the AutoThresholdImage method is:
%
%      MagickBooleanType AutoThresholdImage(Image *image,
%        const AutoThresholdMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-threshold.
%
%    o method: choose from Kapur, OTSU, or Triangle.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  KapurThreshold(): maximum-entropy threshold selection over a normalized
  256-bin histogram.  Returns the threshold as a percentage (0..100) of the
  intensity range, or -1.0 on memory-allocation failure.
*/
static double KapurThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
#define MaxIntensity  255

  double
    *black_entropy,
    *cumulative_histogram,
    entropy,
    epsilon,
    maximum_entropy,
    *white_entropy;

  register ssize_t
    i,
    j;

  size_t
    threshold;

  /*
    Compute optimal threshold from the entropy of the histogram.
  */
  cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*cumulative_histogram));
  black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*black_entropy));
  white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*white_entropy));
  if ((cumulative_histogram == (double *) NULL) ||
      (black_entropy == (double *) NULL) || (white_entropy == (double *) NULL))
    {
      /*
        Release whichever buffers were acquired before the failure.
      */
      if (white_entropy != (double *) NULL)
        white_entropy=(double *) RelinquishMagickMemory(white_entropy);
      if (black_entropy != (double *) NULL)
        black_entropy=(double *) RelinquishMagickMemory(black_entropy);
      if (cumulative_histogram != (double *) NULL)
        cumulative_histogram=(double *)
          RelinquishMagickMemory(cumulative_histogram);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Entropy for black and white parts of the histogram.
  */
  cumulative_histogram[0]=histogram[0];
  for (i=1; i <= MaxIntensity; i++)
    cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i];
  epsilon=MagickMinimumValue;
  for (j=0; j <= MaxIntensity; j++)
  {
    /*
      Black entropy: bins [0..j], normalized by cumulative mass below j.
    */
    black_entropy[j]=0.0;
    if (cumulative_histogram[j] > epsilon)
      {
        entropy=0.0;
        for (i=0; i <= j; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/cumulative_histogram[j]*
              log(histogram[i]/cumulative_histogram[j]);
        black_entropy[j]=entropy;
      }
    /*
      White entropy: bins (j..MaxIntensity], normalized by remaining mass.
    */
    white_entropy[j]=0.0;
    if ((1.0-cumulative_histogram[j]) > epsilon)
      {
        entropy=0.0;
        for (i=j+1; i <= MaxIntensity; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/(1.0-cumulative_histogram[j])*
              log(histogram[i]/(1.0-cumulative_histogram[j]));
        white_entropy[j]=entropy;
      }
  }
  /*
    Find histogram bin with maximum entropy.
  */
  maximum_entropy=black_entropy[0]+white_entropy[0];
  threshold=0;
  for (j=1; j <= MaxIntensity; j++)
    if ((black_entropy[j]+white_entropy[j]) > maximum_entropy)
      {
        maximum_entropy=black_entropy[j]+white_entropy[j];
        threshold=(size_t) j;
      }
  /*
    Free resources.
  */
  white_entropy=(double *) RelinquishMagickMemory(white_entropy);
  black_entropy=(double *) RelinquishMagickMemory(black_entropy);
  cumulative_histogram=(double *)
    RelinquishMagickMemory(cumulative_histogram);
  return(100.0*threshold/MaxIntensity);
}

/*
  OTSUThreshold(): Otsu's method; selects the threshold that maximizes the
  inter-class variance of the two resulting pixel classes.  Returns a
  percentage (0..100), or -1.0 on memory-allocation failure.
*/
static double OTSUThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
  double
    max_sigma,
    *myu,
    *omega,
    *probability,
    *sigma,
    threshold;

  register ssize_t
    i;

  /*
    Compute optimal threshold from maximization of inter-class variance.
  */
  myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu));
  omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega));
  probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*probability));
  sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma));
  if ((myu == (double *) NULL) || (omega == (double *) NULL) ||
      (probability == (double *) NULL) || (sigma == (double *) NULL))
    {
      /*
        Release whichever buffers were acquired before the failure.
      */
      if (sigma != (double *) NULL)
        sigma=(double *) RelinquishMagickMemory(sigma);
      if (probability != (double *) NULL)
        probability=(double *) RelinquishMagickMemory(probability);
      if (omega != (double *) NULL)
        omega=(double *) RelinquishMagickMemory(omega);
      if (myu != (double *) NULL)
        myu=(double *) RelinquishMagickMemory(myu);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Calculate probability density.
  */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    probability[i]=histogram[i];
  /*
    Generate probability of graylevels and mean value for separation.
  */
  omega[0]=probability[0];
  myu[0]=0.0;
  for (i=1; i <= (ssize_t) MaxIntensity; i++)
  {
    omega[i]=omega[i-1]+probability[i];   /* zeroth-order cumulative moment */
    myu[i]=myu[i-1]+i*probability[i];     /* first-order cumulative moment */
  }
  /*
    Sigma maximization: inter-class variance and compute optimal threshold.
  */
  threshold=0;
  max_sigma=0.0;
  for (i=0; i < (ssize_t) MaxIntensity; i++)
  {
    sigma[i]=0.0;
    if ((omega[i] != 0.0) && (omega[i] != 1.0))
      sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0-
        omega[i]));
    if (sigma[i] > max_sigma)
      {
        max_sigma=sigma[i];
        threshold=(double) i;
      }
  }
  /*
    Free resources.
  */
  myu=(double *) RelinquishMagickMemory(myu);
  omega=(double *) RelinquishMagickMemory(omega);
  probability=(double *) RelinquishMagickMemory(probability);
  sigma=(double *) RelinquishMagickMemory(sigma);
  return(100.0*threshold/MaxIntensity);
}

/*
  TriangleThreshold(): triangle (Zack) method; finds the histogram bin with
  the largest perpendicular distance from the line joining the histogram peak
  to the far non-empty end.  Returns a percentage (0..100).
*/
static double TriangleThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
  double
    a,
    b,
    c,
    count,
    distance,
    inverse_ratio,
    max_distance,
    segment,
    x1,
    x2,
    y1,
    y2;

  register ssize_t
    i;

  ssize_t
    end,
    max,
    start,
    threshold;

  /*
    Compute optimal threshold with triangle algorithm.
  */
  start=0;  /* find start bin, first bin not zero count */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > 0.0)
      {
        start=i;
        break;
      }
  end=0;  /* find end bin, last bin not zero count */
  for (i=(ssize_t) MaxIntensity; i >= 0; i--)
    if (histogram[i] > 0.0)
      {
        end=i;
        break;
      }
  max=0;  /* find max bin, bin with largest count */
  count=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > count)
      {
        max=i;
        count=histogram[i];
      }
  /*
    Compute threshold at split point.
  */
  x1=(double) max;
  y1=histogram[max];
  x2=(double) end;
  if ((max-start) >= (end-max))
    x2=(double) start;  /* scan toward the longer tail of the histogram */
  y2=0.0;
  a=y1-y2;
  b=x2-x1;
  c=(-1.0)*(a*x1+b*y1);
  /* constant positive scale; does not change which bin maximizes distance */
  inverse_ratio=1.0/sqrt(a*a+b*b+c*c);
  threshold=0;
  max_distance=0.0;
  if (x2 == (double) start)
    for (i=start; i < max; i++)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment > 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  else
    for (i=end; i > max; i--)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment < 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  return(100.0*threshold/MaxIntensity);
}

MagickExport MagickBooleanType AutoThresholdImage(Image *image,
  const AutoThresholdMethod method,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  char
    property[MagickPathExtent];

  double
    gamma,
    *histogram,
    sum,
    threshold;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Form histogram.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  (void) ResetMagickMemory(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* bin pixels by 8-bit intensity regardless of quantum depth */
      double intensity = GetPixelIntensity(image,p);
      histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Normalize histogram.
  */
  sum=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    sum+=histogram[i];
  gamma=PerceptibleReciprocal(sum);
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    histogram[i]=gamma*histogram[i];
  /*
    Discover threshold from histogram.
  */
  switch (method)
  {
    case KapurThresholdMethod:
    {
      threshold=KapurThreshold(image,histogram,exception);
      break;
    }
    case OTSUThresholdMethod:
    default:
    {
      threshold=OTSUThreshold(image,histogram,exception);
      break;
    }
    case TriangleThresholdMethod:
    {
      threshold=TriangleThreshold(image,histogram,exception);
      break;
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  if (threshold < 0.0)
    status=MagickFalse;  /* selection method reported allocation failure */
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Threshold image.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold);
  (void) SetImageProperty(image,"auto-threshold:threshold",property,exception);
  return(BilevelImage(image,QuantumRange*threshold/100.0,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B i l e v e l I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BilevelImage() changes the value of individual pixels based on the
%  intensity of each pixel channel.  The result is a high-contrast image.
%
%  More precisely each channel value of the image is 'thresholded' so that if
%  it is equal to or less than the given value it is set to zero, while any
%  value greater than that given is set to its maximum or QuantumRange.
%
%  This function is what is used to implement the "-threshold" operator for
%  the command line API.
%
%  If the default channel setting is given the image is thresholded using just
%  the gray 'intensity' of the image, rather than the individual channels.
%
%  The format of the BilevelImage method is:
%
%      MagickBooleanType BilevelImage(Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: define the threshold values.
%
%    o exception: return any errors or warnings in this structure.
%
%  Aside: You can get the same results as operator using LevelImages()
%  with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Bilevel threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* another thread failed; skip remaining rows */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);  /* pixel is write-protected */
          continue;
        }
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* per-channel threshold when a channel mask is active; otherwise
           threshold all channels on the gray intensity */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        q[i]=(Quantum) (pixel <= threshold ? 0 : QuantumRange);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BilevelImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l a c k T h r e s h o l d I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlackThresholdImage() is like ThresholdImage() but forces all pixels below
%  the threshold into black while leaving all pixels at or above the threshold
%  unchanged.
%
%  The format of the BlackThresholdImage method is:
%
%      MagickBooleanType BlackThresholdImage(Image *image,
%        const char *threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);  /* no threshold given: nothing to do */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Parse the threshold geometry: rho[,sigma[,xi[,psi]]] map to the red,
    green, blue, and alpha channel thresholds; a single value applies to all.
  */
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      /* CMYK: the 4th value is black, the 5th is alpha */
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      /* values given as percentages of the quantum range */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    Black threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel < GetPixelInfoChannel(&threshold,channel))
          q[i]=(Quantum) 0;  /* below threshold: force to black */
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BlackThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l a m p I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClampImage() set each pixel whose value is below zero to zero and any the
%  pixel whose value is above the quantum range to the quantum range (e.g.
%  65535) otherwise the pixel value remains unchanged.
%
%  The format of the ClampImage method is:
%
%      MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
{
#define ClampImageTag  "Clamp/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        PseudoClass: clamp the colormap entries, then sync pixels to it.
      */
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) ClampPixel(q->red);
        q->green=(double) ClampPixel(q->green);
        q->blue=(double) ClampPixel(q->blue);
        q->alpha=(double) ClampPixel(q->alpha);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Clamp image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampPixel((MagickRealType) q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ClampImage)
#endif
        proceed=SetImageProgress(image,ClampImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y T h r e s h o l d M a p                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyThresholdMap() de-allocate the given ThresholdMap
%
%  The format of the ListThresholdMaps method is:
%
%      ThresholdMap *DestroyThresholdMap(Threshold *map)
%
%  A description of each parameter follows.
%
%    o map: Pointer to the Threshold map to destroy
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  /*
    Release the map's owned members (id string, description, level table),
    then the map structure itself.  Always returns NULL so callers can write
    map=DestroyThresholdMap(map).
  */
  assert(map != (ThresholdMap *) NULL);
  if (map->levels != (ssize_t *) NULL)
    map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  return((ThresholdMap *) RelinquishMagickMemory(map));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G e t T h r e s h o l d M a p                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMap() loads and searches one or more threshold map files for the
%  map matching the given name or alias.
%
%  The format of the GetThresholdMap method is:
%
%      ThresholdMap *GetThresholdMap(const char *map_id,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o map_id: ID of the map to look for.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
  ExceptionInfo *exception)
{
  ThresholdMap
    *map;

  /*
    Search the built-in minimal map list first, then (unless built
    zero-configuration) each threshold.xml configure file in turn; the first
    match wins.
  */
  map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception);
  if (map != (ThresholdMap *) NULL)
    return(map);
#if !defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT)
  {
    const StringInfo
      *option;

    LinkedListInfo
      *options;

    options=GetConfigureOptions(ThresholdsFilename,exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
    while (option != (const StringInfo *) NULL)
    {
      map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
        GetStringInfoPath(option),map_id,exception);
      if (map != (ThresholdMap *) NULL)
        break;
      option=(const StringInfo *) GetNextValueInLinkedList(options);
    }
    options=DestroyConfigureOptions(options);
  }
#endif
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  +  G e t T h r e s h o l d M a p F i l e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMapFile() look for a given threshold map name or alias in the
%  given XML file data, and return the allocated the map when found.
%
%  The format of the ListThresholdMaps method is:
%
%      ThresholdMap *GetThresholdMap(const char *xml,const char *filename,
%        const char *map_id,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o xml:  The threshold map list in XML format.
%
%    o filename:  The threshold map XML filename.
%
%    o map_id:  ID of the map to look for in XML list.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
  const char *map_id,ExceptionInfo *exception)
{
  char
    *p;

  const char
    *attribute,
    *content;

  double
    value;

  register ssize_t
    i;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  map=(ThresholdMap *) NULL;
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(map);
  /*
    Locate the <threshold> element whose "map" or "alias" attribute matches.
  */
  for (threshold=GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold=GetNextXMLTreeTag(threshold))
  {
    attribute=GetXMLTreeAttribute(threshold,"map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold,"alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    Allocate the map and populate it from the element's attributes; every
    validation failure below destroys both the XML tree and the partial map.
  */
  map=(ThresholdMap *) AcquireCriticalMemory(sizeof(*map));
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  attribute=GetXMLTreeAttribute(threshold,"map");
  if (attribute != (char *) NULL)
    map->map_id=ConstantString(attribute);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
  attribute=GetXMLTreeAttribute(levels,"width");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->width=StringToUnsignedLong(attribute);
  if (map->width == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"height");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->height=StringToUnsignedLong(attribute);
  if (map->height == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"divisor");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->divisor=(ssize_t) StringToLong(attribute);
  if (map->divisor < 2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /*
    Parse exactly width*height whitespace-separated level values; each must
    lie in [0, divisor].
  */
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height*
    sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  for (i=0; i < (ssize_t) (map->width*map->height); i++)
  {
    map->levels[i]=(ssize_t) strtol(content,&p,10);
    if (p == content)
      {
        /* strtol consumed nothing: fewer values than width*height */
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too few values, map \"%s\"",map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
          (double) map->levels[i],map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    content=p;
  }
  /* one more successful parse means the table had surplus values */
  value=(double) strtol(content,&p,10);
  (void) value;
  if (p != content)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidContent", "<level> too many values, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  +  L i s t T h r e s h o l d M a p F i l e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMapFile() lists the threshold maps and their descriptions
%  in the given XML file data.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,const char*xml,
%        const char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file:  A pointer to the output FILE.
%
%    o xml:  The threshold map list in XML format.
%
%    o filename:  The threshold map XML filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  const char
    *alias,
    *content,
    *map;

  XMLTreeInfo
    *description,
    *threshold,
    *thresholds;

  assert( xml != (char *) NULL );
  assert( file != (FILE *) NULL );
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if ( thresholds == (XMLTreeInfo *) NULL )
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  threshold=GetXMLTreeChild(thresholds,"threshold");
  for ( ; threshold != (XMLTreeInfo *) NULL;
          threshold=GetNextXMLTreeTag(threshold))
  {
    map=GetXMLTreeAttribute(threshold,"map");
    if (map == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    alias=GetXMLTreeAttribute(threshold,"alias");  /* alias is optional */
    description=GetXMLTreeChild(threshold,"description");
    if (description == (XMLTreeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"",map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    content=GetXMLTreeContent(description);
    if (content == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
      content);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L i s t T h r e s h o l d M a p s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMaps() lists the threshold maps and their descriptions
%  as defined by "threshold.xml" to a file.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file:  A pointer to the output FILE.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  status=MagickTrue;
  if (file == (FILE *) NULL)
    file=stdout;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n   Threshold Maps for Ordered Dither Operations\n");
  option=(const StringInfo *) GetNextValueInLinkedList(options);
  while (option != (const StringInfo *) NULL)
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    /* accumulate failures across files; any failure makes the result false */
    status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O r d e r e d D i t h e r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OrderedDitherImage() will perform an ordered dither based on a number
%  of pre-defined dithering threshold maps, but over multiple intensity
%  levels, which can be different for different channels, according to the
%  input argument.
%
%  The format of the OrderedDitherImage method is:
%
%      MagickBooleanType OrderedDitherImage(Image *image,
%        const char *threshold_map,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold_map: A string containing the name of the threshold dither
%      map to use, followed by zero or more numbers representing the number
%      of color levels to dither between.
%
%      Any level number less than 2 will be equivalent to 2, and means only
%      binary dithering will be applied to each color channel.
%
%      No numbers also means a 2 level (bitmap) dither will be applied to all
%      channels, while a single number is the number of levels applied to each
%      channel in sequence.  More numbers will be applied in turn to each of
%      the color channels.
%
%      For example: "o3x3,6" will generate a 6 level posterization of the
%      image with an ordered 3x3 diffused pixel dither being applied between
%      each level.  While checker,8,8,4 will produce a 332 colormapped image
%      with only a single checkerboard hash pattern (50% grey) between each
%      color level, to basically double the number of color levels with
%      a bare minimum of dithering.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType OrderedDitherImage(Image *image, const char *threshold_map,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; char token[MagickPathExtent]; const char *p; double levels[CompositePixelChannel]; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; ThresholdMap *map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (threshold_map == (const char *) NULL) return(MagickTrue); p=(char *) threshold_map; while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) && (*p != '\0')) p++; threshold_map=p; while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) && (*p != '\0')) { if ((p-threshold_map) >= (MagickPathExtent-1)) break; token[p-threshold_map]=(*p); p++; } token[p-threshold_map]='\0'; map=GetThresholdMap(token,exception); if (map == (ThresholdMap *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","ordered-dither",threshold_map); return(MagickFalse); } for (i=0; i < MaxPixelChannels; i++) levels[i]=2.0; p=strchr((char *) threshold_map,','); if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0)) { GetNextToken(p,&p,MagickPathExtent,token); for (i=0; (i < MaxPixelChannels); i++) levels[i]=StringToDouble(token,(char **) NULL); for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); levels[i]=StringToDouble(token,(char **) NULL); } } for (i=0; i < MaxPixelChannels; i++) if (fabs(levels[i]) >= 1) levels[i]-=1.0; if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; 
image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; ssize_t n; n=0; if (GetPixelWriteMask(image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { ssize_t level, threshold; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (fabs(levels[n]) < MagickEpsilon) { n++; continue; } threshold=(ssize_t) (QuantumScale*q[i]*(levels[n]*(map->divisor-1)+1)); level=threshold/(map->divisor-1); threshold-=level*(map->divisor-1); q[i]=ClampToQuantum((double) (level+(threshold >= map->levels[(x % map->width)+map->width*(y % map->height)]))* QuantumRange/levels[n]); n++; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OrderedDitherImage) #endif proceed=SetImageProgress(image,DitherImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); map=DestroyThresholdMap(map); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P e r c e p t i b l e I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PerceptibleImage() set each pixel whose value is less than |epsilon| to
%  epsilon or -epsilon (whichever is closer) otherwise the pixel value remains
%  unchanged.
%
%  The format of the PerceptibleImage method is:
%
%      MagickBooleanType PerceptibleImage(Image *image,const double epsilon,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Clamp a quantum away from zero: values with magnitude below epsilon are
  pushed to +/-epsilon (sign-preserving); larger values pass through.
  NOTE(review): with an unsigned Quantum (non-HDRI builds) `quantum < 0.0`
  can never hold, so sign is effectively always +1 there.
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
  const double epsilon)
{
  double
    sign;

  sign=(double) quantum < 0.0 ? -1.0 : 1.0;
  if ((sign*quantum) >= epsilon)
    return(quantum);
  return((Quantum) (sign*epsilon));
}

MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon,ExceptionInfo *exception)
{
#define PerceptibleImageTag  "Perceptible/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        PseudoClass: thresholding the colormap entries is sufficient; no
        per-pixel pass is needed.
      */
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red),
          epsilon);
        q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green),
          epsilon);
        q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue),
          epsilon);
        q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha),
          epsilon);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Perceptible image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          /* pixel is write-masked out; leave untouched */
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PerceptibleThreshold(q[i],epsilon);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PerceptibleImage)
#endif
        proceed=SetImageProgress(image,PerceptibleImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     R a n d o m T h r e s h o l d I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RandomThresholdImage() changes the value of individual pixels based on the
%  intensity of each pixel compared to a random threshold.  The result is a
%  low-contrast, two color image.
%
%  The format of the RandomThresholdImage method is:
%
%      MagickBooleanType RandomThresholdImage(Image *image,
%        const double min_threshold,const double max_threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o min_threshold,max_threshold: Specify the low and high thresholds.
%      These values range from 0 to QuantumRange.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const double min_threshold, const double max_threshold,
  ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  GetPixelInfo(image,&threshold);
  /*
    Random threshold image.
  */
  status=MagickTrue;
  progress=0;
  /* one RandomInfo per OpenMP thread so pseudo-random streams don't race */
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    Only parallelize when the random secret key is the default (~0UL);
    otherwise keep the row order deterministic.
  */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          threshold;  /* NOTE: shadows the outer PixelInfo `threshold` */

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          Clamp the threshold into [min_threshold,max_threshold]; inside the
          range, draw a random threshold per channel sample.
        */
        if ((double) q[i] < min_threshold)
          threshold=min_threshold;
        else if ((double) q[i] > max_threshold)
          threshold=max_threshold;
        else
          threshold=(double) (QuantumRange*
            GetPseudoRandomValue(random_info[id]));
        q[i]=(double) q[i] <= threshold ? 0 : QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RandomThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     W h i t e T h r e s h o l d I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
%  the threshold into white while leaving all pixels at or below the threshold
%  unchanged.
%
%  The format of the WhiteThresholdImage method is:
%
%      MagickBooleanType WhiteThresholdImage(Image *image,
%        const char *threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: Define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  GetPixelInfo(image,&threshold);
  /*
    Parse the threshold geometry: rho[,sigma[,xi[,psi[,chi]]]] map to
    red/green/blue/(black)/alpha; a trailing '%' scales by QuantumRange/100.
  */
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      /* in CMYK, psi is the black channel and chi the alpha channel */
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    White threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      /*
        Default: threshold the pixel intensity; when an explicit channel mask
        is set, threshold each channel value independently instead.
      */
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel > GetPixelInfoChannel(&threshold,channel))
          q[i]=QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_WhiteThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
OpenACCIR.h
#include <cassert>
#include <cstring>
#include <fstream>
#include <iostream>
#include <map>
#include <vector>

#include "OpenACCKinds.h"

// Base language of the source the OpenACC pragma was found in.
enum OpenACCBaseLang {
  ACC_Lang_C,
  ACC_Lang_Cplusplus,
  ACC_Lang_Fortran,
  ACC_Lang_unknown
};

// Line/column position of a construct, with an optional link to the
// enclosing (parent) construct's location.
class ACC_SourceLocation {
  int line;
  int column;
  ACC_SourceLocation *parent_construct;

public:
  ACC_SourceLocation(int _line = 0, int _col = 0,
                     ACC_SourceLocation *_parent_construct = NULL)
      : line(_line), column(_col), parent_construct(_parent_construct){};
  void setParentConstruct(ACC_SourceLocation *_parent_construct) {
    parent_construct = _parent_construct;
  };
  ACC_SourceLocation *getParentConstruct() { return parent_construct; };
  int getLine() { return line; };
  void setLine(int _line) { line = _line; };
  int getColumn() { return column; };
  void setColumn(int _column) { column = _column; };
};

/**
 * The class or baseclass for all the clause classes. For all the clauses that
 * only take 0 to multiple expression or variables, we use this class to create
 * objects. For all other clauses, which requires at least one parameters, we
 * will have an inherit class from this one, and the superclass contains fields
 * for the parameters
 */
class OpenACCClause : public ACC_SourceLocation {
protected:
  OpenACCClauseKind kind;
  // the clause position in the vector of clauses in original order
  int clause_position = -1;

  /* consider this is a struct of array, i.e.
   * the expression/locationLine/locationColumn at the same index are one
   * record for an expression and its location */
  std::vector<std::string> expressions;
  std::vector<ACC_SourceLocation> locations;

public:
  OpenACCClause(OpenACCClauseKind k, int _line = 0, int _col = 0)
      : ACC_SourceLocation(_line, _col), kind(k){};

  OpenACCClauseKind getKind() { return kind; };
  int getClausePosition() { return clause_position; };
  void setClausePosition(int _clause_position) {
    clause_position = _clause_position;
  };

  // a list of expressions or variables that are language-specific for the
  // clause, accparser does not parse them, instead, it only stores them as
  // strings
  void addLangExpr(std::string expression_string, int line = 0, int col = 0);
  std::vector<std::string> *getExpressions() { return &expressions; };
  virtual std::string toString();
  std::string expressionToString();
};

/**
 * The class for all the OpenACC directives
 */
class OpenACCDirective : public ACC_SourceLocation {
protected:
  OpenACCDirectiveKind kind;
  OpenACCBaseLang lang;

  /* The vector is used to store the pointers of clauses in original order.
   * While unparsing, the generated pragma keeps the clauses in the same order
   * as the input. For example, #pragma omp parallel shared(a) private(b) is the
   * input. The unparsing won't switch the order of share and private clause.
   * Share clause is always the first.
   *
   * For the clauses that could be normalized, we always merge the second one to
   * the first one. Then the second one will be eliminated and not stored
   * anywhere.
   */
  std::vector<OpenACCClause *> *clauses_in_original_order =
      new std::vector<OpenACCClause *>();

  /* the map to store clauses of the directive, for each clause, we store a
   * vector of OpenACCClause objects since there could be multiple clause
   * objects for those clauses that take parameters, e.g. reduction clause
   *
   * for those clauses just take no parameters, but may take some variables or
   * expressions, we only need to have one OpenACCClause object, e.g. shared,
   * private.
   *
   * The design and use of this map should make sure that for any clause, we
   * should only have one OpenACCClause object for each instance of kind and
   * full parameters
   */
  std::map<OpenACCClauseKind, std::vector<OpenACCClause *> *> clauses;

  /**
   * This method searches the clauses map to see whether one or more
   * OpenACCClause objects of the specified kind parameters exist in the
   * directive, if so it returns the objects that match.
   * @param kind clause kind
   * @param parameters clause parameters
   * @return
   */
  std::vector<OpenACCClause *> searchOpenACCClause(OpenACCClauseKind kind,
                                                   int num, int *parameters);

  /**
   * Search and add a clause of kind and parameters specified by the variadic
   * parameters. This should be the only call used to add an OpenACCClause
   * object.
   *
   * The method may simply create an OpenACCClause-subclassed object and return
   * it. In this way, normalization will be needed later on.
   *
   * Or the method may do the normalization while adding a clause.
   * it first searches the clauses map to see whether an OpenACCClause object
   * of the specified kind and parameters exists in the map. If so, it only
   * return that OpenACCClause object, otherwise, it should create a new
   * OpenACCClause object and insert in the map
   *
   * NOTE: if only partial parameters are provided as keys to search for a
   * clause, the function will only return the first one that matches. Thus, the
   * method should NOT be called with partial parameters of a specific clause
   * @param kind
   * @param parameters clause parameters, number of parameters should be
   * determined by the kind
   * @return
   */
  OpenACCClause *addOpenACCClause(OpenACCClauseKind kind, int *parameters);

  /**
   * normalize all the clause of a specific kind
   * @param kind
   * @return
   */
  void *normalizeClause(OpenACCClauseKind kind);

public:
  OpenACCDirective(OpenACCDirectiveKind k,
                   OpenACCBaseLang _lang = ACC_Lang_unknown, int _line = 0,
                   int _col = 0)
      : ACC_SourceLocation(_line, _col), kind(k), lang(_lang){};

  OpenACCDirectiveKind getKind() { return kind; };
  std::map<OpenACCClauseKind, std::vector<OpenACCClause *> *> *
  getAllClauses() {
    return &clauses;
  };
  std::vector<OpenACCClause *> *getClauses(OpenACCClauseKind kind) {
    return clauses[kind];
  };
  std::vector<OpenACCClause *> *getClausesInOriginalOrder() {
    return clauses_in_original_order;
  };
  virtual std::string toString();
  // Unparse the directive back to pragma text; prefix/symbols allow
  // language-specific decoration (e.g. Fortran sentinels).
  std::string generatePragmaString(std::string _prefix = "#pragma acc ",
                                   std::string _beginning_symbol = "",
                                   std::string _ending_symbol = "");
  // To call this method directly to add new clause, it can't be protected.
  OpenACCClause *addOpenACCClause(int, ...);
  void setBaseLang(OpenACCBaseLang _lang) { lang = _lang; };
  OpenACCBaseLang getBaseLang() { return lang; };
};

// Cache directive: holds an optional readonly-style modifier and the list of
// cached variable expressions.
class OpenACCCacheDirective : public OpenACCDirective {
protected:
  OpenACCCacheDirectiveModifier modifier = ACCC_CACHE_unspecified;
  std::vector<std::string> expressions;

public:
  OpenACCCacheDirective() : OpenACCDirective(ACCD_cache){};
  OpenACCCacheDirectiveModifier getModifier() { return modifier; };
  void setModifier(OpenACCCacheDirectiveModifier _modifier) {
    modifier = _modifier;
  };
  std::vector<std::string> *getExpressions() { return &expressions; };
  void addVar(std::string _string) { expressions.push_back(_string); };
  std::string toString();
  std::string expressionToString();
};

// End directive: links back to the construct directive it terminates.
class OpenACCEndDirective : public OpenACCDirective {
protected:
  OpenACCDirective *paired_directive;

public:
  OpenACCEndDirective() : OpenACCDirective(ACCD_end){};
  void setPairedDirective(OpenACCDirective *_paired_directive) {
    paired_directive = _paired_directive;
  };
  OpenACCDirective *getPairedDirective() { return paired_directive; };
};

// Routine directive: optionally names the routine it applies to.
class OpenACCRoutineDirective : public OpenACCDirective {
protected:
  std::string name = "";

public:
  OpenACCRoutineDirective() : OpenACCDirective(ACCD_routine){};
  void setName(std::string _name) { name = _name; };
  std::string getName() { return name; };
};

// Wait directive: optional devnum expression, optional queues keyword, and
// the list of wait-argument expressions.
class OpenACCWaitDirective : public OpenACCDirective {
protected:
  std::vector<std::string> expressions;
  std::string devnum = "";
  bool queues = false;

public:
  OpenACCWaitDirective() : OpenACCDirective(ACCD_wait){};
  void setDevnum(std::string _devnum) { devnum = _devnum; };
  std::string getDevnum() { return devnum; };
  void setQueues(bool _queues) { queues = _queues; };
  bool getQueues() { return queues; };
  std::vector<std::string> *getExpressions() { return &expressions; };
  void addVar(std::string _string) { expressions.push_back(_string); };
  std::string toString();
  std::string expressionToString();
};

// Async Clause
class OpenACCAsyncClause : public OpenACCClause {
public:
  OpenACCAsyncClause() : OpenACCClause(ACCC_async){};
  static OpenACCClause *addClause(OpenACCDirective *);
  std::string toString();
  void mergeClause(OpenACCDirective *, OpenACCClause *);
};

// Bind Clause
class OpenACCBindClause : public OpenACCClause {
public:
  OpenACCBindClause() : OpenACCClause(ACCC_bind){};
  static OpenACCClause *addClause(OpenACCDirective *);
  std::string toString();
  void mergeClause(OpenACCDirective *, OpenACCClause *);
};

// Collapse Clause
class OpenACCCollapseClause : public OpenACCClause {
public:
  OpenACCCollapseClause() : OpenACCClause(ACCC_collapse){};
  static OpenACCClause *addClause(OpenACCDirective *);
  std::string toString();
  void mergeClause(OpenACCDirective *, OpenACCClause *);
};

// Copyin Clause (carries an optional readonly modifier)
class OpenACCCopyinClause : public OpenACCClause {
protected:
  OpenACCCopyinClauseModifier modifier = ACCC_COPYIN_unspecified;

public:
  OpenACCCopyinClause() : OpenACCClause(ACCC_copyin){};
  OpenACCCopyinClauseModifier getModifier() { return modifier; };
  void setModifier(OpenACCCopyinClauseModifier _modifier) {
    modifier = _modifier;
  };
  static OpenACCClause *addClause(OpenACCDirective *);
  std::string toString();
  void mergeClause(OpenACCDirective *, OpenACCClause *);
};

// Copyout Clause (carries an optional zero modifier)
class OpenACCCopyoutClause : public OpenACCClause {
protected:
  OpenACCCopyoutClauseModifier modifier = ACCC_COPYOUT_unspecified;

public:
  OpenACCCopyoutClause() : OpenACCClause(ACCC_copyout){};
  OpenACCCopyoutClauseModifier getModifier() { return modifier; };
  void setModifier(OpenACCCopyoutClauseModifier _modifier) {
    modifier = _modifier;
  };
  static OpenACCClause *addClause(OpenACCDirective *);
  std::string toString();
  void mergeClause(OpenACCDirective *, OpenACCClause *);
};

// Create Clause (carries an optional zero modifier)
class OpenACCCreateClause : public OpenACCClause {
protected:
  OpenACCCreateClauseModifier modifier = ACCC_CREATE_unspecified;

public:
  OpenACCCreateClause() : OpenACCClause(ACCC_create){};
  OpenACCCreateClauseModifier getModifier() { return modifier; };
  void setModifier(OpenACCCreateClauseModifier _modifier) {
    modifier = _modifier;
  };
  static OpenACCClause *addClause(OpenACCDirective *);
  std::string toString();
  void mergeClause(OpenACCDirective *, OpenACCClause *);
};

// Default Clause (none/present); note: no mergeClause — at most one default
// clause is kept.
class OpenACCDefaultClause : public OpenACCClause {
protected:
  OpenACCDefaultClauseKind default_kind = ACCC_DEFAULT_unspecified;

public:
  OpenACCDefaultClause() : OpenACCClause(ACCC_default){};
  OpenACCDefaultClauseKind getKind() { return default_kind; };
  void setKind(OpenACCDefaultClauseKind _default_kind) {
    default_kind = _default_kind;
  };
  static OpenACCClause *addClause(OpenACCDirective *);
  std::string toString();
};

// Default_async Clause
class OpenACCDefaultAsyncClause : public OpenACCClause {
public:
  OpenACCDefaultAsyncClause() : OpenACCClause(ACCC_default_async){};
  static OpenACCClause *addClause(OpenACCDirective *);
  std::string toString();
  void mergeClause(OpenACCDirective *, OpenACCClause *);
};

// Device_num Clause
class OpenACCDeviceNumClause : public OpenACCClause {
public:
  OpenACCDeviceNumClause() : OpenACCClause(ACCC_device_num){};
  static OpenACCClause *addClause(OpenACCDirective *);
  std::string toString();
  void mergeClause(OpenACCDirective *, OpenACCClause *);
};

// Gang Clause
class OpenACCGangClause : public OpenACCClause {
public:
  OpenACCGangClause() : OpenACCClause(ACCC_gang){};
  static OpenACCClause *addClause(OpenACCDirective *);
  std::string toString();
  void mergeClause(OpenACCDirective *, OpenACCClause *);
};

// Num_gangs Clause
class OpenACCNumGangsClause : public OpenACCClause {
public:
  OpenACCNumGangsClause() : OpenACCClause(ACCC_num_gangs){};
  static OpenACCClause *addClause(OpenACCDirective *);
  std::string toString();
  void mergeClause(OpenACCDirective *, OpenACCClause *);
};

// Num_workers Clause
class OpenACCNumWorkersClause : public OpenACCClause {
public:
  OpenACCNumWorkersClause() : OpenACCClause(ACCC_num_workers){};
  static OpenACCClause *addClause(OpenACCDirective *);
  std::string toString();
  void mergeClause(OpenACCDirective *, OpenACCClause *);
};

// Reduction Clause (carries the reduction operator)
class OpenACCReductionClause : public OpenACCClause {
protected:
  OpenACCReductionClauseOperator reduction_operator =
      ACCC_REDUCTION_unspecified;

public:
  OpenACCReductionClause() : OpenACCClause(ACCC_reduction){};
  OpenACCReductionClauseOperator getOperator() { return reduction_operator; };
  void setOperator(OpenACCReductionClauseOperator _operator) {
    reduction_operator = _operator;
  };
  static OpenACCClause *addClause(OpenACCDirective *);
  std::string toString();
  void mergeClause(OpenACCDirective *, OpenACCClause *);
};

// Self Clause
class OpenACCSelfClause : public OpenACCClause {
public:
  OpenACCSelfClause() : OpenACCClause(ACCC_self){};
  static OpenACCClause *addClause(OpenACCDirective *);
  std::string toString();
  void mergeClause(OpenACCDirective *, OpenACCClause *);
};

// Vector Clause (carries an optional length modifier)
class OpenACCVectorClause : public OpenACCClause {
protected:
  OpenACCVectorClauseModifier modifier = ACCC_VECTOR_unspecified;

public:
  OpenACCVectorClause() : OpenACCClause(ACCC_vector){};
  OpenACCVectorClauseModifier getModifier() { return modifier; };
  void setModifier(OpenACCVectorClauseModifier _modifier) {
    modifier = _modifier;
  };
  static OpenACCClause *addClause(OpenACCDirective *);
  std::string toString();
  void mergeClause(OpenACCDirective *, OpenACCClause *);
};

// Vector_length Clause
class OpenACCVectorLengthClause : public OpenACCClause {
public:
  OpenACCVectorLengthClause() : OpenACCClause(ACCC_vector_length){};
  static OpenACCClause *addClause(OpenACCDirective *);
  std::string toString();
  void mergeClause(OpenACCDirective *, OpenACCClause *);
};

// Wait Clause (optional devnum expression and queues keyword)
class OpenACCWaitClause : public OpenACCClause {
protected:
  std::string devnum = "";
  bool queues = false;

public:
  OpenACCWaitClause() : OpenACCClause(ACCC_wait){};
  void setDevnum(std::string _devnum) { devnum = _devnum; };
  std::string getDevnum() { return devnum; };
  void setQueues(bool _queues) { queues = _queues; };
  bool getQueues() { return queues; };
  static OpenACCClause *addClause(OpenACCDirective *);
  std::string toString();
  void mergeClause(OpenACCDirective *, OpenACCClause *);
};

// Worker Clause (carries an optional num modifier)
class OpenACCWorkerClause : public OpenACCClause {
protected:
  OpenACCWorkerClauseModifier modifier = ACCC_WORKER_unspecified;

public:
  OpenACCWorkerClause() : OpenACCClause(ACCC_worker){};
  OpenACCWorkerClauseModifier getModifier() { return modifier; };
  void setModifier(OpenACCWorkerClauseModifier _modifier) {
    modifier = _modifier;
  };
  static OpenACCClause *addClause(OpenACCDirective *);
  std::string toString();
  void mergeClause(OpenACCDirective *, OpenACCClause *);
};
core_math.h
// == mojo ==================================================================== // // Copyright (c) gnawice@gnawice.com. All rights reserved. // See LICENSE in root folder // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files(the "Software"), // to deal in the Software without restriction, including without // limitation the rights to use, copy, modify, merge, publish, distribute, // sublicense, and/or sell copies of the Software, and to permit persons to // whom the Software is furnished to do so, subject to the following // conditions : // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT // OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR // THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
//
// ============================================================================
// core_math.h: defines matrix class and math functions
// ==================================================================== mojo ==

#pragma once

#include <math.h>
#include <string.h>
#include <string>
#include <cstdlib>
#include <random>
#include <algorithm>
#include <immintrin.h>

namespace mojo {

// Edge-padding modes for convolution borders.
enum pad_type { zero = 0, edge = 1, median_edge = 2 };

// Scalar dot product with hand-unrolled cases for sizes 1..5 (typical
// convolution kernel widths); falls back to a loop for larger sizes.
inline float dot(const float *x1, const float *x2, const int size)
{
  switch (size)
  {
  case 1: return x1[0] * x2[0];
  case 2: return x1[0] * x2[0] + x1[1] * x2[1];
  case 3: return x1[0] * x2[0] + x1[1] * x2[1] + x1[2] * x2[2];
  case 4: return x1[0] * x2[0] + x1[1] * x2[1] + x1[2] * x2[2] + x1[3] * x2[3];
  case 5: return x1[0] * x2[0] + x1[1] * x2[1] + x1[2] * x2[2] + x1[3] * x2[3] + x1[4] * x2[4];
  default:
    float v = 0;
    for (int i = 0; i<size; i++) v += x1[i] * x2[i];
    return v;
  };
}

// 2D dot product over a size x size window; each row j starts at its own
// stride offset in the two buffers.
inline float unwrap_2d_dot(const float *x1, const float *x2, const int size,
  int stride1, int stride2)
{
  float v=0;
  for(int j=0; j<size; j++)
    v+= dot(&x1[stride1*j],&x2[stride2*j],size);
  return v;
}

// second item is rotated 180 (this is a convolution)
inline float dot_rot180(const float *x1, const float *x2, const int size)
{
  switch(size)
  {
  case 1: return x1[0]*x2[0];
  case 2: return x1[0]*x2[1]+x1[1]*x2[0];
  case 3: return x1[0]*x2[2]+x1[1]*x2[1]+x1[2]*x2[0];
  case 4: return x1[0]*x2[3]+x1[1]*x2[2]+x1[2]*x2[1]+x1[3]*x2[0];
  case 5: return x1[0]*x2[4]+x1[1]*x2[3]+x1[2]*x2[2]+x1[3]*x2[1]+x1[4]*x2[0];
  default:
    float v=0;
    for(int i=0; i<size; i++) v+=x1[i]*x2[size-i-1];
    return v;
  };
}

// 2D version of dot_rot180: rows of x2 are also taken in reverse order, so
// the whole size x size window of x2 is rotated 180 degrees.
inline float unwrap_2d_dot_rot180(const float *x1, const float *x2,
  const int size, int stride1, int stride2)
{
  float v=0;
  for(int j=0; j<size; j++)
  {
    v+= dot_rot180(&x1[stride1*j],&x2[stride2*(size-j-1)],size);
  }
  return v;
}

// Unwraps the input image into an AVX-friendly layout: N*N filter taps for
// 8 consecutive output positions are interleaved in groups of 8 floats, so
// the dotsum_unwrapped_* kernels can process 8 outputs per pass.
// Assumes aligned_out is sized/aligned for this layout — TODO confirm
// 32-byte alignment is guaranteed by the caller.
inline void unwrap_aligned_NxN(const int N, float *aligned_out,
  const float *in, const int in_size, const int stride = 1)
{
  const int node_size = (in_size - N)/stride + 1;
  int c1 = 0;
  int off = 0;
  const int inc_off = N*N*8;

  for (int j = 0; j < node_size; j += 1) // input h
  {
    for (int i = 0; i < node_size; i += 1) // input w
    {
      const float *tn = in + j*in_size + i;
      if(N==5)
      {
        // specialized copy for 5x5 windows (stride 40 = 5 taps * 8 lanes)
        for (int k = 0; k < 5; k++)
        {
          aligned_out[c1 + 0 + k * 40 + off] = tn[0 + 0 + in_size*k];
          aligned_out[c1 + 8 + k * 40 + off] = tn[0 + 1 + in_size*k];
          aligned_out[c1 + 16 + k * 40 + off] = tn[0 + 2 + in_size*k];
          aligned_out[c1 + 24 + k * 40 + off] = tn[0 + 3 + in_size*k];
          aligned_out[c1 + 32 + k * 40 + off] = tn[0 + 4 + in_size*k];
        }
      }
      else if(N==3)
      {
        // specialized fully-unrolled copy for 3x3 windows
        aligned_out[c1 + off] = tn[0];
        aligned_out[c1 + 8 + off] = tn[0 + 1];
        aligned_out[c1 + 16 + off] = tn[0 + 2];
        aligned_out[c1 + 24 + off] = tn[0 + in_size];
        aligned_out[c1 + 32 + off] = tn[0 + 1 + in_size];
        aligned_out[c1 + 40 + off] = tn[0 + 2 + in_size];
        aligned_out[c1 + 48 + off] = tn[0 + 2 * in_size];
        aligned_out[c1 + 56 + off] = tn[0 + 1 + 2 * in_size];
        aligned_out[c1 + 64 + off] = tn[0 + 2 + 2 * in_size];
      }
      else
      {
        // generic NxN copy
        int cnt=0;
        for (int k = 0; k < N; k++)
        {
          for (int m = 0; m < N; m++)
          {
            aligned_out[c1 + cnt*8 + off] = tn[0 + m + in_size*k];
            cnt++;
          }
        }
      }
      // advance lane; after filling all 8 lanes, jump to the next group
      off++;
      if (off > 7)
      {
        off = 0;
        c1 += inc_off;
      }
    }
  }
}

// Scalar fallback: accumulate the NxN filter over the unwrapped image into
// out, 8 outputs per iteration (matches the unwrap layout above).
inline void dotsum_unwrapped_NxN(const int N, const float *im,
  const float *filter_ptr, float *out, const int outsize)
{
  const int NN=N*N;
  for (int j = 0; j < outsize; j += 8)
  {
    float *c = out+j;
    for(int i=0; i<NN; i++)
    {
      const float f = filter_ptr[i];
      c[0]+=im[0]*f; c[1]+=im[1]*f; c[2]+=im[2]*f; c[3]+=im[3]*f;
      c[4]+=im[4]*f; c[5]+=im[5]*f; c[6]+=im[6]*f; c[7]+=im[7]*f;
      im+=8;
    }
  }
}

#ifdef MOJO_AVX

// AVX accumulation of a 2x2 filter over an unwrapped image: 8 outputs per
// iteration; each filter tap is broadcast across a whole YMM register.
inline void dotsum_unwrapped_2x2(const float *_img, const float *filter_ptr,
  float *out, const int outsize)
{
  _mm256_zeroupper();
  const __m256 f0 = _mm256_broadcast_ss(&filter_ptr[0]);
  const __m256 f1 = _mm256_broadcast_ss(&filter_ptr[1]);
  const __m256 f2 = _mm256_broadcast_ss(&filter_ptr[2]);
  const __m256 f3 = _mm256_broadcast_ss(&filter_ptr[3]);

  for (int j = 0; j < outsize; j += 8)
  {
    __m256 a, c0, c1;
    // multiply filter
    a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f0);
    a = _mm256_load_ps(_img + 8); c1 = _mm256_mul_ps(a, f1); c0 = _mm256_add_ps(c0, c1);
    a = _mm256_load_ps(_img + 16); c1 = _mm256_mul_ps(a, f2); c0 = _mm256_add_ps(c0, c1);
    a = _mm256_load_ps(_img + 24); c1 = _mm256_mul_ps(a, f3); c0 = _mm256_add_ps(c0, c1);
    // add result to output
    a = _mm256_load_ps(out + j);
    c0 = _mm256_add_ps(c0, a);
    _mm256_stream_ps(out + j, c0);
    _img += 32;
  }
  _mm256_zeroupper();
}

// AVX accumulation of a 3x3 filter over an unwrapped image (9 taps,
// 72 floats consumed per 8 outputs).
inline void dotsum_unwrapped_3x3(const float *_img, const float *filter_ptr,
  float *out, const int outsize)
{
  _mm256_zeroupper();
  const __m256 f0 = _mm256_broadcast_ss(&filter_ptr[0]);
  const __m256 f1 = _mm256_broadcast_ss(&filter_ptr[1]);
  const __m256 f2 = _mm256_broadcast_ss(&filter_ptr[2]);
  const __m256 f3 = _mm256_broadcast_ss(&filter_ptr[3]);
  const __m256 f4 = _mm256_broadcast_ss(&filter_ptr[4]);
  const __m256 f5 = _mm256_broadcast_ss(&filter_ptr[5]);
  const __m256 f6 = _mm256_broadcast_ss(&filter_ptr[6]);
  const __m256 f7 = _mm256_broadcast_ss(&filter_ptr[7]);
  const __m256 f8 = _mm256_broadcast_ss(&filter_ptr[8]);

  for (int j = 0; j < outsize; j += 8)//stride) // input w
  {
    __m256 a, c0, c1;
    // multiply filter
    a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f0);
    a = _mm256_load_ps(_img + 8); c1 = _mm256_mul_ps(a, f1); c0 = _mm256_add_ps(c0, c1);
    a = _mm256_load_ps(_img + 16); c1 = _mm256_mul_ps(a, f2); c0 = _mm256_add_ps(c0, c1);
    a = _mm256_load_ps(_img + 24); c1 = _mm256_mul_ps(a, f3); c0 = _mm256_add_ps(c0, c1);
    a = _mm256_load_ps(_img + 32); c1 = _mm256_mul_ps(a, f4); c0 = _mm256_add_ps(c0, c1);
    a = _mm256_load_ps(_img + 40); c1 = _mm256_mul_ps(a, f5); c0 = _mm256_add_ps(c0, c1);
    a = _mm256_load_ps(_img + 48); c1 = _mm256_mul_ps(a, f6); c0 = _mm256_add_ps(c0, c1);
    a = _mm256_load_ps(_img + 56); c1 = _mm256_mul_ps(a, f7); c0 = _mm256_add_ps(c0, c1);
    a = _mm256_load_ps(_img + 64); c1 = _mm256_mul_ps(a, f8); c0 = _mm256_add_ps(c0, c1);
    // add result to output
    a = _mm256_load_ps(out + j);
    c0 = _mm256_add_ps(c0, a);
    _mm256_stream_ps(out + j, c0);
    _img += 72;
  }
  _mm256_zeroupper();
}

// AVX accumulation of a 4x4 filter over an unwrapped image (16 taps,
// 128 floats consumed per 8 outputs).
inline void dotsum_unwrapped_4x4(const float *_img, const float *filter_ptr,
  float *out, const int outsize)
{
  _mm256_zeroupper();
  const __m256 f0 = _mm256_broadcast_ss(&filter_ptr[0]);
  const __m256 f1 = _mm256_broadcast_ss(&filter_ptr[1]);
  const __m256 f2 = _mm256_broadcast_ss(&filter_ptr[2]);
  const __m256 f3 = _mm256_broadcast_ss(&filter_ptr[3]);
  const __m256 f4 = _mm256_broadcast_ss(&filter_ptr[4]);
  const __m256 f5 = _mm256_broadcast_ss(&filter_ptr[5]);
  const __m256 f6 = _mm256_broadcast_ss(&filter_ptr[6]);
  const __m256 f7 = _mm256_broadcast_ss(&filter_ptr[7]);
  const __m256 f8 = _mm256_broadcast_ss(&filter_ptr[8]);
  const __m256 f9 = _mm256_broadcast_ss(&filter_ptr[9]);
  const __m256 f10 = _mm256_broadcast_ss(&filter_ptr[10]);
  const __m256 f11 = _mm256_broadcast_ss(&filter_ptr[11]);
  const __m256 f12 = _mm256_broadcast_ss(&filter_ptr[12]);
  const __m256 f13 = _mm256_broadcast_ss(&filter_ptr[13]);
  const __m256 f14 = _mm256_broadcast_ss(&filter_ptr[14]);
  const __m256 f15 = _mm256_broadcast_ss(&filter_ptr[15]);

  for (int j = 0; j < outsize; j += 8)//stride) // input w
  {
    __m256 a, c0, c1;
    // multiply filter
    a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f0);
    a = _mm256_load_ps(_img + 8); c1 = _mm256_mul_ps(a, f1); c0 = _mm256_add_ps(c0, c1);
    a = _mm256_load_ps(_img + 16); c1 = _mm256_mul_ps(a, f2); c0 = _mm256_add_ps(c0, c1);
    a = _mm256_load_ps(_img + 24); c1 = _mm256_mul_ps(a, f3); c0 = _mm256_add_ps(c0, c1);
    a = _mm256_load_ps(_img + 32); c1 = _mm256_mul_ps(a, f4); c0 = _mm256_add_ps(c0, c1);
    a = _mm256_load_ps(_img + 40); c1 = _mm256_mul_ps(a, f5); c0 = _mm256_add_ps(c0, c1);
    a = _mm256_load_ps(_img + 48); c1 = _mm256_mul_ps(a, f6); c0 = _mm256_add_ps(c0, c1);
    a = _mm256_load_ps(_img + 56); c1 = _mm256_mul_ps(a, f7); c0 = _mm256_add_ps(c0, c1);
    a = _mm256_load_ps(_img + 64); c1 = _mm256_mul_ps(a, f8); c0 = _mm256_add_ps(c0, c1);
    a = _mm256_load_ps(_img + 72); c1 =
_mm256_mul_ps(a, f9); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 80); c1 = _mm256_mul_ps(a, f10); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 88); c1 = _mm256_mul_ps(a, f11); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 96); c1 = _mm256_mul_ps(a, f12); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 104); c1 = _mm256_mul_ps(a, f13); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 112); c1 = _mm256_mul_ps(a, f14); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 120); c1 = _mm256_mul_ps(a, f15); c0 = _mm256_add_ps(c0, c1); // add result to output a = _mm256_load_ps(out + j); c0 = _mm256_add_ps(c0, a); _mm256_stream_ps(out + j, c0); _img += 128; } _mm256_zeroupper(); } inline void dotsum_unwrapped_5x5(const float *_img, const float *filter_ptr, float *out, const int outsize) { _mm256_zeroupper(); const __m256 f0 = _mm256_broadcast_ss(&filter_ptr[0]); const __m256 f1 = _mm256_broadcast_ss(&filter_ptr[1]); const __m256 f2 = _mm256_broadcast_ss(&filter_ptr[2]); const __m256 f3 = _mm256_broadcast_ss(&filter_ptr[3]); const __m256 f4 = _mm256_broadcast_ss(&filter_ptr[4]); const __m256 f5 = _mm256_broadcast_ss(&filter_ptr[5]); const __m256 f6 = _mm256_broadcast_ss(&filter_ptr[6]); const __m256 f7 = _mm256_broadcast_ss(&filter_ptr[7]); const __m256 f8 = _mm256_broadcast_ss(&filter_ptr[8]); const __m256 f9 = _mm256_broadcast_ss(&filter_ptr[9]); const __m256 f10 = _mm256_broadcast_ss(&filter_ptr[10]); const __m256 f11 = _mm256_broadcast_ss(&filter_ptr[11]); const __m256 f12 = _mm256_broadcast_ss(&filter_ptr[12]); const __m256 f13 = _mm256_broadcast_ss(&filter_ptr[13]); const __m256 f14 = _mm256_broadcast_ss(&filter_ptr[14]); const __m256 f15 = _mm256_broadcast_ss(&filter_ptr[15]); const __m256 f16 = _mm256_broadcast_ss(&filter_ptr[16]); const __m256 f17 = _mm256_broadcast_ss(&filter_ptr[17]); const __m256 f18 = _mm256_broadcast_ss(&filter_ptr[18]); const __m256 f19 = _mm256_broadcast_ss(&filter_ptr[19]); const __m256 f20 = 
_mm256_broadcast_ss(&filter_ptr[20]); const __m256 f21 = _mm256_broadcast_ss(&filter_ptr[21]); const __m256 f22 = _mm256_broadcast_ss(&filter_ptr[22]); const __m256 f23 = _mm256_broadcast_ss(&filter_ptr[23]); const __m256 f24 = _mm256_broadcast_ss(&filter_ptr[24]); for (int j = 0; j < outsize; j += 8) { __m256 a, c0, c1; a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f0); a = _mm256_load_ps(_img + 8); c1 = _mm256_mul_ps(a, f1); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 16); c1 = _mm256_mul_ps(a, f2); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 24); c1 = _mm256_mul_ps(a, f3); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 32); c1 = _mm256_mul_ps(a, f4); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 40); c1 = _mm256_mul_ps(a, f5); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 48); c1 = _mm256_mul_ps(a, f6); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 56); c1 = _mm256_mul_ps(a, f7); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 64); c1 = _mm256_mul_ps(a, f8); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 72); c1 = _mm256_mul_ps(a, f9); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 80); c1 = _mm256_mul_ps(a, f10); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 88); c1 = _mm256_mul_ps(a, f11); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 96); c1 = _mm256_mul_ps(a, f12); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 104); c1 = _mm256_mul_ps(a, f13); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 112); c1 = _mm256_mul_ps(a, f14); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 120); c1 = _mm256_mul_ps(a, f15); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 128); c1 = _mm256_mul_ps(a, f16); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 136); c1 = _mm256_mul_ps(a, f17); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 144); c1 = _mm256_mul_ps(a, f18); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 152); c1 = _mm256_mul_ps(a, f19); 
c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 160); c1 = _mm256_mul_ps(a, f20); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 168); c1 = _mm256_mul_ps(a, f21); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 176); c1 = _mm256_mul_ps(a, f22); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 184); c1 = _mm256_mul_ps(a, f23); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(_img + 192); c1 = _mm256_mul_ps(a, f24); c0 = _mm256_add_ps(c0, c1); a = _mm256_load_ps(out + j); c0 = _mm256_add_ps(c0, a); _mm256_stream_ps(out + j, c0); _img += 200; } _mm256_zeroupper(); } inline void dotsum_unwrapped_7x7(const float *_img, const float *filter_ptr, float *out, const int outsize) { _mm256_zeroupper(); __m256 f[49];//=new __m256(s); for(int i=0; i<49; i++) f[i]= _mm256_broadcast_ss(&filter_ptr[i]); for (int j = 0; j < outsize; j += 8) { __m256 a, c0, c1; a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f[0]); for(int i=1; i<49;i++) { a = _mm256_load_ps(_img + 8*i); c1 = _mm256_mul_ps(a, f[i]); c0 = _mm256_add_ps(c0, c1); } a = _mm256_load_ps(out + j); c0 = _mm256_add_ps(c0, a); _mm256_stream_ps(out + j, c0); _img += 49*8; } _mm256_zeroupper(); //delete [] f; } #else // no AVX inline void dotsum_unwrapped_2x2(const float *_img, const float *filter_ptr, float *out, const int outsize) { dotsum_unwrapped_NxN(2, _img, filter_ptr, out, outsize); } inline void dotsum_unwrapped_3x3(const float *_img, const float *filter_ptr, float *out, const int outsize) { dotsum_unwrapped_NxN(3, _img, filter_ptr, out, outsize); } inline void dotsum_unwrapped_4x4(const float *_img, const float *filter_ptr, float *out, const int outsize) { dotsum_unwrapped_NxN(4, _img, filter_ptr, out, outsize); } inline void dotsum_unwrapped_5x5(const float *_img, const float *filter_ptr, float *out, const int outsize) { dotsum_unwrapped_NxN(5, _img, filter_ptr, out, outsize); } inline void dotsum_unwrapped_7x7(const float *_img, const float *filter_ptr, float *out, const int outsize) { 
dotsum_unwrapped_NxN(7, _img, filter_ptr, out, outsize);
}
#endif

// matrix class ---------------------------------------------------
// should use opencv if available
//
// Simple owning float tensor (cols x rows x chans).  The raw allocation
// (_x_mem) is over-sized and x is a 32-byte aligned view into it so the
// AVX kernels can use aligned loads/stores.
class matrix
{
	int _size;       // number of addressable floats (chan_stride*chans)
	int _capacity;   // allocated floats; resize() only grows
	float *_x_mem;   // raw (unaligned) allocation backing x
	void delete_x() { delete[] _x_mem; x = NULL; _x_mem = NULL; }
	// 4 extra for alignment and 4 for 3 padding for SSE
	//float *new_x(const int size) { _x_mem = new float[size + 4+3]; x = (float *)(((uintptr_t)_x_mem + 16) & ~(uintptr_t)0x0F); return x; }
	// avx mem aligment
	// Allocates size + 15 floats and rounds the view pointer up to the next
	// 32-byte boundary.
	float *new_x(const int size) { _x_mem = new float[size + 8 + 7]; x = (float *)(((uintptr_t)_x_mem + 32) & ~(uintptr_t)0x1F); return x; }
public:
	std::string _name;
	int cols, rows, chans;
	int chan_stride;   // floats between consecutive channels (>= cols*rows)
	int chan_aligned;  // nonzero: pad chan_stride up to a multiple of 8
	float *x;          // aligned data pointer (do not delete; owned via _x_mem)
	// size must be divisible by 8 for AVX
	virtual int calc_chan_stride(int w, int h)
	{
		if (chan_aligned)
		{
			int s = w*h;
			const int remainder = s % 8;
			if (remainder > 0) s += 8 - remainder;
			return s;
		}
		else return w*h;
	}

	matrix( ): cols(0), rows(0), chans(0), _size(0), _capacity(0), chan_stride(0), x(NULL), chan_aligned(0)/*, empty_chan(NULL)*/{}

	// Main constructor; if data is non-NULL it is copied in (must hold at
	// least chan_stride*chans floats).
	matrix( int _w, int _h, int _c=1, const float *data=NULL, int align_chan=0): cols(_w), rows(_h), chans(_c)
	{
		chan_aligned = align_chan;
		chan_stride = calc_chan_stride(cols, rows);
		_size= chan_stride*chans;
		_capacity=_size;
		x = new_x(_size);
		if(data!=NULL) memcpy(x,data,_size*sizeof(float));
	}

	// copy constructor - deep copy
	matrix( const matrix &m) : cols(m.cols), rows(m.rows), chan_aligned(m.chan_aligned), chans(m.chans), chan_stride(m.chan_stride), _size(m._size), _capacity(m._size)
	{
		x = new_x(_size);
		memcpy(x,m.x,sizeof(float)*_size);
		/*empty_chan = new unsigned char[chans]; memcpy(empty_chan, m.empty_chan, chans);*/
	}
	// { v=m.v; x=(float*)v.data();}

	// copy and pad constructor
	matrix( const matrix &m, int pad_cols, int pad_rows, mojo::pad_type padding= mojo::zero, int threads=1) : cols(m.cols), rows(m.rows), chans(m.chans), chan_aligned(m.chan_aligned), chan_stride(m.chan_stride), _size(m._size), _capacity(m._size)
	{
		x = new_x(_size);
		memcpy(x, m.x, sizeof(float)*_size);
		*this = pad(pad_cols, pad_rows, padding, threads);
	}

	~matrix() { if (x) delete_x(); }

	// Deep-copies num_chans channels starting at start_channel.
	matrix get_chans(int start_channel, int num_chans=1) const
	{
		return matrix(cols,rows,num_chans,&x[start_channel*chan_stride]);
	}

	// if edge_pad==0, then the padded area is just 0.
	// if edge_pad==1 it fills with edge pixel colors
	// if edge_pad==2 it fills with median edge pixel color
	matrix pad(int dx, int dy, mojo::pad_type edge_pad = mojo::zero, int threads=1) const
	{
		return pad(dx, dy, dx, dy, edge_pad, threads);
	}
	// Returns a new matrix grown by dx/dy (left/top) and dx_right/dy_bottom,
	// one OpenMP task per channel.
	matrix pad(int dx, int dy, int dx_right, int dy_bottom, mojo::pad_type edge_pad = mojo::zero, int threads=1) const
	{
		matrix v(cols+dx+dx_right,rows+dy+dy_bottom,chans);//,NULL,this->chan_aligned);
		v.fill(0);

		//float *new_x = new float[chans*w*h];
#pragma omp parallel for num_threads(threads)
		for(int k=0; k<chans; k++)
		{
			const int v_chan_offset=k*v.chan_stride;
			const int chan_offset=k*chan_stride;
			// find median color of perimeter
			float median = 0.f;
			if (edge_pad == mojo::median_edge)
			{
				int perimeter = 2 * (cols + rows - 2);
				std::vector<float> d(perimeter);
				// top and bottom rows
				for (int i = 0; i < cols; i++)
				{
					d[i] = x[i+ chan_offset];
					d[i + cols] = x[i + cols*(rows - 1)+ chan_offset];
				}
				// left and right columns (excluding corners)
				for (int i = 1; i < (rows - 1); i++)
				{
					d[i + cols * 2] = x[cols*i+ chan_offset];
					// file from back so i dont need to cal index
					d[perimeter - i] = x[cols - 1 + cols*i+ chan_offset];
				}
				std::nth_element(d.begin(), d.begin() + perimeter / 2, d.end());
				median = d[perimeter / 2];
				//for (int i = 0; i < v.rows*v.cols; i++) v.x[v_chan_offset + i] = solid_fill;
			}

			for(int j=0; j<rows; j++)
			{
				// copy the original row into the padded interior
				memcpy(&v.x[dx+(j+dy)*v.cols+v_chan_offset], &x[j*cols+chan_offset], sizeof(float)*cols);
				if(edge_pad== mojo::edge)
				{
					// do left/right side
					for(int i=0; i<dx; i++) v.x[i+(j+dy)*v.cols+v_chan_offset]=x[0+j*cols+chan_offset];
					for (int i = 0; i<dx_right; i++) v.x[i + dx + cols + (j + dy)*v.cols + v_chan_offset] = x[(cols - 1) + j*cols + chan_offset];
				}
				else if (edge_pad == mojo::median_edge)
				{
					for (int i = 0; i < dx; i++) v.x[i + (j + dy)*v.cols + v_chan_offset] = median;
					for (int i = 0; i < dx_right; i++) v.x[i + dx + cols + (j + dy)*v.cols + v_chan_offset] = median;
				}
			}
			// top bottom pad
			if(edge_pad== mojo::edge)
			{
				for(int j=0; j<dy; j++) memcpy(&v.x[(j)*v.cols+v_chan_offset],&v.x[(dy)*v.cols+v_chan_offset], sizeof(float)*v.cols);
				for (int j = 0; j<dy_bottom; j++) memcpy(&v.x[(j + dy + rows)*v.cols + v_chan_offset], &v.x[(rows - 1 + dy)*v.cols + v_chan_offset], sizeof(float)*v.cols);
			}
			if (edge_pad == mojo::median_edge)
			{
				for (int j = 0; j<dy; j++)
					for (int i = 0; i<v.cols; i++)
						v.x[i + j*v.cols + v_chan_offset] = median;
				for (int j = 0; j<dy_bottom; j++)
					for (int i = 0; i<v.cols; i++)
						v.x[i + (j + dy + rows)*v.cols + v_chan_offset] = median;
			}
		}

		return v;
	}

	// Returns the w x h window whose top-left corner is (dx,dy), all channels.
	matrix crop(int dx, int dy, int w, int h, int threads=1) const
	{
		matrix v(w,h,chans);

#pragma omp parallel for num_threads(threads)
		for(int k=0; k<chans; k++)
		{
			for(int j=0; j<h; j++)
			{
				memcpy(&v.x[j*w+k*v.chan_stride], &x[dx+(j+dy)*cols+k*chan_stride], sizeof(float)*w);
			}
		}

		return v;
	}

	// Shifts the image by (dx,dy), implemented as pad-then-crop so the
	// vacated border is filled per edge_pad.
	mojo::matrix shift(int dx, int dy, mojo::pad_type edge_pad=mojo::zero)
	{
		int orig_cols=cols;
		int orig_rows=rows;
		int off_x=abs(dx);
		int off_y=abs(dy);

		mojo::matrix shifted= pad(off_x, off_y, edge_pad);

		return shifted.crop(off_x-dx, off_y-dy,orig_cols,orig_rows);
	}

	// Horizontal mirror (per channel).
	mojo::matrix flip_cols()
	{
		mojo::matrix v(cols,rows,chans);
		for(int k=0; k<chans; k++)
			for(int j=0; j<rows; j++)
				for(int i=0; i<cols; i++)
					v.x[i+j*cols+k*chan_stride]=x[(cols-i-1)+j*cols+k*chan_stride];
		return v;
	}
	// Vertical mirror (per channel), row-wise memcpy.
	mojo::matrix flip_rows()
	{
		mojo::matrix v(cols, rows, chans);

		for (int k = 0; k<chans; k++)
			for (int j = 0; j<rows; j++)
				memcpy(&v.x[(rows-1-j)*cols + k*chan_stride],&x[j*cols + k*chan_stride], cols*sizeof(float));
		return v;
	}

	// Clamps every element (including any channel padding) to [min,max].
	void clip(float min, float max)
	{
		int s = chan_stride*chans;
		for (int i = 0; i < s; i++)
		{
			if (x[i] < min) x[i] = min;
			if (x[i] > max) x[i]=max;
		}
	}

	// Global min/max over all channels; optionally returns the flat indices.
	void min_max(float *min, float *max, int *min_i=NULL, int *max_i=NULL)
	{
		int s = rows*cols;
		int mini = 0; int maxi = 0;
		for (int c = 0; c < chans; c++)
		{
			const int t = chan_stride*c;
			// skips the alignment padding between channels
			for (int i = t; i < t+s; i++)
			{
				if (x[i] < x[mini]) mini = i;
				if (x[i] > x[maxi]) maxi = i;
			}
		}
		*min = x[mini];
		*max = x[maxi];
		if (min_i) *min_i = mini;
		if (max_i) *max_i = maxi;
	}

	// Mean over all real pixels (channel padding excluded).
	float mean()
	{
		const int s = rows*cols;
		int cnt = 0;// channel*s;
		float average = 0;
		for (int c = 0; c < chans; c++)
		{
			const int t = chan_stride*c;
			for (int i = 0; i < s; i++)
				average += x[i + t];
		}
		average = average / (float)(s*chans);
		return average;
	}
	// Subtracts and returns the mean of one channel.
	float remove_mean(int channel)
	{
		int s = rows*cols;
		int offset = channel*chan_stride;
		float average=0;
		for(int i=0; i<s; i++) average+=x[i+offset];
		average= average/(float)s;
		for(int i=0; i<s; i++) x[i+offset]-=average;
		return average;
	}

	// Subtracts the global mean from every element (padding included) and
	// returns it.
	float remove_mean()
	{
		float m=mean();
		int s = chan_stride*chans;
		//int offset = channel*s;
		for(int i=0; i<s; i++) x[i]-=m;
		return m;
	}
	void fill(float val) { for(int i=0; i<_size; i++) x[i]=val; }
	// Deterministic fills: fixed seed 0, so repeated runs are reproducible.
	void fill_random_uniform(float range)
	{
		std::mt19937 gen(0);
		std::uniform_real_distribution<float> dst(-range, range);
		for (int i = 0; i<_size; i++) x[i] = dst(gen);
	}
	void fill_random_normal(float std)
	{
		std::mt19937 gen(0);
		std::normal_distribution<float> dst(0, std);
		for (int i = 0; i<_size; i++) x[i] = dst(gen);
	}

	// deep copy
	inline matrix& operator =(const matrix &m)
	{
		resize(m.cols, m.rows, m.chans, m.chan_aligned);
		memcpy(x,m.x,sizeof(float)*_size);
		// memcpy(empty_chan, m.empty_chan, chans);
		return *this;
	}

	int size() const {return _size;}

	// Reshape; only reallocates when growing beyond _capacity.
	// NOTE(review): a growing resize does NOT preserve existing contents
	// (old buffer is freed before the new one is allocated).
	void resize(int _w, int _h, int _c, int align_chans=0)
	{
		chan_aligned = align_chans;
		int new_stride = calc_chan_stride(_w,_h);
		int s = new_stride*_c;
		if(s>_capacity)
		{
			if(_capacity>0) delete_x();
			_size = s;
			_capacity=_size;
			x = new_x(_size);
		}
		cols = _w; rows = _h; chans = _c; _size = s; chan_stride = new_stride;
	}

	// dot vector to 2d mat
	// Treats *this as a row vector; returns one dot product per row of m_2d.
	// Assumes m_2d.cols == _size — TODO confirm with callers.
	inline matrix dot_1dx2d(const matrix &m_2d) const
	{
		mojo::matrix v(m_2d.rows, 1, 1);
		for(int j=0; j<m_2d.rows; j++) v.x[j]=dot(x,&m_2d.x[j*m_2d.cols],_size);
		return v;
	}

	// +=
	inline matrix& operator+=(const matrix &m2){
		for(int i = 0; i < _size; i++) x[i] += m2.x[i];
		return *this;
	}
	// -=
	inline matrix& operator-=(const matrix &m2)
	{
		for (int i = 0; i < _size; i++) x[i] -= m2.x[i];
		return *this;
	}
#ifndef MOJO_AVX
	// *= float
	// NOTE(review): returns matrix by value (a deep copy) rather than
	// matrix& — works, but unconventional for a compound assignment.
	inline matrix operator *=(const float v)
	{
		for (int i = 0; i < _size; i++) x[i] = x[i] * v;
		return *this;
	}
#else
	// SSE variant: 4 floats per step; relies on _size being a multiple of 4
	// and x being 16-byte aligned (new_x aligns to 32).
	inline matrix operator *=(const float v)
	{
		__m128 b;
		b = _mm_set_ps(v, v, v, v);
		for (int j = 0; j < _size; j += 4)
			_mm_store_ps(x + j, _mm_mul_ps(_mm_load_ps(x + j), b));
		return *this;
	}
#endif
	// *= matrix (element-wise)
	inline matrix operator *=(const matrix &v)
	{
		for (int i = 0; i < _size; i++) x[i] = x[i] * v.x[i];
		return *this;
	}
	// element-wise product
	inline matrix operator *(const matrix &v)
	{
		matrix T(cols, rows, chans);
		for (int i = 0; i < _size; i++) T.x[i] = x[i] * v.x[i];
		return T;
	}
	// * float
	inline matrix operator *(const float v)
	{
		matrix T(cols, rows, chans);
		for (int i = 0; i < _size; i++) T.x[i] = x[i] * v;
		return T;
	}
	// + float
	inline matrix operator +(const float v)
	{
		matrix T(cols, rows, chans);
		for (int i = 0; i < _size; i++) T.x[i] = x[i] + v;
		return T;
	}
	// + (element-wise; m2 taken by value, so this copies its argument)
	inline matrix operator +(matrix m2)
	{
		matrix T(cols,rows,chans);
		for(int i = 0; i < _size; i++) T.x[i] = x[i] + m2.x[i];
		return T;
	}
};

}// namespace
namedCritical.c
// OpenMP Named Critical Example

// Inclusions
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

// Main
int main( int argc, char** argv )
{
    int *a = malloc( 25 * sizeof( int ) );  // Array of Values
    int i = 0;                              // Loop Iterator
    int n = 25;                             // Number of Iterations
    int localSum = 0;                       // Private Local Sum for Each Core
    int totalSum = 0;                       // Shared Total Sum for All Cores
    int thread = 0;                         // Thread Number
    double start = 0.0;                     // Start Time (omp_get_wtime returns double)
    double end = 0.0;                       // End Time
    double time = 0.0;                      // Elapsed Time

    // Bail out cleanly if the allocation failed.
    if( a == NULL )
    {
        fprintf( stderr, "Allocation failed.\n" );
        return 1;
    }

    // Fill Array with Values 1 to 25
    for( i = 0; i < n; i++ )
    {
        a[i] = i + 1;
    }

    // Time the whole parallel region from the serial part.  (Previously every
    // thread wrote the shared start/end variables inside the region — a data
    // race — and the reported time came from whichever thread wrote last.)
    start = omp_get_wtime( );

    // Parallel Region
    #pragma omp parallel shared( n, a, totalSum ) private( thread, localSum )
    // Share Number of Iterations, Array, and the Total Sum
    // Keep the Thread Number and Local Sum Private
    {
        thread = omp_get_thread_num( );     // Get the Thread Number
        localSum = 0;                       // Preset Local Sum to Zero

        #pragma omp for                     // Parallelize the Next For
        for( i = 0; i < n; i++ )
        {
            localSum += a[i];               // Accumulate Array Values into Local Sum
        }

        #pragma omp critical( totalSum )    // Named Critical Region - blocks separately.
        {
            totalSum += localSum;           // Accumulate Local Sum Values into Total Sum
            printf( "Thread %d has local sum %d and adds to total sum %d.\n", thread, localSum, totalSum );
        }

        // Differently-named critical regions do not exclude each other.
        #pragma omp critical( print )       // Named Critical Region - blocks separately.
        {
            printf( "I'm an unnamed critical region.\n" );
        }
    }

    end = omp_get_wtime( );
    time = end - start;

    printf( "Total sum at end is %d.\nTime: %0.9lf\n", totalSum, time );
    free( a );
    return 0;
}

// End namedCritical.c - EWG SDG
GB_unop__identity_int16_int64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_int16_int64)
// op(A') function:  GB (_unop_tran__identity_int16_int64)

// C type:   int16_t
// A type:   int64_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int16_t z = (int16_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int16_t z = (int16_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise int64 -> int16 identity-cast over anz entries, parallelized
// with a static OpenMP schedule.  Note the cast truncates (implementation-
// defined wrap) values outside the int16_t range.
GrB_Info GB (_unop_apply__identity_int16_int64)
(
    int16_t *Cx,                // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in the shared template
// GB_unop_transpose.c, driven by the macros defined above.
GrB_Info GB (_unop_tran__identity_int16_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
semantics.c
/* Perform the semantic phase of parsing, i.e., the process of building tree structure, checking semantic consistency, and building RTL. These routines are used both during actual parsing and during the instantiation of template functions. Copyright (C) 1998-2015 Free Software Foundation, Inc. Written by Mark Mitchell (mmitchell@usa.net) based on code found formerly in parse.y and pt.c. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. 
*/ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "hash-set.h" #include "machmode.h" #include "vec.h" #include "double-int.h" #include "input.h" #include "alias.h" #include "symtab.h" #include "wide-int.h" #include "inchash.h" #include "tree.h" #include "stmt.h" #include "varasm.h" #include "stor-layout.h" #include "stringpool.h" #include "cp-tree.h" #include "c-family/c-common.h" #include "c-family/c-objc.h" #include "tree-inline.h" #include "intl.h" #include "toplev.h" #include "flags.h" #include "timevar.h" #include "diagnostic.h" #include "hash-map.h" #include "is-a.h" #include "plugin-api.h" #include "hard-reg-set.h" #include "input.h" #include "function.h" #include "ipa-ref.h" #include "cgraph.h" #include "tree-iterator.h" #include "target.h" #include "hash-table.h" #include "gimplify.h" #include "bitmap.h" #include "omp-low.h" #include "builtins.h" #include "convert.h" #include "gomp-constants.h" /* There routines provide a modular interface to perform many parsing operations. They may therefore be used during actual parsing, or during template instantiation, which may be regarded as a degenerate form of parsing. */ static tree maybe_convert_cond (tree); static tree finalize_nrv_r (tree *, int *, void *); static tree capture_decltype (tree); /* Deferred Access Checking Overview --------------------------------- Most C++ expressions and declarations require access checking to be performed during parsing. However, in several cases, this has to be treated differently. For member declarations, access checking has to be deferred until more information about the declaration is known. For example: class A { typedef int X; public: X f(); }; A::X A::f(); A::X g(); When we are parsing the function return type `A::X', we don't really know if this is allowed until we parse the function name. Furthermore, some contexts require that access checking is never performed at all. These include class heads, and template instantiations. 
Typical use of access checking functions is described here: 1. When we enter a context that requires certain access checking mode, the function `push_deferring_access_checks' is called with DEFERRING argument specifying the desired mode. Access checking may be performed immediately (dk_no_deferred), deferred (dk_deferred), or not performed (dk_no_check). 2. When a declaration such as a type, or a variable, is encountered, the function `perform_or_defer_access_check' is called. It maintains a vector of all deferred checks. 3. The global `current_class_type' or `current_function_decl' is then setup by the parser. `enforce_access' relies on these information to check access. 4. Upon exiting the context mentioned in step 1, `perform_deferred_access_checks' is called to check all declaration stored in the vector. `pop_deferring_access_checks' is then called to restore the previous access checking mode. In case of parsing error, we simply call `pop_deferring_access_checks' without `perform_deferred_access_checks'. */ typedef struct GTY(()) deferred_access { /* A vector representing name-lookups for which we have deferred checking access controls. We cannot check the accessibility of names used in a decl-specifier-seq until we know what is being declared because code like: class A { class B {}; B* f(); } A::B* A::f() { return 0; } is valid, even though `A::B' is not generally accessible. */ vec<deferred_access_check, va_gc> * GTY(()) deferred_access_checks; /* The current mode of access checks. */ enum deferring_kind deferring_access_checks_kind; } deferred_access; /* Data for deferred access checking. */ static GTY(()) vec<deferred_access, va_gc> *deferred_access_stack; static GTY(()) unsigned deferred_access_no_check; /* Save the current deferred access states and start deferred access checking iff DEFER_P is true. 
*/ void push_deferring_access_checks (deferring_kind deferring) { /* For context like template instantiation, access checking disabling applies to all nested context. */ if (deferred_access_no_check || deferring == dk_no_check) deferred_access_no_check++; else { deferred_access e = {NULL, deferring}; vec_safe_push (deferred_access_stack, e); } } /* Save the current deferred access states and start deferred access checking, continuing the set of deferred checks in CHECKS. */ void reopen_deferring_access_checks (vec<deferred_access_check, va_gc> * checks) { push_deferring_access_checks (dk_deferred); if (!deferred_access_no_check) deferred_access_stack->last().deferred_access_checks = checks; } /* Resume deferring access checks again after we stopped doing this previously. */ void resume_deferring_access_checks (void) { if (!deferred_access_no_check) deferred_access_stack->last().deferring_access_checks_kind = dk_deferred; } /* Stop deferring access checks. */ void stop_deferring_access_checks (void) { if (!deferred_access_no_check) deferred_access_stack->last().deferring_access_checks_kind = dk_no_deferred; } /* Discard the current deferred access checks and restore the previous states. */ void pop_deferring_access_checks (void) { if (deferred_access_no_check) deferred_access_no_check--; else deferred_access_stack->pop (); } /* Returns a TREE_LIST representing the deferred checks. The TREE_PURPOSE of each node is the type through which the access occurred; the TREE_VALUE is the declaration named. */ vec<deferred_access_check, va_gc> * get_deferred_access_checks (void) { if (deferred_access_no_check) return NULL; else return (deferred_access_stack->last().deferred_access_checks); } /* Take current deferred checks and combine with the previous states if we also defer checks previously. Otherwise perform checks now. 
*/ void pop_to_parent_deferring_access_checks (void) { if (deferred_access_no_check) deferred_access_no_check--; else { vec<deferred_access_check, va_gc> *checks; deferred_access *ptr; checks = (deferred_access_stack->last ().deferred_access_checks); deferred_access_stack->pop (); ptr = &deferred_access_stack->last (); if (ptr->deferring_access_checks_kind == dk_no_deferred) { /* Check access. */ perform_access_checks (checks, tf_warning_or_error); } else { /* Merge with parent. */ int i, j; deferred_access_check *chk, *probe; FOR_EACH_VEC_SAFE_ELT (checks, i, chk) { FOR_EACH_VEC_SAFE_ELT (ptr->deferred_access_checks, j, probe) { if (probe->binfo == chk->binfo && probe->decl == chk->decl && probe->diag_decl == chk->diag_decl) goto found; } /* Insert into parent's checks. */ vec_safe_push (ptr->deferred_access_checks, *chk); found:; } } } } /* Perform the access checks in CHECKS. The TREE_PURPOSE of each node is the BINFO indicating the qualifying scope used to access the DECL node stored in the TREE_VALUE of the node. If CHECKS is empty or we aren't in SFINAE context or all the checks succeed return TRUE, otherwise FALSE. */ bool perform_access_checks (vec<deferred_access_check, va_gc> *checks, tsubst_flags_t complain) { int i; deferred_access_check *chk; location_t loc = input_location; bool ok = true; if (!checks) return true; FOR_EACH_VEC_SAFE_ELT (checks, i, chk) { input_location = chk->loc; ok &= enforce_access (chk->binfo, chk->decl, chk->diag_decl, complain); } input_location = loc; return (complain & tf_error) ? true : ok; } /* Perform the deferred access checks. After performing the checks, we still have to keep the list `deferred_access_stack->deferred_access_checks' since we may want to check access for them again later in a different context. For example: class A { typedef int X; static X a; }; A::X A::a, x; // No error for `A::a', error for `x' We have to perform deferred access of `A::X', first with `A::a', next with `x'. 
   Return value like perform_access_checks above.  */

bool
perform_deferred_access_checks (tsubst_flags_t complain)
{
  return perform_access_checks (get_deferred_access_checks (), complain);
}

/* Defer checking the accessibility of DECL, when looked up in
   BINFO. DIAG_DECL is the declaration to use to print diagnostics.
   Return value like perform_access_checks above.  */

bool
perform_or_defer_access_check (tree binfo, tree decl, tree diag_decl,
			       tsubst_flags_t complain)
{
  int i;
  deferred_access *ptr;
  deferred_access_check *chk;


  /* Exit if we are in a context that no access checking is performed.
     */
  if (deferred_access_no_check)
    return true;

  gcc_assert (TREE_CODE (binfo) == TREE_BINFO);

  ptr = &deferred_access_stack->last ();

  /* If we are not supposed to defer access checks, just check now.  */
  if (ptr->deferring_access_checks_kind == dk_no_deferred)
    {
      bool ok = enforce_access (binfo, decl, diag_decl, complain);
      return (complain & tf_error) ? true : ok;
    }

  /* See if we are already going to perform this check.  */
  FOR_EACH_VEC_SAFE_ELT (ptr->deferred_access_checks, i, chk)
    {
      if (chk->decl == decl && chk->binfo == binfo &&
	  chk->diag_decl == diag_decl)
	{
	  return true;
	}
    }
  /* If not, record the check.  */
  deferred_access_check new_access = {binfo, decl, diag_decl, input_location};
  vec_safe_push (ptr->deferred_access_checks, new_access);

  return true;
}

/* Returns nonzero if the current statement is a full expression,
   i.e. temporaries created during that statement should be destroyed
   at the end of the statement.  */

int
stmts_are_full_exprs_p (void)
{
  return current_stmt_tree ()->stmts_are_full_exprs_p;
}

/* T is a statement.  Add it to the statement-tree.  This is the C++
   version.  The C/ObjC frontends have a slightly different version of
   this function.
*/

tree
add_stmt (tree t)
{
  enum tree_code code = TREE_CODE (t);

  if (EXPR_P (t) && code != LABEL_EXPR)
    {
      if (!EXPR_HAS_LOCATION (t))
	SET_EXPR_LOCATION (t, input_location);

      /* When we expand a statement-tree, we must know whether or not the
	 statements are full-expressions.  We record that fact here.  */
      STMT_IS_FULL_EXPR_P (t) = stmts_are_full_exprs_p ();
    }

  if (code == LABEL_EXPR || code == CASE_LABEL_EXPR)
    STATEMENT_LIST_HAS_LABEL (cur_stmt_list) = 1;

  /* Add T to the statement-tree.  Non-side-effect statements need to be
     recorded during statement expressions.  */
  gcc_checking_assert (!stmt_list_stack->is_empty ());
  append_to_statement_list_force (t, &cur_stmt_list);

  return t;
}

/* Returns the stmt_tree to which statements are currently being added.  */

stmt_tree
current_stmt_tree (void)
{
  /* Use the function's tree while a function body is open, otherwise
     the global scope's.  */
  return (cfun
	  ? &cfun->language->base.x_stmt_tree
	  : &scope_chain->x_stmt_tree);
}

/* If statements are full expressions, wrap STMT in a CLEANUP_POINT_EXPR.  */

static tree
maybe_cleanup_point_expr (tree expr)
{
  if (!processing_template_decl && stmts_are_full_exprs_p ())
    expr = fold_build_cleanup_point_expr (TREE_TYPE (expr), expr);
  return expr;
}

/* Like maybe_cleanup_point_expr except have the type of the new expression be
   void so we don't need to create a temporary variable to hold the inner
   expression.  The reason why we do this is because the original type might be
   an aggregate and we cannot create a temporary variable for that type.  */

tree
maybe_cleanup_point_expr_void (tree expr)
{
  if (!processing_template_decl && stmts_are_full_exprs_p ())
    expr = fold_build_cleanup_point_expr (void_type_node, expr);
  return expr;
}



/* Create a declaration statement for the declaration given by the DECL.  */

void
add_decl_expr (tree decl)
{
  tree r = build_stmt (input_location, DECL_EXPR, decl);
  /* An initializer or a variably-sized DECL_SIZE needs a cleanup point.  */
  if (DECL_INITIAL (decl)
      || (DECL_SIZE (decl) && TREE_SIDE_EFFECTS (DECL_SIZE (decl))))
    r = maybe_cleanup_point_expr_void (r);
  add_stmt (r);
}

/* Finish a scope.
*/

tree
do_poplevel (tree stmt_list)
{
  tree block = NULL;

  if (stmts_are_full_exprs_p ())
    block = poplevel (kept_level_p (), 1, 0);

  stmt_list = pop_stmt_list (stmt_list);

  if (!processing_template_decl)
    {
      stmt_list = c_build_bind_expr (input_location, block, stmt_list);
      /* ??? See c_end_compound_stmt re statement expressions.  */
    }

  return stmt_list;
}

/* Begin a new scope.  */

static tree
do_pushlevel (scope_kind sk)
{
  /* Push the statement list before opening the binding scope, so the
     matching do_poplevel pops them in the reverse order.  */
  tree ret = push_stmt_list ();
  if (stmts_are_full_exprs_p ())
    begin_scope (sk, NULL);
  return ret;
}

/* Queue a cleanup.  CLEANUP is an expression/statement to be executed
   when the current scope is exited.  EH_ONLY is true when this is not
   meant to apply to normal control flow transfer.  */

void
push_cleanup (tree decl, tree cleanup, bool eh_only)
{
  tree stmt = build_stmt (input_location, CLEANUP_STMT, NULL, cleanup, decl);
  CLEANUP_EH_ONLY (stmt) = eh_only;
  add_stmt (stmt);
  /* The statements protected by the cleanup follow; they are collected
     into CLEANUP_BODY when the enclosing statement list is popped.  */
  CLEANUP_BODY (stmt) = push_stmt_list ();
}

/* Simple infinite loop tracking for -Wreturn-type.  We keep a stack of all
   the current loops, represented by 'NULL_TREE' if we've seen a possible
   exit, and 'error_mark_node' if not.  This is currently used only to
   suppress the warning about a function with no return statements, and
   therefore we don't bother noting returns as possible exits.  We also
   don't bother with gotos.  */

static void
begin_maybe_infinite_loop (tree cond)
{
  /* Only track this while parsing a function, not during instantiation.  */
  if (!cfun || (DECL_TEMPLATE_INSTANTIATION (current_function_decl)
		&& !processing_template_decl))
    return;
  bool maybe_infinite = true;
  if (cond)
    {
      cond = fold_non_dependent_expr (cond);
      maybe_infinite = integer_nonzerop (cond);
    }
  vec_safe_push (cp_function_chain->infinite_loops,
		 maybe_infinite ? error_mark_node : NULL_TREE);
}

/* A break is a possible exit for the current loop.
*/

void
break_maybe_infinite_loop (void)
{
  if (!cfun)
    return;
  /* Mark the innermost tracked loop as having a possible exit.  */
  cp_function_chain->infinite_loops->last() = NULL_TREE;
}

/* If we reach the end of the loop without seeing a possible exit, we have
   an infinite loop.  */

static void
end_maybe_infinite_loop (tree cond)
{
  if (!cfun || (DECL_TEMPLATE_INSTANTIATION (current_function_decl)
		&& !processing_template_decl))
    return;
  tree current = cp_function_chain->infinite_loops->pop();
  if (current != NULL_TREE)
    {
      cond = fold_non_dependent_expr (cond);
      /* Loop condition is a constant true and no exit was seen.  */
      if (integer_nonzerop (cond))
	current_function_infinite_loop = 1;
    }
}


/* Begin a conditional that might contain a declaration.  When generating
   normal code, we want the declaration to appear before the statement
   containing the conditional.  When generating template code, we want the
   conditional to be rendered as the raw DECL_EXPR.  */

static void
begin_cond (tree *cond_p)
{
  if (processing_template_decl)
    *cond_p = push_stmt_list ();
}

/* Finish such a conditional.  */

static void
finish_cond (tree *cond_p, tree expr)
{
  if (processing_template_decl)
    {
      tree cond = pop_stmt_list (*cond_p);

      if (expr == NULL_TREE)
	/* Empty condition in 'for'.  */
	gcc_assert (empty_expr_stmt_p (cond));
      else if (check_for_bare_parameter_packs (expr))
        expr = error_mark_node;
      else if (!empty_expr_stmt_p (cond))
	expr = build2 (COMPOUND_EXPR, TREE_TYPE (expr), cond, expr);
    }
  *cond_p = expr;
}

/* If *COND_P specifies a conditional with a declaration, transform the
   loop such that
            while (A x = 42) { }
            for (; A x = 42;) { }
   becomes
            while (true) { A x = 42; if (!x) break; }
            for (;;) { A x = 42; if (!x) break; }
   The statement list for BODY will be empty if the conditional did
   not declare anything.
*/

static void
simplify_loop_decl_cond (tree *cond_p, tree body)
{
  tree cond, if_stmt;

  /* An empty BODY statement list means the condition declared nothing.  */
  if (!TREE_SIDE_EFFECTS (body))
    return;

  cond = *cond_p;
  *cond_p = boolean_true_node;

  if_stmt = begin_if_stmt ();
  cond = cp_build_unary_op (TRUTH_NOT_EXPR, cond, 0, tf_warning_or_error);
  finish_if_stmt_cond (cond, if_stmt);
  finish_break_stmt ();
  finish_then_clause (if_stmt);
  finish_if_stmt (if_stmt);
}

/* Finish a goto-statement.  */

tree
finish_goto_stmt (tree destination)
{
  if (identifier_p (destination))
    destination = lookup_label (destination);

  /* We warn about unused labels with -Wunused.  That means we have to
     mark the used labels as used.  */
  if (TREE_CODE (destination) == LABEL_DECL)
    TREE_USED (destination) = 1;
  else
    {
      /* Computed goto: the destination is an expression, not a label.  */
      if (check_no_cilk (destination,
	 "Cilk array notation cannot be used as a computed goto expression",
	 "%<_Cilk_spawn%> statement cannot be used as a computed goto expression"))
	destination = error_mark_node;
      destination = mark_rvalue_use (destination);
      if (!processing_template_decl)
	{
	  destination = cp_convert (ptr_type_node, destination,
				    tf_warning_or_error);
	  if (error_operand_p (destination))
	    return NULL_TREE;
	  destination
	    = fold_build_cleanup_point_expr (TREE_TYPE (destination),
					     destination);
	}
    }

  check_goto (destination);

  return add_stmt (build_stmt (input_location, GOTO_EXPR, destination));
}

/* COND is the condition-expression for an if, while, etc.,
   statement.  Convert it to a boolean value, if appropriate.
   In addition, verify sequence points if -Wsequence-point is enabled.  */

static tree
maybe_convert_cond (tree cond)
{
  /* Empty conditions remain empty.  */
  if (!cond)
    return NULL_TREE;

  /* Wait until we instantiate templates before doing conversion.  */
  if (processing_template_decl)
    return cond;

  if (warn_sequence_point)
    verify_sequence_points (cond);

  /* Do the conversion.
*/
  cond = convert_from_reference (cond);

  if (TREE_CODE (cond) == MODIFY_EXPR
      && !TREE_NO_WARNING (cond)
      && warn_parentheses)
    {
      warning (OPT_Wparentheses,
	       "suggest parentheses around assignment used as truth value");
      /* Warn only once per expression.  */
      TREE_NO_WARNING (cond) = 1;
    }

  return condition_conversion (cond);
}

/* Finish an expression-statement, whose EXPRESSION is as indicated.  */

tree
finish_expr_stmt (tree expr)
{
  tree r = NULL_TREE;

  if (expr != NULL_TREE)
    {
      if (!processing_template_decl)
	{
	  if (warn_sequence_point)
	    verify_sequence_points (expr);
	  expr = convert_to_void (expr, ICV_STATEMENT, tf_warning_or_error);
	}
      else if (!type_dependent_expression_p (expr))
	convert_to_void (build_non_dependent_expr (expr), ICV_STATEMENT,
                         tf_warning_or_error);

      if (check_for_bare_parameter_packs (expr))
        expr = error_mark_node;

      /* Simplification of inner statement expressions, compound exprs,
	 etc can result in us already having an EXPR_STMT.  */
      if (TREE_CODE (expr) != CLEANUP_POINT_EXPR)
	{
	  if (TREE_CODE (expr) != EXPR_STMT)
	    expr = build_stmt (input_location, EXPR_STMT, expr);
	  expr = maybe_cleanup_point_expr_void (expr);
	}

      r = add_stmt (expr);
    }

  return r;
}


/* Begin an if-statement.  Returns a newly created IF_STMT if
   appropriate.  */

tree
begin_if_stmt (void)
{
  tree r, scope;
  scope = do_pushlevel (sk_cond);
  r = build_stmt (input_location, IF_STMT, NULL_TREE,
		  NULL_TREE, NULL_TREE, scope);
  begin_cond (&IF_COND (r));
  return r;
}

/* Process the COND of an if-statement, which may be given by
   IF_STMT.  */

void
finish_if_stmt_cond (tree cond, tree if_stmt)
{
  finish_cond (&IF_COND (if_stmt), maybe_convert_cond (cond));
  add_stmt (if_stmt);
  /* The then-clause statements accumulate until finish_then_clause.  */
  THEN_CLAUSE (if_stmt) = push_stmt_list ();
}

/* Finish the then-clause of an if-statement, which may be given by
   IF_STMT.  */

tree
finish_then_clause (tree if_stmt)
{
  THEN_CLAUSE (if_stmt) = pop_stmt_list (THEN_CLAUSE (if_stmt));
  return if_stmt;
}

/* Begin the else-clause of an if-statement.
*/

void
begin_else_clause (tree if_stmt)
{
  ELSE_CLAUSE (if_stmt) = push_stmt_list ();
}

/* Finish the else-clause of an if-statement, which may be given by
   IF_STMT.  */

void
finish_else_clause (tree if_stmt)
{
  ELSE_CLAUSE (if_stmt) = pop_stmt_list (ELSE_CLAUSE (if_stmt));
}

/* Finish an if-statement.  */

void
finish_if_stmt (tree if_stmt)
{
  tree scope = IF_SCOPE (if_stmt);
  IF_SCOPE (if_stmt) = NULL;
  add_stmt (do_poplevel (scope));
}

/* Begin a while-statement.  Returns a newly created WHILE_STMT if
   appropriate.  */

tree
begin_while_stmt (void)
{
  tree r;
  r = build_stmt (input_location, WHILE_STMT, NULL_TREE, NULL_TREE);
  add_stmt (r);
  WHILE_BODY (r) = do_pushlevel (sk_block);
  begin_cond (&WHILE_COND (r));
  return r;
}

/* Process the COND of a while-statement, which may be given by
   WHILE_STMT.  */

void
finish_while_stmt_cond (tree cond, tree while_stmt, bool ivdep)
{
  if (check_no_cilk (cond,
	 "Cilk array notation cannot be used as a condition for while statement",
	 "%<_Cilk_spawn%> statement cannot be used as a condition for while statement"))
    cond = error_mark_node;
  cond = maybe_convert_cond (cond);
  finish_cond (&WHILE_COND (while_stmt), cond);
  begin_maybe_infinite_loop (cond);
  /* Attach the ivdep annotation (from "#pragma GCC ivdep") to the
     condition.  */
  if (ivdep && cond != error_mark_node)
    WHILE_COND (while_stmt) = build2 (ANNOTATE_EXPR,
				      TREE_TYPE (WHILE_COND (while_stmt)),
				      WHILE_COND (while_stmt),
				      build_int_cst (integer_type_node,
						     annot_expr_ivdep_kind));
  simplify_loop_decl_cond (&WHILE_COND (while_stmt), WHILE_BODY (while_stmt));
}

/* Finish a while-statement, which may be given by WHILE_STMT.  */

void
finish_while_stmt (tree while_stmt)
{
  end_maybe_infinite_loop (boolean_true_node);
  WHILE_BODY (while_stmt) = do_poplevel (WHILE_BODY (while_stmt));
}

/* Begin a do-statement.  Returns a newly created DO_STMT if
   appropriate.
*/

tree
begin_do_stmt (void)
{
  tree r = build_stmt (input_location, DO_STMT, NULL_TREE, NULL_TREE);
  /* A do-loop always runs its body at least once; assume infinite until
     an exit is seen.  */
  begin_maybe_infinite_loop (boolean_true_node);
  add_stmt (r);
  DO_BODY (r) = push_stmt_list ();
  return r;
}

/* Finish the body of a do-statement, which may be given by DO_STMT.  */

void
finish_do_body (tree do_stmt)
{
  tree body = DO_BODY (do_stmt) = pop_stmt_list (DO_BODY (do_stmt));

  if (TREE_CODE (body) == STATEMENT_LIST && STATEMENT_LIST_TAIL (body))
    body = STATEMENT_LIST_TAIL (body)->stmt;

  if (IS_EMPTY_STMT (body))
    warning (OPT_Wempty_body,
            "suggest explicit braces around empty body in %<do%> statement");
}

/* Finish a do-statement, which may be given by DO_STMT, and whose
   COND is as indicated.  */

void
finish_do_stmt (tree cond, tree do_stmt, bool ivdep)
{
  if (check_no_cilk (cond,
  "Cilk array notation cannot be used as a condition for a do-while statement",
  "%<_Cilk_spawn%> statement cannot be used as a condition for a do-while statement"))
    cond = error_mark_node;
  cond = maybe_convert_cond (cond);
  end_maybe_infinite_loop (cond);
  if (ivdep && cond != error_mark_node)
    cond = build2 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
		   build_int_cst (integer_type_node, annot_expr_ivdep_kind));
  DO_COND (do_stmt) = cond;
}

/* Finish a return-statement.  The EXPRESSION returned, if any, is as
   indicated.  */

tree
finish_return_stmt (tree expr)
{
  tree r;
  bool no_warning;

  expr = check_return_expr (expr, &no_warning);

  if (error_operand_p (expr)
      || (flag_openmp && !check_omp_return ()))
    {
      /* Suppress -Wreturn-type for this function.  */
      if (warn_return_type)
	TREE_NO_WARNING (current_function_decl) = true;
      return error_mark_node;
    }

  if (!processing_template_decl)
    {
      if (warn_sequence_point)
	verify_sequence_points (expr);
      
      if (DECL_DESTRUCTOR_P (current_function_decl)
	  || (DECL_CONSTRUCTOR_P (current_function_decl)
	      && targetm.cxx.cdtor_returns_this ()))
	{
	  /* Similarly, all destructors must run destructors for
	     base-classes before returning.
	     So, all returns in a destructor get sent to the DTOR_LABEL;
	     finish_function emits code to return a value there.  */
	  return finish_goto_stmt (cdtor_label);
	}
    }

  r = build_stmt (input_location, RETURN_EXPR, expr);
  TREE_NO_WARNING (r) |= no_warning;
  r = maybe_cleanup_point_expr_void (r);
  r = add_stmt (r);

  return r;
}

/* Begin the scope of a for-statement or a range-for-statement.
   Both the returned trees are to be used in a call to
   begin_for_stmt or begin_range_for_stmt.  */

tree
begin_for_scope (tree *init)
{
  tree scope = NULL_TREE;
  /* C++-style for-scoping (-ffor-scope); no scope otherwise.  */
  if (flag_new_for_scope > 0)
    scope = do_pushlevel (sk_for);

  if (processing_template_decl)
    *init = push_stmt_list ();
  else
    *init = NULL_TREE;

  return scope;
}

/* Begin a for-statement.  Returns a new FOR_STMT.
   SCOPE and INIT should be the return of begin_for_scope,
   or both NULL_TREE  */

tree
begin_for_stmt (tree scope, tree init)
{
  tree r;

  r = build_stmt (input_location, FOR_STMT, NULL_TREE, NULL_TREE,
		  NULL_TREE, NULL_TREE, NULL_TREE);

  if (scope == NULL_TREE)
    {
      gcc_assert (!init || !(flag_new_for_scope > 0));
      if (!init)
	scope = begin_for_scope (&init);
    }
  FOR_INIT_STMT (r) = init;
  FOR_SCOPE (r) = scope;

  return r;
}

/* Finish the for-init-statement of a for-statement, which may be
   given by FOR_STMT.  */

void
finish_for_init_stmt (tree for_stmt)
{
  if (processing_template_decl)
    FOR_INIT_STMT (for_stmt) = pop_stmt_list (FOR_INIT_STMT (for_stmt));
  add_stmt (for_stmt);
  FOR_BODY (for_stmt) = do_pushlevel (sk_block);
  begin_cond (&FOR_COND (for_stmt));
}

/* Finish the COND of a for-statement, which may be given by
   FOR_STMT.
*/

void
finish_for_cond (tree cond, tree for_stmt, bool ivdep)
{
  if (check_no_cilk (cond,
	 "Cilk array notation cannot be used in a condition for a for-loop",
	 "%<_Cilk_spawn%> statement cannot be used in a condition for a for-loop"))
    cond = error_mark_node;
  cond = maybe_convert_cond (cond);
  finish_cond (&FOR_COND (for_stmt), cond);
  begin_maybe_infinite_loop (cond);
  /* Attach the ivdep annotation to the condition, as for while-loops.  */
  if (ivdep && cond != error_mark_node)
    FOR_COND (for_stmt) = build2 (ANNOTATE_EXPR,
				  TREE_TYPE (FOR_COND (for_stmt)),
				  FOR_COND (for_stmt),
				  build_int_cst (integer_type_node,
						 annot_expr_ivdep_kind));
  simplify_loop_decl_cond (&FOR_COND (for_stmt), FOR_BODY (for_stmt));
}

/* Finish the increment-EXPRESSION in a for-statement, which may be
   given by FOR_STMT.  */

void
finish_for_expr (tree expr, tree for_stmt)
{
  if (!expr)
    return;
  /* If EXPR is an overloaded function, issue an error; there is no
     context available to use to perform overload resolution.  */
  if (type_unknown_p (expr))
    {
      cxx_incomplete_type_error (expr, TREE_TYPE (expr));
      expr = error_mark_node;
    }
  if (!processing_template_decl)
    {
      if (warn_sequence_point)
	verify_sequence_points (expr);
      expr = convert_to_void (expr, ICV_THIRD_IN_FOR,
                              tf_warning_or_error);
    }
  else if (!type_dependent_expression_p (expr))
    convert_to_void (build_non_dependent_expr (expr), ICV_THIRD_IN_FOR,
                     tf_warning_or_error);
  expr = maybe_cleanup_point_expr_void (expr);
  if (check_for_bare_parameter_packs (expr))
    expr = error_mark_node;
  FOR_EXPR (for_stmt) = expr;
}

/* Finish the body of a for-statement, which may be given by
   FOR_STMT.  The increment-EXPR for the loop must be
   provided.
   It can also finish RANGE_FOR_STMT.  */

void
finish_for_stmt (tree for_stmt)
{
  end_maybe_infinite_loop (boolean_true_node);

  if (TREE_CODE (for_stmt) == RANGE_FOR_STMT)
    RANGE_FOR_BODY (for_stmt) = do_poplevel (RANGE_FOR_BODY (for_stmt));
  else
    FOR_BODY (for_stmt) = do_poplevel (FOR_BODY (for_stmt));

  /* Pop the scope for the body of the loop.
*/
  if (flag_new_for_scope > 0)
    {
      tree scope;
      tree *scope_ptr = (TREE_CODE (for_stmt) == RANGE_FOR_STMT
			 ? &RANGE_FOR_SCOPE (for_stmt)
			 : &FOR_SCOPE (for_stmt));
      scope = *scope_ptr;
      *scope_ptr = NULL;
      add_stmt (do_poplevel (scope));
    }
}

/* Begin a range-for-statement.  Returns a new RANGE_FOR_STMT.
   SCOPE and INIT should be the return of begin_for_scope,
   or both NULL_TREE  .
   To finish it call finish_for_stmt(). */

tree
begin_range_for_stmt (tree scope, tree init)
{
  tree r;

  /* A range-for always iterates; treat it as possibly infinite only if
     an exit is never seen (condition starts out "false" here).  */
  begin_maybe_infinite_loop (boolean_false_node);

  r = build_stmt (input_location, RANGE_FOR_STMT,
		  NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE);

  if (scope == NULL_TREE)
    {
      gcc_assert (!init || !(flag_new_for_scope > 0));
      if (!init)
	scope = begin_for_scope (&init);
    }

  /* RANGE_FOR_STMTs do not use nor save the init tree, so we
     pop it now.  */
  if (init)
    pop_stmt_list (init);
  RANGE_FOR_SCOPE (r) = scope;

  return r;
}

/* Finish the head of a range-based for statement, which may
   be given by RANGE_FOR_STMT.  DECL must be the declaration
   and EXPR must be the loop expression.  */

void
finish_range_for_decl (tree range_for_stmt, tree decl, tree expr)
{
  RANGE_FOR_DECL (range_for_stmt) = decl;
  RANGE_FOR_EXPR (range_for_stmt) = expr;
  add_stmt (range_for_stmt);
  RANGE_FOR_BODY (range_for_stmt) = do_pushlevel (sk_block);
}

/* Finish a break-statement.  */

tree
finish_break_stmt (void)
{
  /* In switch statements break is sometimes stylistically used after
     a return statement.  This can lead to spurious warnings about
     control reaching the end of a non-void function when it is
     inlined.  Note that we are calling block_may_fallthru with
     language specific tree nodes; this works because
     block_may_fallthru returns true when given something it does not
     understand.  */
  if (!block_may_fallthru (cur_stmt_list))
    return void_node;
  return add_stmt (build_stmt (input_location, BREAK_STMT));
}

/* Finish a continue-statement.
*/

tree
finish_continue_stmt (void)
{
  return add_stmt (build_stmt (input_location, CONTINUE_STMT));
}

/* Begin a switch-statement.  Returns a new SWITCH_STMT if
   appropriate.  */

tree
begin_switch_stmt (void)
{
  tree r, scope;

  scope = do_pushlevel (sk_cond);
  r = build_stmt (input_location, SWITCH_STMT, NULL_TREE, NULL_TREE, NULL_TREE, scope);

  begin_cond (&SWITCH_STMT_COND (r));

  return r;
}

/* Finish the cond of a switch-statement.  */

void
finish_switch_cond (tree cond, tree switch_stmt)
{
  tree orig_type = NULL;

  if (check_no_cilk (cond,
	 "Cilk array notation cannot be used as a condition for switch statement",
	 "%<_Cilk_spawn%> statement cannot be used as a condition for switch statement"))
    cond = error_mark_node;

  if (!processing_template_decl)
    {
      /* Convert the condition to an integer or enumeration type.  */
      cond = build_expr_type_conversion (WANT_INT | WANT_ENUM, cond, true);
      if (cond == NULL_TREE)
	{
	  error ("switch quantity not an integer");
	  cond = error_mark_node;
	}
      /* We want unlowered type here to handle enum bit-fields.  */
      orig_type = unlowered_expr_type (cond);
      if (TREE_CODE (orig_type) != ENUMERAL_TYPE)
	orig_type = TREE_TYPE (cond);
      if (cond != error_mark_node)
	{
	  /* Warn if the condition has boolean value.  */
	  if (TREE_CODE (orig_type) == BOOLEAN_TYPE)
	    warning_at (input_location, OPT_Wswitch_bool,
			"switch condition has type bool");

	  /* [stmt.switch]

	     Integral promotions are performed.  */
	  cond = perform_integral_promotions (cond);
	  cond = maybe_cleanup_point_expr (cond);
	}
    }
  if (check_for_bare_parameter_packs (cond))
    cond = error_mark_node;
  else if (!processing_template_decl && warn_sequence_point)
    verify_sequence_points (cond);

  finish_cond (&SWITCH_STMT_COND (switch_stmt), cond);
  /* Remember the unlowered type so case labels can be checked later.  */
  SWITCH_STMT_TYPE (switch_stmt) = orig_type;
  add_stmt (switch_stmt);
  push_switch (switch_stmt);
  SWITCH_STMT_BODY (switch_stmt) = push_stmt_list ();
}

/* Finish the body of a switch-statement, which may be given by
   SWITCH_STMT.  The COND to switch on is indicated.
*/

void
finish_switch_stmt (tree switch_stmt)
{
  tree scope;

  SWITCH_STMT_BODY (switch_stmt) =
    pop_stmt_list (SWITCH_STMT_BODY (switch_stmt));
  pop_switch ();

  scope = SWITCH_STMT_SCOPE (switch_stmt);
  SWITCH_STMT_SCOPE (switch_stmt) = NULL;
  add_stmt (do_poplevel (scope));
}

/* Begin a try-block.  Returns a newly-created TRY_BLOCK if
   appropriate.  */

tree
begin_try_block (void)
{
  tree r = build_stmt (input_location, TRY_BLOCK, NULL_TREE, NULL_TREE);
  add_stmt (r);
  TRY_STMTS (r) = push_stmt_list ();
  return r;
}

/* Likewise, for a function-try-block.  The block returned in
   *COMPOUND_STMT is an artificial outer scope, containing the
   function-try-block.  */

tree
begin_function_try_block (tree *compound_stmt)
{
  tree r;
  /* This outer scope does not exist in the C++ standard, but we need
     a place to put __FUNCTION__ and similar variables.  */
  *compound_stmt = begin_compound_stmt (0);
  r = begin_try_block ();
  FN_TRY_BLOCK_P (r) = 1;
  return r;
}

/* Finish a try-block, which may be given by TRY_BLOCK.  */

void
finish_try_block (tree try_block)
{
  TRY_STMTS (try_block) = pop_stmt_list (TRY_STMTS (try_block));
  /* The handlers are collected next.  */
  TRY_HANDLERS (try_block) = push_stmt_list ();
}

/* Finish the body of a cleanup try-block, which may be given by
   TRY_BLOCK.  */

void
finish_cleanup_try_block (tree try_block)
{
  TRY_STMTS (try_block) = pop_stmt_list (TRY_STMTS (try_block));
}

/* Finish an implicitly generated try-block, with a cleanup is given
   by CLEANUP.  */

void
finish_cleanup (tree cleanup, tree try_block)
{
  TRY_HANDLERS (try_block) = cleanup;
  CLEANUP_P (try_block) = 1;
}

/* Likewise, for a function-try-block.  */

void
finish_function_try_block (tree try_block)
{
  finish_try_block (try_block);
  /* FIXME : something queer about CTOR_INITIALIZER somehow following
     the try block, but moving it inside.  */
  in_function_try_handler = 1;
}

/* Finish a handler-sequence for a try-block, which may be given by
   TRY_BLOCK.
*/

void
finish_handler_sequence (tree try_block)
{
  TRY_HANDLERS (try_block) = pop_stmt_list (TRY_HANDLERS (try_block));
  check_handlers (TRY_HANDLERS (try_block));
}

/* Finish the handler-seq for a function-try-block, given by
   TRY_BLOCK.  COMPOUND_STMT is the outer block created by
   begin_function_try_block.  */

void
finish_function_handler_sequence (tree try_block, tree compound_stmt)
{
  in_function_try_handler = 0;
  finish_handler_sequence (try_block);
  finish_compound_stmt (compound_stmt);
}

/* Begin a handler.  Returns a HANDLER if appropriate.  */

tree
begin_handler (void)
{
  tree r;

  r = build_stmt (input_location, HANDLER, NULL_TREE, NULL_TREE);
  add_stmt (r);

  /* Create a binding level for the eh_info and the exception object
     cleanup.  */
  HANDLER_BODY (r) = do_pushlevel (sk_catch);

  return r;
}

/* Finish the handler-parameters for a handler, which may be given by
   HANDLER.  DECL is the declaration for the catch parameter, or NULL
   if this is a `catch (...)' clause.  */

void
finish_handler_parms (tree decl, tree handler)
{
  tree type = NULL_TREE;
  if (processing_template_decl)
    {
      if (decl)
	{
	  decl = pushdecl (decl);
	  decl = push_template_decl (decl);
	  HANDLER_PARMS (handler) = decl;
	  type = TREE_TYPE (decl);
	}
    }
  else
    /* Not in a template: emit the runtime catch-block setup now.  */
    type = expand_start_catch_block (decl);
  HANDLER_TYPE (handler) = type;
}

/* Finish a handler, which may be given by HANDLER.  The BLOCKs are
   the return value from the matching call to finish_handler_parms.  */

void
finish_handler (tree handler)
{
  if (!processing_template_decl)
    expand_end_catch_block ();
  HANDLER_BODY (handler) = do_poplevel (HANDLER_BODY (handler));
}

/* Begin a compound statement.  FLAGS contains some bits that control the
   behavior and context.  If BCS_NO_SCOPE is set, the compound statement
   does not define a scope.  If BCS_FN_BODY is set, this is the outermost
   block of a function.  If BCS_TRY_BLOCK is set, this is the block
   created on behalf of a TRY statement.  Returns a token to be passed to
   finish_compound_stmt.
*/

tree
begin_compound_stmt (unsigned int flags)
{
  tree r;

  if (flags & BCS_NO_SCOPE)
    {
      r = push_stmt_list ();
      STATEMENT_LIST_NO_SCOPE (r) = 1;

      /* Normally, we try hard to keep the BLOCK for a statement-expression.
	 But, if it's a statement-expression with a scopeless block, there's
	 nothing to keep, and we don't want to accidentally keep a block
	 *inside* the scopeless block.  */
      keep_next_level (false);
    }
  else
    r = do_pushlevel (flags & BCS_TRY_BLOCK ? sk_try : sk_block);

  /* When processing a template, we need to remember where the braces were,
     so that we can set up identical scopes when instantiating the template
     later.  BIND_EXPR is a handy candidate for this.
     Note that do_poplevel won't create a BIND_EXPR itself here (and thus
     result in nested BIND_EXPRs), since we don't build BLOCK nodes when
     processing templates.  */
  if (processing_template_decl)
    {
      r = build3 (BIND_EXPR, NULL, NULL, r, NULL);
      BIND_EXPR_TRY_BLOCK (r) = (flags & BCS_TRY_BLOCK) != 0;
      BIND_EXPR_BODY_BLOCK (r) = (flags & BCS_FN_BODY) != 0;
      TREE_SIDE_EFFECTS (r) = 1;
    }

  return r;
}

/* Finish a compound-statement, which is given by STMT.  */

void
finish_compound_stmt (tree stmt)
{
  if (TREE_CODE (stmt) == BIND_EXPR)
    {
      tree body = do_poplevel (BIND_EXPR_BODY (stmt));
      /* If the STATEMENT_LIST is empty and this BIND_EXPR isn't special,
	 discard the BIND_EXPR so it can be merged with the containing
	 STATEMENT_LIST.  */
      if (TREE_CODE (body) == STATEMENT_LIST
	  && STATEMENT_LIST_HEAD (body) == NULL
	  && !BIND_EXPR_BODY_BLOCK (stmt)
	  && !BIND_EXPR_TRY_BLOCK (stmt))
	stmt = body;
      else
	BIND_EXPR_BODY (stmt) = body;
    }
  else if (STATEMENT_LIST_NO_SCOPE (stmt))
    stmt = pop_stmt_list (stmt);
  else
    {
      /* Destroy any ObjC "super" receivers that may have been
	 created.  */
      objc_clear_super_receiver ();

      stmt = do_poplevel (stmt);
    }

  /* ??? See c_end_compound_stmt wrt statement expressions.  */
  add_stmt (stmt);
}

/* Finish an asm-statement, whose components are a STRING, some
   OUTPUT_OPERANDS, some INPUT_OPERANDS, some CLOBBERS and some
   LABELS.
   Also note whether the asm-statement should be
   considered volatile.  */

tree
finish_asm_stmt (int volatile_p, tree string, tree output_operands,
		 tree input_operands, tree clobbers, tree labels)
{
  tree r;
  tree t;
  int ninputs = list_length (input_operands);
  int noutputs = list_length (output_operands);

  if (!processing_template_decl)
    {
      const char *constraint;
      const char **oconstraints;
      bool allows_mem, allows_reg, is_inout;
      tree operand;
      int i;

      oconstraints = XALLOCAVEC (const char *, noutputs);

      string = resolve_asm_operand_names (string, output_operands,
					  input_operands, labels);

      /* First pass: validate and mark each output operand.  */
      for (i = 0, t = output_operands; t; t = TREE_CHAIN (t), ++i)
	{
	  operand = TREE_VALUE (t);

	  /* ??? Really, this should not be here.  Users should be using a
	     proper lvalue, dammit.  But there's a long history of using
	     casts in the output operands.  In cases like longlong.h, this
	     becomes a primitive form of typechecking -- if the cast can be
	     removed, then the output operand had a type of the proper width;
	     otherwise we'll get an error.  Gross, but ...  */
	  STRIP_NOPS (operand);

	  operand = mark_lvalue_use (operand);

	  if (!lvalue_or_else (operand, lv_asm, tf_warning_or_error))
	    operand = error_mark_node;

	  if (operand != error_mark_node
	      && (TREE_READONLY (operand)
		  || CP_TYPE_CONST_P (TREE_TYPE (operand))
		  /* Functions are not modifiable, even though they are
		     lvalues.  */
		  || TREE_CODE (TREE_TYPE (operand)) == FUNCTION_TYPE
		  || TREE_CODE (TREE_TYPE (operand)) == METHOD_TYPE
		  /* If it's an aggregate and any field is const, then it is
		     effectively const.  */
		  || (CLASS_TYPE_P (TREE_TYPE (operand))
		      && C_TYPE_FIELDS_READONLY (TREE_TYPE (operand)))))
	    cxx_readonly_error (operand, lv_asm);

	  constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t)));
	  oconstraints[i] = constraint;

	  if (parse_output_constraint (&constraint, i, ninputs, noutputs,
				       &allows_mem, &allows_reg, &is_inout))
	    {
	      /* If the operand is going to end up in memory,
		 mark it addressable.
*/
	      if (!allows_reg && !cxx_mark_addressable (operand))
		operand = error_mark_node;
	    }
	  else
	    operand = error_mark_node;

	  TREE_VALUE (t) = operand;
	}

      /* Second pass: validate each input operand against its constraint.  */
      for (i = 0, t = input_operands; t; ++i, t = TREE_CHAIN (t))
	{
	  constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t)));
	  bool constraint_parsed
	    = parse_input_constraint (&constraint, i, ninputs, noutputs, 0,
				      oconstraints, &allows_mem, &allows_reg);
	  /* If the operand is going to end up in memory, don't call
	     decay_conversion.  */
	  if (constraint_parsed && !allows_reg && allows_mem)
	    operand = mark_lvalue_use (TREE_VALUE (t));
	  else
	    operand = decay_conversion (TREE_VALUE (t), tf_warning_or_error);

	  /* If the type of the operand hasn't been determined (e.g.,
	     because it involves an overloaded function), then issue
	     an error message.  There's no context available to
	     resolve the overloading.  */
	  if (TREE_TYPE (operand) == unknown_type_node)
	    {
	      error ("type of asm operand %qE could not be determined",
		     TREE_VALUE (t));
	      operand = error_mark_node;
	    }

	  if (constraint_parsed)
	    {
	      /* If the operand is going to end up in memory, mark it
		 addressable.  */
	      if (!allows_reg && allows_mem)
		{
		  /* Strip the nops as we allow this case.  FIXME, this really
		     should be rejected or made deprecated.  */
		  STRIP_NOPS (operand);
		  if (!cxx_mark_addressable (operand))
		    operand = error_mark_node;
		}
	      else if (!allows_reg && !allows_mem)
		{
		  /* If constraint allows neither register nor memory,
		     try harder to get a constant.  */
		  tree constop = maybe_constant_value (operand);
		  if (TREE_CONSTANT (constop))
		    operand = constop;
		}
	    }
	  else
	    operand = error_mark_node;

	  TREE_VALUE (t) = operand;
	}
    }

  r = build_stmt (input_location, ASM_EXPR, string,
		  output_operands, input_operands,
		  clobbers, labels);
  /* An asm with no outputs is implicitly volatile.  */
  ASM_VOLATILE_P (r) = volatile_p || noutputs == 0;
  r = maybe_cleanup_point_expr_void (r);
  return add_stmt (r);
}

/* Finish a label with the indicated NAME.  Returns the new label.
*/

tree
finish_label_stmt (tree name)
{
  tree decl = define_label (input_location, name);

  if (decl == error_mark_node)
    return error_mark_node;

  add_stmt (build_stmt (input_location, LABEL_EXPR, decl));

  return decl;
}

/* Finish a series of declarations for local labels.  G++ allows users
   to declare "local" labels, i.e., labels with scope.  This extension
   is useful when writing code involving statement-expressions.  */

void
finish_label_decl (tree name)
{
  if (!at_function_scope_p ())
    {
      error ("__label__ declarations are only allowed in function scopes");
      return;
    }

  add_decl_expr (declare_local_label (name));
}

/* When DECL goes out of scope, make sure that CLEANUP is executed.  */

void
finish_decl_cleanup (tree decl, tree cleanup)
{
  push_cleanup (decl, cleanup, false);
}

/* If the current scope exits with an exception, run CLEANUP.  */

void
finish_eh_cleanup (tree cleanup)
{
  /* eh_only: run on exceptional exit only, not normal control flow.  */
  push_cleanup (NULL, cleanup, true);
}

/* The MEM_INITS is a list of mem-initializers, in reverse of the
   order they were written by the user.  Each node is as for
   emit_mem_initializers.  */

void
finish_mem_initializers (tree mem_inits)
{
  /* Reorder the MEM_INITS so that they are in the order they appeared
     in the source program.  */
  mem_inits = nreverse (mem_inits);

  if (processing_template_decl)
    {
      tree mem;

      for (mem = mem_inits; mem; mem = TREE_CHAIN (mem))
        {
          /* If the TREE_PURPOSE is a TYPE_PACK_EXPANSION, skip the
             check for bare parameter packs in the TREE_VALUE, because
             any parameter packs in the TREE_VALUE have already been
             bound as part of the TREE_PURPOSE.  See
             make_pack_expansion for more information.
*/ if (TREE_CODE (TREE_PURPOSE (mem)) != TYPE_PACK_EXPANSION && check_for_bare_parameter_packs (TREE_VALUE (mem))) TREE_VALUE (mem) = error_mark_node; } add_stmt (build_min_nt_loc (UNKNOWN_LOCATION, CTOR_INITIALIZER, mem_inits)); } else emit_mem_initializers (mem_inits); } /* Obfuscate EXPR if it looks like an id-expression or member access so that the call to finish_decltype in do_auto_deduction will give the right result. */ tree force_paren_expr (tree expr) { /* This is only needed for decltype(auto) in C++14. */ if (cxx_dialect < cxx14) return expr; /* If we're in unevaluated context, we can't be deducing a return/initializer type, so we don't need to mess with this. */ if (cp_unevaluated_operand) return expr; if (!DECL_P (expr) && TREE_CODE (expr) != COMPONENT_REF && TREE_CODE (expr) != SCOPE_REF) return expr; if (TREE_CODE (expr) == COMPONENT_REF) REF_PARENTHESIZED_P (expr) = true; else if (type_dependent_expression_p (expr)) expr = build1 (PAREN_EXPR, TREE_TYPE (expr), expr); else { cp_lvalue_kind kind = lvalue_kind (expr); if ((kind & ~clk_class) != clk_none) { tree type = unlowered_expr_type (expr); bool rval = !!(kind & clk_rvalueref); type = cp_build_reference_type (type, rval); /* This inhibits warnings in, eg, cxx_mark_addressable (c++/60955). */ warning_sentinel s (extra_warnings); expr = build_static_cast (type, expr, tf_error); if (expr != error_mark_node) REF_PARENTHESIZED_P (expr) = true; } } return expr; } /* Finish a parenthesized expression EXPR. */ tree finish_parenthesized_expr (tree expr) { if (EXPR_P (expr)) /* This inhibits warnings in c_common_truthvalue_conversion. */ TREE_NO_WARNING (expr) = 1; if (TREE_CODE (expr) == OFFSET_REF || TREE_CODE (expr) == SCOPE_REF) /* [expr.unary.op]/3 The qualified id of a pointer-to-member must not be enclosed in parentheses. 
*/ PTRMEM_OK_P (expr) = 0; if (TREE_CODE (expr) == STRING_CST) PAREN_STRING_LITERAL_P (expr) = 1; expr = force_paren_expr (expr); return expr; } /* Finish a reference to a non-static data member (DECL) that is not preceded by `.' or `->'. */ tree finish_non_static_data_member (tree decl, tree object, tree qualifying_scope) { gcc_assert (TREE_CODE (decl) == FIELD_DECL); if (!object) { tree scope = qualifying_scope; if (scope == NULL_TREE) scope = context_for_name_lookup (decl); object = maybe_dummy_object (scope, NULL); } object = maybe_resolve_dummy (object, true); if (object == error_mark_node) return error_mark_node; /* DR 613/850: Can use non-static data members without an associated object in sizeof/decltype/alignof. */ if (is_dummy_object (object) && cp_unevaluated_operand == 0 && (!processing_template_decl || !current_class_ref)) { if (current_function_decl && DECL_STATIC_FUNCTION_P (current_function_decl)) error ("invalid use of member %qD in static member function", decl); else error ("invalid use of non-static data member %qD", decl); inform (DECL_SOURCE_LOCATION (decl), "declared here"); return error_mark_node; } if (current_class_ptr) TREE_USED (current_class_ptr) = 1; if (processing_template_decl && !qualifying_scope) { tree type = TREE_TYPE (decl); if (TREE_CODE (type) == REFERENCE_TYPE) /* Quals on the object don't matter. */; else if (PACK_EXPANSION_P (type)) /* Don't bother trying to represent this. */ type = NULL_TREE; else { /* Set the cv qualifiers. */ int quals = cp_type_quals (TREE_TYPE (object)); if (DECL_MUTABLE_P (decl)) quals &= ~TYPE_QUAL_CONST; quals |= cp_type_quals (TREE_TYPE (decl)); type = cp_build_qualified_type (type, quals); } return (convert_from_reference (build_min (COMPONENT_REF, type, object, decl, NULL_TREE))); } /* If PROCESSING_TEMPLATE_DECL is nonzero here, then QUALIFYING_SCOPE is also non-null. Wrap this in a SCOPE_REF for now. 
*/ else if (processing_template_decl) return build_qualified_name (TREE_TYPE (decl), qualifying_scope, decl, /*template_p=*/false); else { tree access_type = TREE_TYPE (object); perform_or_defer_access_check (TYPE_BINFO (access_type), decl, decl, tf_warning_or_error); /* If the data member was named `C::M', convert `*this' to `C' first. */ if (qualifying_scope) { tree binfo = NULL_TREE; object = build_scoped_ref (object, qualifying_scope, &binfo); } return build_class_member_access_expr (object, decl, /*access_path=*/NULL_TREE, /*preserve_reference=*/false, tf_warning_or_error); } } /* If we are currently parsing a template and we encountered a typedef TYPEDEF_DECL that is being accessed though CONTEXT, this function adds the typedef to a list tied to the current template. At template instantiation time, that list is walked and access check performed for each typedef. LOCATION is the location of the usage point of TYPEDEF_DECL. */ void add_typedef_to_current_template_for_access_check (tree typedef_decl, tree context, location_t location) { tree template_info = NULL; tree cs = current_scope (); if (!is_typedef_decl (typedef_decl) || !context || !CLASS_TYPE_P (context) || !cs) return; if (CLASS_TYPE_P (cs) || TREE_CODE (cs) == FUNCTION_DECL) template_info = get_template_info (cs); if (template_info && TI_TEMPLATE (template_info) && !currently_open_class (context)) append_type_to_template_for_access_check (cs, typedef_decl, context, location); } /* DECL was the declaration to which a qualified-id resolved. Issue an error message if it is not accessible. If OBJECT_TYPE is non-NULL, we have just seen `x->' or `x.' and OBJECT_TYPE is the type of `*x', or `x', respectively. If the DECL was named as `A::B' then NESTED_NAME_SPECIFIER is `A'. 
*/

void
check_accessibility_of_qualified_id (tree decl,
				     tree object_type,
				     tree nested_name_specifier)
{
  tree scope;
  tree qualifying_type = NULL_TREE;

  /* If we are parsing a template declaration and if decl is a
     typedef, add it to a list tied to the template.
     At template instantiation time, that list will be walked and
     access check performed.  */
  add_typedef_to_current_template_for_access_check (decl,
						    nested_name_specifier
						    ? nested_name_specifier
						    : DECL_CONTEXT (decl),
						    input_location);

  /* If we're not checking, return immediately.  */
  if (deferred_access_no_check)
    return;

  /* Determine the SCOPE of DECL.  */
  scope = context_for_name_lookup (decl);

  /* If the SCOPE is not a type, then DECL is not a member.  */
  if (!TYPE_P (scope))
    return;

  /* Compute the scope through which DECL is being accessed.  */
  if (object_type
      /* OBJECT_TYPE might not be a class type; consider:

	   class A { typedef int I; };
	   I *p;
	   p->A::I::~I();

	 In this case, we will have "A::I" as the DECL, but "I" as the
	 OBJECT_TYPE.  */
      && CLASS_TYPE_P (object_type)
      && DERIVED_FROM_P (scope, object_type))
    /* If we are processing a `->' or `.' expression, use the type of the
       left-hand side.  */
    qualifying_type = object_type;
  else if (nested_name_specifier)
    {
      /* If the reference is to a non-static member of the
	 current class, treat it as if it were referenced through
	 `this'.  */
      tree ct;
      if (DECL_NONSTATIC_MEMBER_P (decl)
	  && current_class_ptr
	  && DERIVED_FROM_P (scope, ct = current_nonlambda_class_type ()))
	qualifying_type = ct;
      /* Otherwise, use the type indicated by the
	 nested-name-specifier.  */
      else
	qualifying_type = nested_name_specifier;
    }
  else
    /* Otherwise, the name must be from the current class or one of
       its bases.  */
    qualifying_type = currently_open_derived_class (scope);

  if (qualifying_type
      /* It is possible for qualifying type to be a TEMPLATE_TYPE_PARM
	 or similar in a default argument value.  */
      && CLASS_TYPE_P (qualifying_type)
      && !dependent_type_p (qualifying_type))
    perform_or_defer_access_check (TYPE_BINFO (qualifying_type), decl,
				   decl, tf_warning_or_error);
}

/* EXPR is the result of a qualified-id.  The QUALIFYING_CLASS was the
   class named to the left of the "::" operator.  DONE is true if this
   expression is a complete postfix-expression; it is false if this
   expression is followed by '->', '[', '(', etc.  ADDRESS_P is true
   iff this expression is the operand of '&'.  TEMPLATE_P is true iff
   the qualified-id was of the form "A::template B".  TEMPLATE_ARG_P
   is true iff this qualified name appears as a template argument.  */

tree
finish_qualified_id_expr (tree qualifying_class,
			  tree expr,
			  bool done,
			  bool address_p,
			  bool template_p,
			  bool template_arg_p,
			  tsubst_flags_t complain)
{
  gcc_assert (TYPE_P (qualifying_class));

  if (error_operand_p (expr))
    return error_mark_node;

  if ((DECL_P (expr) || BASELINK_P (expr))
      && !mark_used (expr, complain))
    return error_mark_node;

  if (template_p)
    check_template_keyword (expr);

  /* If EXPR occurs as the operand of '&', use special handling that
     permits a pointer-to-member.  */
  if (address_p && done)
    {
      if (TREE_CODE (expr) == SCOPE_REF)
	expr = TREE_OPERAND (expr, 1);
      expr = build_offset_ref (qualifying_class, expr,
			       /*address_p=*/true, complain);
      return expr;
    }

  /* No need to check access within an enum.  */
  if (TREE_CODE (qualifying_class) == ENUMERAL_TYPE)
    return expr;

  /* Within the scope of a class, turn references to non-static
     members into expression of the form "this->...".  */
  if (template_arg_p)
    /* But, within a template argument, we do not want make the
       transformation, as there is no "this" pointer.  */
    ;
  else if (TREE_CODE (expr) == FIELD_DECL)
    {
      /* Access was already checked when the qualified-id was resolved;
	 suppress a second check here.  */
      push_deferring_access_checks (dk_no_check);
      expr = finish_non_static_data_member (expr, NULL_TREE,
					    qualifying_class);
      pop_deferring_access_checks ();
    }
  else if (BASELINK_P (expr) && !processing_template_decl)
    {
      /* See if any of the functions are non-static members.  */
      /* If so, the expression may be relative to 'this'.  */
      if (!shared_member_p (expr)
	  && current_class_ptr
	  && DERIVED_FROM_P (qualifying_class,
			     current_nonlambda_class_type ()))
	expr = (build_class_member_access_expr
		(maybe_dummy_object (qualifying_class, NULL),
		 expr,
		 BASELINK_ACCESS_BINFO (expr),
		 /*preserve_reference=*/false,
		 complain));
      else if (done)
	/* The expression is a qualified name whose address is not
	   being taken.  */
	expr = build_offset_ref (qualifying_class, expr,
				 /*address_p=*/false, complain);
    }
  else if (BASELINK_P (expr))
    ;
  else
    {
      /* In a template, return a SCOPE_REF for most qualified-ids
	 so that we can check access at instantiation time.  But if
	 we're looking at a member of the current instantiation, we
	 know we have access and building up the SCOPE_REF confuses
	 non-type template argument handling.  */
      if (processing_template_decl
	  && !currently_open_class (qualifying_class))
	expr = build_qualified_name (TREE_TYPE (expr),
				     qualifying_class, expr,
				     template_p);

      expr = convert_from_reference (expr);
    }

  return expr;
}

/* Begin a statement-expression.  The value returned must be passed to
   finish_stmt_expr.  */

tree
begin_stmt_expr (void)
{
  return push_stmt_list ();
}

/* Process the final expression of a statement expression. EXPR can be
   NULL, if the final expression is empty.  Return a STATEMENT_LIST
   containing all the statements in the statement-expression, or
   ERROR_MARK_NODE if there was an error.  */

tree
finish_stmt_expr_expr (tree expr, tree stmt_expr)
{
  if (error_operand_p (expr))
    {
      /* The type of the statement-expression is the type of the last
	 expression.  */
      TREE_TYPE (stmt_expr) = error_mark_node;
      return error_mark_node;
    }

  /* If the last statement does not have "void" type, then the value
     of the last statement is the value of the entire expression.  */
  if (expr)
    {
      tree type = TREE_TYPE (expr);

      if (processing_template_decl)
	{
	  expr = build_stmt (input_location, EXPR_STMT, expr);
	  expr = add_stmt (expr);
	  /* Mark the last statement so that we can recognize it as such at
	     template-instantiation time.  */
	  EXPR_STMT_STMT_EXPR_RESULT (expr) = 1;
	}
      else if (VOID_TYPE_P (type))
	{
	  /* Just treat this like an ordinary statement.  */
	  expr = finish_expr_stmt (expr);
	}
      else
	{
	  /* It actually has a value we need to deal with.  First, force it
	     to be an rvalue so that we won't need to build up a copy
	     constructor call later when we try to assign it to something.  */
	  expr = force_rvalue (expr, tf_warning_or_error);
	  if (error_operand_p (expr))
	    return error_mark_node;

	  /* Update for array-to-pointer decay.  */
	  type = TREE_TYPE (expr);

	  /* Wrap it in a CLEANUP_POINT_EXPR and add it to the list like a
	     normal statement, but don't convert to void or actually add
	     the EXPR_STMT.  */
	  if (TREE_CODE (expr) != CLEANUP_POINT_EXPR)
	    expr = maybe_cleanup_point_expr (expr);
	  add_stmt (expr);
	}

      /* The type of the statement-expression is the type of the last
	 expression.  */
      TREE_TYPE (stmt_expr) = type;
    }

  return stmt_expr;
}

/* Finish a statement-expression.  EXPR should be the value returned
   by the previous begin_stmt_expr.  Returns an expression
   representing the statement-expression.
*/

tree
finish_stmt_expr (tree stmt_expr, bool has_no_scope)
{
  tree type;
  tree result;

  if (error_operand_p (stmt_expr))
    {
      /* Still pop the list so the statement stack stays balanced.  */
      pop_stmt_list (stmt_expr);
      return error_mark_node;
    }

  gcc_assert (TREE_CODE (stmt_expr) == STATEMENT_LIST);

  type = TREE_TYPE (stmt_expr);
  result = pop_stmt_list (stmt_expr);
  TREE_TYPE (result) = type;

  if (processing_template_decl)
    {
      result = build_min (STMT_EXPR, type, result);
      TREE_SIDE_EFFECTS (result) = 1;
      STMT_EXPR_NO_SCOPE (result) = has_no_scope;
    }
  else if (CLASS_TYPE_P (type))
    {
      /* Wrap the statement-expression in a TARGET_EXPR so that the
	 temporary object created by the final expression is destroyed at
	 the end of the full-expression containing the
	 statement-expression.  */
      result = force_target_expr (type, result, tf_warning_or_error);
    }

  return result;
}

/* Returns the expression which provides the value of STMT_EXPR.  */

tree
stmt_expr_value_expr (tree stmt_expr)
{
  tree t = STMT_EXPR_STMT (stmt_expr);

  /* Peel off the wrappers added while building the statement list to
     reach the final EXPR_STMT's expression.  */
  if (TREE_CODE (t) == BIND_EXPR)
    t = BIND_EXPR_BODY (t);

  if (TREE_CODE (t) == STATEMENT_LIST && STATEMENT_LIST_TAIL (t))
    t = STATEMENT_LIST_TAIL (t)->stmt;

  if (TREE_CODE (t) == EXPR_STMT)
    t = EXPR_STMT_EXPR (t);

  return t;
}

/* Return TRUE iff EXPR_STMT is an empty list of
   expression statements.  */

bool
empty_expr_stmt_p (tree expr_stmt)
{
  tree body = NULL_TREE;

  if (expr_stmt == void_node)
    return true;

  if (expr_stmt)
    {
      if (TREE_CODE (expr_stmt) == EXPR_STMT)
	body = EXPR_STMT_EXPR (expr_stmt);
      else if (TREE_CODE (expr_stmt) == STATEMENT_LIST)
	body = expr_stmt;
    }

  if (body)
    {
      if (TREE_CODE (body) == STATEMENT_LIST)
	return tsi_end_p (tsi_start (body));
      else
	/* Recurse through nested wrappers.  */
	return empty_expr_stmt_p (body);
    }
  return false;
}

/* Perform Koenig lookup.  FN is the postfix-expression representing
   the function (or functions) to call; ARGS are the arguments to the
   call.  Returns the functions to be considered by overload
   resolution.
*/

tree
perform_koenig_lookup (tree fn, vec<tree, va_gc> *args,
		       tsubst_flags_t complain)
{
  tree identifier = NULL_TREE;
  tree functions = NULL_TREE;
  tree tmpl_args = NULL_TREE;
  bool template_id = false;

  if (TREE_CODE (fn) == TEMPLATE_ID_EXPR)
    {
      /* Use a separate flag to handle null args.  */
      template_id = true;
      tmpl_args = TREE_OPERAND (fn, 1);
      fn = TREE_OPERAND (fn, 0);
    }

  /* Find the name of the overloaded function.  */
  if (identifier_p (fn))
    identifier = fn;
  else if (is_overloaded_fn (fn))
    {
      functions = fn;
      identifier = DECL_NAME (get_first_fn (functions));
    }
  else if (DECL_P (fn))
    {
      functions = fn;
      identifier = DECL_NAME (fn);
    }

  /* A call to a namespace-scope function using an unqualified name.

     Do Koenig lookup -- unless any of the arguments are
     type-dependent.  */
  if (!any_type_dependent_arguments_p (args)
      && !any_dependent_template_arguments_p (tmpl_args))
    {
      fn = lookup_arg_dependent (identifier, functions, args);
      if (!fn)
	{
	  /* The unqualified name could not be resolved.  */
	  if (complain)
	    fn = unqualified_fn_lookup_error (identifier);
	  else
	    fn = identifier;
	}
    }

  /* Re-wrap explicit template arguments around the looked-up name.  */
  if (fn && template_id)
    fn = build2 (TEMPLATE_ID_EXPR, unknown_type_node, fn, tmpl_args);

  return fn;
}

/* Generate an expression for `FN (ARGS)'.  This may change the
   contents of ARGS.

   If DISALLOW_VIRTUAL is true, the call to FN will be not generated
   as a virtual call, even if FN is virtual.  (This flag is set when
   encountering an expression where the function name is explicitly
   qualified.  For example a call to `X::f' never generates a virtual
   call.)

   Returns code for the call.
*/

tree
finish_call_expr (tree fn, vec<tree, va_gc> **args, bool disallow_virtual,
		  bool koenig_p, tsubst_flags_t complain)
{
  tree result;
  tree orig_fn;
  vec<tree, va_gc> *orig_args = NULL;

  if (fn == error_mark_node)
    return error_mark_node;

  gcc_assert (!TYPE_P (fn));

  orig_fn = fn;

  if (processing_template_decl)
    {
      /* If the call expression is dependent, build a CALL_EXPR node
	 with no type; type_dependent_expression_p recognizes
	 expressions with no type as being dependent.  */
      if (type_dependent_expression_p (fn)
	  || any_type_dependent_arguments_p (*args)
	  /* For a non-static member function that doesn't have an
	     explicit object argument, we need to specifically
	     test the type dependency of the "this" pointer because it
	     is not included in *ARGS even though it is considered to
	     be part of the list of arguments.  Note that this is
	     related to CWG issues 515 and 1005.  */
	  || (TREE_CODE (fn) != COMPONENT_REF
	      && non_static_member_function_p (fn)
	      && current_class_ref
	      && type_dependent_expression_p (current_class_ref)))
	{
	  result = build_nt_call_vec (fn, *args);
	  SET_EXPR_LOCATION (result, EXPR_LOC_OR_LOC (fn, input_location));
	  KOENIG_LOOKUP_P (result) = koenig_p;
	  if (cfun)
	    {
	      /* If every overload is noreturn (TREE_THIS_VOLATILE on a
		 FUNCTION_DECL), the enclosing function returns
		 abnormally through this call.  */
	      do
		{
		  tree fndecl = OVL_CURRENT (fn);
		  if (TREE_CODE (fndecl) != FUNCTION_DECL
		      || !TREE_THIS_VOLATILE (fndecl))
		    break;
		  fn = OVL_NEXT (fn);
		}
	      while (fn);
	      if (!fn)
		current_function_returns_abnormally = 1;
	    }
	  return result;
	}
      orig_args = make_tree_vector_copy (*args);
      if (!BASELINK_P (fn)
	  && TREE_CODE (fn) != PSEUDO_DTOR_EXPR
	  && TREE_TYPE (fn) != unknown_type_node)
	fn = build_non_dependent_expr (fn);
      make_args_non_dependent (*args);
    }

  if (TREE_CODE (fn) == COMPONENT_REF)
    {
      tree member = TREE_OPERAND (fn, 1);
      if (BASELINK_P (member))
	{
	  tree object = TREE_OPERAND (fn, 0);
	  return build_new_method_call (object, member,
					args, NULL_TREE,
					(disallow_virtual
					 ? LOOKUP_NORMAL | LOOKUP_NONVIRTUAL
					 : LOOKUP_NORMAL),
					/*fn_p=*/NULL,
					complain);
	}
    }

  /* Per 13.3.1.1, '(&f)(...)' is the same as '(f)(...)'.  */
  if (TREE_CODE (fn) == ADDR_EXPR
      && TREE_CODE (TREE_OPERAND (fn, 0)) == OVERLOAD)
    fn = TREE_OPERAND (fn, 0);

  if (is_overloaded_fn (fn))
    fn = baselink_for_fns (fn);

  result = NULL_TREE;
  if (BASELINK_P (fn))
    {
      tree object;

      /* A call to a member function.  From [over.call.func]:

	   If the keyword this is in scope and refers to the class of
	   that member function, or a derived class thereof, then the
	   function call is transformed into a qualified function call
	   using (*this) as the postfix-expression to the left of the
	   . operator.... [Otherwise] a contrived object of type T
	   becomes the implied object argument.

	 In this situation:

	   struct A { void f(); };
	   struct B : public A {};
	   struct C : public A { void g() { B::f(); }};

	 "the class of that member function" refers to `A'.  But 11.2
	 [class.access.base] says that we need to convert 'this' to B* as
	 part of the access, so we pass 'B' to maybe_dummy_object.  */

      object = maybe_dummy_object (BINFO_TYPE (BASELINK_ACCESS_BINFO (fn)),
				   NULL);

      if (processing_template_decl)
	{
	  if (type_dependent_expression_p (object))
	    {
	      tree ret = build_nt_call_vec (orig_fn, orig_args);
	      release_tree_vector (orig_args);
	      return ret;
	    }
	  object = build_non_dependent_expr (object);
	}

      result = build_new_method_call (object, fn, args, NULL_TREE,
				      (disallow_virtual
				       ? LOOKUP_NORMAL|LOOKUP_NONVIRTUAL
				       : LOOKUP_NORMAL),
				      /*fn_p=*/NULL,
				      complain);
    }
  else if (is_overloaded_fn (fn))
    {
      /* If the function is an overloaded builtin, resolve it.  */
      if (TREE_CODE (fn) == FUNCTION_DECL
	  && (DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL
	      || DECL_BUILT_IN_CLASS (fn) == BUILT_IN_MD))
	result = resolve_overloaded_builtin (input_location, fn, *args);

      if (!result)
	{
	  if (warn_sizeof_pointer_memaccess
	      && !vec_safe_is_empty (*args)
	      && !processing_template_decl)
	    {
	      /* Collect up to three sizeof operands for the
		 -Wsizeof-pointer-memaccess diagnostic.  */
	      location_t sizeof_arg_loc[3];
	      tree sizeof_arg[3];
	      unsigned int i;
	      for (i = 0; i < 3; i++)
		{
		  tree t;

		  sizeof_arg_loc[i] = UNKNOWN_LOCATION;
		  sizeof_arg[i] = NULL_TREE;
		  if (i >= (*args)->length ())
		    continue;
		  t = (**args)[i];
		  if (TREE_CODE (t) != SIZEOF_EXPR)
		    continue;
		  if (SIZEOF_EXPR_TYPE_P (t))
		    sizeof_arg[i] = TREE_TYPE (TREE_OPERAND (t, 0));
		  else
		    sizeof_arg[i] = TREE_OPERAND (t, 0);
		  sizeof_arg_loc[i] = EXPR_LOCATION (t);
		}
	      sizeof_pointer_memaccess_warning
		(sizeof_arg_loc, fn, *args,
		 sizeof_arg, same_type_ignoring_top_level_qualifiers_p);
	    }

	  /* A call to a namespace-scope function.  */
	  result = build_new_function_call (fn, args, koenig_p, complain);
	}
    }
  else if (TREE_CODE (fn) == PSEUDO_DTOR_EXPR)
    {
      if (!vec_safe_is_empty (*args))
	error ("arguments to destructor are not allowed");
      /* Mark the pseudo-destructor call as having side-effects so
	 that we do not issue warnings about its use.  */
      result = build1 (NOP_EXPR,
		       void_type_node,
		       TREE_OPERAND (fn, 0));
      TREE_SIDE_EFFECTS (result) = 1;
    }
  else if (CLASS_TYPE_P (TREE_TYPE (fn)))
    /* If the "function" is really an object of class type, it might
       have an overloaded `operator ()'.  */
    result = build_op_call (fn, args, complain);

  if (!result)
    /* A call where the function is unknown.  */
    result = cp_build_function_call_vec (fn, args, complain);

  if (processing_template_decl && result != error_mark_node)
    {
      /* Rebuild the call with the original (dependent-form) function
	 and arguments so instantiation sees what the user wrote.  */
      if (INDIRECT_REF_P (result))
	result = TREE_OPERAND (result, 0);
      result = build_call_vec (TREE_TYPE (result), orig_fn, orig_args);
      SET_EXPR_LOCATION (result, input_location);
      KOENIG_LOOKUP_P (result) = koenig_p;
      release_tree_vector (orig_args);
      result = convert_from_reference (result);
    }

  if (koenig_p)
    {
      /* Free garbage OVERLOADs from arg-dependent lookup.  */
      tree next = NULL_TREE;
      for (fn = orig_fn;
	   fn && TREE_CODE (fn) == OVERLOAD && OVL_ARG_DEPENDENT (fn);
	   fn = next)
	{
	  if (processing_template_decl)
	    /* In a template, we'll re-use them at instantiation time.  */
	    OVL_ARG_DEPENDENT (fn) = false;
	  else
	    {
	      next = OVL_CHAIN (fn);
	      ggc_free (fn);
	    }
	}
    }

  return result;
}

/* Finish a call to a postfix increment or decrement or EXPR.  (Which
   is indicated by CODE, which should be POSTINCREMENT_EXPR or
   POSTDECREMENT_EXPR.)  */

tree
finish_increment_expr (tree expr, enum tree_code code)
{
  return build_x_unary_op (input_location, code, expr, tf_warning_or_error);
}

/* Finish a use of `this'.  Returns an expression for `this'.  */

tree
finish_this_expr (void)
{
  tree result = NULL_TREE;

  if (current_class_ptr)
    {
      tree type = TREE_TYPE (current_class_ref);

      /* In a lambda expression, 'this' refers to the captured 'this'.  */
      if (LAMBDA_TYPE_P (type))
	result = lambda_expr_this_capture (CLASSTYPE_LAMBDA_EXPR (type),
					   true);
      else
	result = current_class_ptr;
    }

  if (result)
    /* The keyword 'this' is a prvalue expression.  */
    return rvalue (result);

  /* No 'this' in scope: pick the most precise diagnostic.  */
  tree fn = current_nonlambda_function ();
  if (fn && DECL_STATIC_FUNCTION_P (fn))
    error ("%<this%> is unavailable for static member functions");
  else if (fn)
    error ("invalid use of %<this%> in non-member function");
  else
    error ("invalid use of %<this%> at top level");
  return error_mark_node;
}

/* Finish a pseudo-destructor expression.
   If SCOPE is NULL, the expression was of the form
   `OBJECT.~DESTRUCTOR' where DESTRUCTOR is the TYPE for the type
   given.  If SCOPE is non-NULL, the expression was of the form
   `OBJECT.SCOPE::~DESTRUCTOR'.  */

tree
finish_pseudo_destructor_expr (tree object, tree scope, tree destructor,
			       location_t loc)
{
  if (object == error_mark_node || destructor == error_mark_node)
    return error_mark_node;

  gcc_assert (TYPE_P (destructor));

  if (!processing_template_decl)
    {
      if (scope == error_mark_node)
	{
	  error_at (loc, "invalid qualifying scope in pseudo-destructor name");
	  return error_mark_node;
	}
      /* `obj.~auto()' destroys whatever type OBJECT actually has.  */
      if (is_auto (destructor))
	destructor = TREE_TYPE (object);
      if (scope && TYPE_P (scope) && !check_dtor_name (scope, destructor))
	{
	  error_at (loc,
		    "qualified type %qT does not match destructor name ~%qT",
		    scope, destructor);
	  return error_mark_node;
	}

      /* [expr.pseudo] says both:

	   The type designated by the pseudo-destructor-name shall be
	   the same as the object type.

	 and:

	   The cv-unqualified versions of the object type and of the
	   type designated by the pseudo-destructor-name shall be the
	   same type.

	 We implement the more generous second sentence, since that is
	 what most other compilers do.  */
      if (!same_type_ignoring_top_level_qualifiers_p (TREE_TYPE (object),
						      destructor))
	{
	  error_at (loc, "%qE is not of type %qT", object, destructor);
	  return error_mark_node;
	}
    }

  return build3_loc (loc, PSEUDO_DTOR_EXPR, void_type_node, object,
		     scope, destructor);
}

/* Finish an expression of the form CODE EXPR.  */

tree
finish_unary_op_expr (location_t loc, enum tree_code code, tree expr,
		      tsubst_flags_t complain)
{
  tree result = build_x_unary_op (loc, code, expr, complain);
  /* Warn only when the operation itself introduced the overflow.  */
  if ((complain & tf_warning)
      && TREE_OVERFLOW_P (result) && !TREE_OVERFLOW_P (expr))
    overflow_warning (input_location, result);

  return result;
}

/* Finish a compound-literal expression.  TYPE is the type to which
   the CONSTRUCTOR in COMPOUND_LITERAL is being cast.
*/

tree
finish_compound_literal (tree type, tree compound_literal,
			 tsubst_flags_t complain)
{
  if (type == error_mark_node)
    return error_mark_node;

  if (TREE_CODE (type) == REFERENCE_TYPE)
    {
      /* Build the literal of the referred-to type, then bind the
	 reference to it via a cast.  */
      compound_literal
	= finish_compound_literal (TREE_TYPE (type), compound_literal,
				   complain);
      return cp_build_c_cast (type, compound_literal, complain);
    }

  if (!TYPE_OBJ_P (type))
    {
      if (complain & tf_error)
	error ("compound literal of non-object type %qT", type);
      return error_mark_node;
    }

  if (processing_template_decl)
    {
      TREE_TYPE (compound_literal) = type;
      /* Mark the expression as a compound literal.  */
      TREE_HAS_CONSTRUCTOR (compound_literal) = 1;
      return compound_literal;
    }

  type = complete_type (type);

  if (TYPE_NON_AGGREGATE_CLASS (type))
    {
      /* Trying to deal with a CONSTRUCTOR instead of a TREE_LIST
	 everywhere that deals with function arguments would be a pain, so
	 just wrap it in a TREE_LIST.  The parser set a flag so we know
	 that it came from T{} rather than T({}).  */
      CONSTRUCTOR_IS_DIRECT_INIT (compound_literal) = 1;
      compound_literal = build_tree_list (NULL_TREE, compound_literal);
      return build_functional_cast (type, compound_literal, complain);
    }

  if (TREE_CODE (type) == ARRAY_TYPE
      && check_array_initializer (NULL_TREE, type, compound_literal))
    return error_mark_node;
  compound_literal = reshape_init (type, compound_literal, complain);
  if (SCALAR_TYPE_P (type)
      && !BRACE_ENCLOSED_INITIALIZER_P (compound_literal)
      && !check_narrowing (type, compound_literal, complain))
    return error_mark_node;
  if (TREE_CODE (type) == ARRAY_TYPE
      && TYPE_DOMAIN (type) == NULL_TREE)
    {
      /* T[]{...}: deduce the array bound from the initializer.  */
      cp_complete_array_type_or_error (&type, compound_literal,
				       false, complain);
      if (type == error_mark_node)
	return error_mark_node;
    }
  compound_literal = digest_init (type, compound_literal, complain);
  if (TREE_CODE (compound_literal) == CONSTRUCTOR)
    TREE_HAS_CONSTRUCTOR (compound_literal) = true;

  /* Put static/constant array temporaries in static variables, but always
     represent class temporaries with TARGET_EXPR so we elide copies.  */
  if ((!at_function_scope_p () || CP_TYPE_CONST_P (type))
      && TREE_CODE (type) == ARRAY_TYPE
      && !TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type)
      && initializer_constant_valid_p (compound_literal, type))
    {
      tree decl = create_temporary_var (type);
      DECL_INITIAL (decl) = compound_literal;
      TREE_STATIC (decl) = 1;
      if (literal_type_p (type) && CP_TYPE_CONST_NON_VOLATILE_P (type))
	{
	  /* 5.19 says that a constant expression can include an
	     lvalue-rvalue conversion applied to "a glvalue of literal type
	     that refers to a non-volatile temporary object initialized
	     with a constant expression".  Rather than try to communicate
	     that this VAR_DECL is a temporary, just mark it constexpr.  */
	  DECL_DECLARED_CONSTEXPR_P (decl) = true;
	  DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl) = true;
	  TREE_CONSTANT (decl) = true;
	}
      cp_apply_type_quals_to_decl (cp_type_quals (type), decl);
      decl = pushdecl_top_level (decl);
      DECL_NAME (decl) = make_anon_name ();
      SET_DECL_ASSEMBLER_NAME (decl, DECL_NAME (decl));
      /* Make sure the destructor is callable.  */
      tree clean = cxx_maybe_build_cleanup (decl, complain);
      if (clean == error_mark_node)
	return error_mark_node;
      return decl;
    }
  else
    return get_target_expr_sfinae (compound_literal, complain);
}

/* Return the declaration for the function-name variable indicated by
   ID.  */

tree
finish_fname (tree id)
{
  tree decl;

  decl = fname_decl (input_location, C_RID_CODE (id), id);
  if (processing_template_decl && current_function_decl
      && decl != error_mark_node)
    decl = DECL_NAME (decl);
  return decl;
}

/* Finish a translation unit.  */

void
finish_translation_unit (void)
{
  /* In case there were missing closebraces,
     get us back to the global binding level.  */
  pop_everything ();
  while (current_namespace != global_namespace)
    pop_namespace ();

  /* Do file scope __FUNCTION__ et al.  */
  finish_fname_decls ();
}

/* Finish a template type parameter, specified as AGGR IDENTIFIER.
   Returns the parameter.
*/

tree
finish_template_type_parm (tree aggr, tree identifier)
{
  if (aggr != class_type_node)
    {
      permerror (input_location, "template type parameters must use the keyword %<class%> or %<typename%>");
      aggr = class_type_node;
    }

  return build_tree_list (aggr, identifier);
}

/* Finish a template template parameter, specified as AGGR IDENTIFIER.
   Returns the parameter.  */

tree
finish_template_template_parm (tree aggr, tree identifier)
{
  tree decl = build_decl (input_location,
			  TYPE_DECL, identifier, NULL_TREE);

  /* Build the TEMPLATE_DECL wrapping the parameter, adopting the
     template parameter list currently being parsed.  */
  tree tmpl = build_lang_decl (TEMPLATE_DECL, identifier, NULL_TREE);
  DECL_TEMPLATE_PARMS (tmpl) = current_template_parms;
  DECL_TEMPLATE_RESULT (tmpl) = decl;
  DECL_ARTIFICIAL (decl) = 1;
  end_template_decl ();

  gcc_assert (DECL_TEMPLATE_PARMS (tmpl));

  check_default_tmpl_args (decl, DECL_TEMPLATE_PARMS (tmpl),
			   /*is_primary=*/true, /*is_partial=*/false,
			   /*is_friend=*/0);

  return finish_template_type_parm (aggr, tmpl);
}

/* ARGUMENT is the default-argument value for a template template
   parameter.  If ARGUMENT is invalid, issue error messages and return
   the ERROR_MARK_NODE.  Otherwise, ARGUMENT itself is returned.  */

tree
check_template_template_default_arg (tree argument)
{
  if (TREE_CODE (argument) != TEMPLATE_DECL
      && TREE_CODE (argument) != TEMPLATE_TEMPLATE_PARM
      && TREE_CODE (argument) != UNBOUND_CLASS_TEMPLATE)
    {
      if (TREE_CODE (argument) == TYPE_DECL)
	error ("invalid use of type %qT as a default value for a template "
	       "template-parameter", TREE_TYPE (argument));
      else
	error ("invalid default argument for a template template parameter");
      return error_mark_node;
    }

  return argument;
}

/* Begin a class definition, as indicated by T.
*/

tree
begin_class_definition (tree t)
{
  if (error_operand_p (t) || error_operand_p (TYPE_MAIN_DECL (t)))
    return error_mark_node;

  if (processing_template_parmlist)
    {
      error ("definition of %q#T inside template parameter list", t);
      return error_mark_node;
    }

  /* According to the C++ ABI, decimal classes defined in ISO/IEC TR 24733
     are passed the same as decimal scalar types.  Detect them by their
     namespace (std::decimal) and name, and mark them transparent.  */
  if (TREE_CODE (t) == RECORD_TYPE
      && !processing_template_decl)
    {
      tree ns = TYPE_CONTEXT (t);
      if (ns && TREE_CODE (ns) == NAMESPACE_DECL
	  && DECL_CONTEXT (ns) == std_node
	  && DECL_NAME (ns)
	  && !strcmp (IDENTIFIER_POINTER (DECL_NAME (ns)), "decimal"))
	{
	  const char *n = TYPE_NAME_STRING (t);
	  if ((strcmp (n, "decimal32") == 0)
	      || (strcmp (n, "decimal64") == 0)
	      || (strcmp (n, "decimal128") == 0))
	    TYPE_TRANSPARENT_AGGR (t) = 1;
	}
    }
  /* A non-implicit typename comes from code like:

       template <typename T> struct A {
	 template <typename U> struct A<T>::B ...

     This is erroneous.  */
  else if (TREE_CODE (t) == TYPENAME_TYPE)
    {
      error ("invalid definition of qualified type %qT", t);
      t = error_mark_node;
    }

  /* Recover from an erroneous or non-class T by defining an anonymous
     RECORD_TYPE instead, so parsing of the body can continue.  */
  if (t == error_mark_node || ! MAYBE_CLASS_TYPE_P (t))
    {
      t = make_class_type (RECORD_TYPE);
      pushtag (make_anon_name (), t, /*tag_scope=*/ts_current);
    }

  /* A nested redefinition while T is still open; create a fresh type
     under the same name rather than corrupting the one being defined.  */
  if (TYPE_BEING_DEFINED (t))
    {
      t = make_class_type (TREE_CODE (t));
      pushtag (TYPE_IDENTIFIER (t), t, /*tag_scope=*/ts_current);
    }
  maybe_process_partial_specialization (t);
  pushclass (t);
  TYPE_BEING_DEFINED (t) = 1;
  class_binding_level->defining_class_p = 1;
  if (flag_pack_struct)
    {
      tree v;
      TYPE_PACKED (t) = 1;
      /* Even though the type is being defined for the first time
	 here, there might have been a forward declaration, so there
	 might be cv-qualified variants of T.  */
      for (v = TYPE_NEXT_VARIANT (t); v; v = TYPE_NEXT_VARIANT (v))
	TYPE_PACKED (v) = 1;
    }
  /* Reset the interface data, at the earliest possible moment, as
     it might have been set via a class foo; before.  */
  if (! TYPE_ANONYMOUS_P (t))
    {
      struct c_fileinfo *finfo
	= get_fileinfo (LOCATION_FILE (input_location));
      CLASSTYPE_INTERFACE_ONLY (t) = finfo->interface_only;
      SET_CLASSTYPE_INTERFACE_UNKNOWN_X
	(t, finfo->interface_unknown);
    }
  reset_specialization();

  /* Make a declaration for this class in its own scope.  */
  build_self_reference ();

  return t;
}

/* Finish the member declaration given by DECL.  */

void
finish_member_declaration (tree decl)
{
  if (decl == error_mark_node || decl == NULL_TREE)
    return;

  if (decl == void_type_node)
    /* The COMPONENT was a friend, not a member, and so there's
       nothing for us to do.  */
    return;

  /* We should see only one DECL at a time.  */
  gcc_assert (DECL_CHAIN (decl) == NULL_TREE);

  /* Set up access control for DECL, from the access specifier in
     effect at the point of declaration.  */
  TREE_PRIVATE (decl)
    = (current_access_specifier == access_private_node);
  TREE_PROTECTED (decl)
    = (current_access_specifier == access_protected_node);
  if (TREE_CODE (decl) == TEMPLATE_DECL)
    {
      /* Mirror the access bits onto the templated declaration.  */
      TREE_PRIVATE (DECL_TEMPLATE_RESULT (decl)) = TREE_PRIVATE (decl);
      TREE_PROTECTED (DECL_TEMPLATE_RESULT (decl)) = TREE_PROTECTED (decl);
    }

  /* Mark the DECL as a member of the current class, unless it's
     a member of an enumeration.  */
  if (TREE_CODE (decl) != CONST_DECL)
    DECL_CONTEXT (decl) = current_class_type;

  /* Check for bare parameter packs in the member variable declaration.  */
  if (TREE_CODE (decl) == FIELD_DECL)
    {
      if (check_for_bare_parameter_packs (TREE_TYPE (decl)))
	TREE_TYPE (decl) = error_mark_node;
      if (check_for_bare_parameter_packs (DECL_ATTRIBUTES (decl)))
	DECL_ATTRIBUTES (decl) = NULL_TREE;
    }

  /* [dcl.link]

     A C language linkage is ignored for the names of class members
     and the member function type of class member functions.  */
  if (DECL_LANG_SPECIFIC (decl) && DECL_LANGUAGE (decl) == lang_c)
    SET_DECL_LANGUAGE (decl, lang_cplusplus);

  /* Put functions on the TYPE_METHODS list and everything else on the
     TYPE_FIELDS list.  Note that these are built up in reverse order.
     We reverse them (to obtain declaration order) in finish_struct.  */
  if (DECL_DECLARES_FUNCTION_P (decl))
    {
      /* We also need to add this function to the
	 CLASSTYPE_METHOD_VEC.  */
      if (add_method (current_class_type, decl, NULL_TREE))
	{
	  DECL_CHAIN (decl) = TYPE_METHODS (current_class_type);
	  TYPE_METHODS (current_class_type) = decl;

	  maybe_add_class_template_decl_list (current_class_type, decl,
					      /*friend_p=*/0);
	}
    }
  /* Enter the DECL into the scope of the class, if the class
     isn't a closure (whose fields are supposed to be unnamed).  */
  else if (CLASSTYPE_LAMBDA_EXPR (current_class_type)
	   || pushdecl_class_level (decl))
    {
      if (TREE_CODE (decl) == USING_DECL)
	{
	  /* For now, ignore class-scope USING_DECLS, so that
	     debugging backends do not see them.  */
	  DECL_IGNORED_P (decl) = 1;
	}

      /* All TYPE_DECLs go at the end of TYPE_FIELDS.  Ordinary fields
	 go at the beginning.  The reason is that lookup_field_1
	 searches the list in order, and we want a field name to
	 override a type name so that the "struct stat hack" will
	 work.  In particular:

	   struct S { enum E { }; int E } s;
	   s.E = 3;

	 is valid.  In addition, the FIELD_DECLs must be maintained in
	 declaration order so that class layout works as expected.
	 However, we don't need that order until class layout, so we
	 save a little time by putting FIELD_DECLs on in reverse order
	 here, and then reversing them in finish_struct_1.  (We could
	 also keep a pointer to the correct insertion points in the
	 list.)  */

      if (TREE_CODE (decl) == TYPE_DECL)
	TYPE_FIELDS (current_class_type)
	  = chainon (TYPE_FIELDS (current_class_type), decl);
      else
	{
	  DECL_CHAIN (decl) = TYPE_FIELDS (current_class_type);
	  TYPE_FIELDS (current_class_type) = decl;
	}

      maybe_add_class_template_decl_list (current_class_type, decl,
					  /*friend_p=*/0);
    }

  if (pch_file)
    note_decl_for_pch (decl);
}

/* DECL has been declared while we are building a PCH file.  Perform
   actions that we might normally undertake lazily, but which can be
   performed now so that they do not have to be performed in
   translation units which include the PCH file.
*/

void
note_decl_for_pch (tree decl)
{
  gcc_assert (pch_file);

  /* There's a good chance that we'll have to mangle names at some
     point, even if only for emission in debugging information.  */
  if (VAR_OR_FUNCTION_DECL_P (decl)
      && !processing_template_decl)
    mangle_decl (decl);
}

/* Finish processing a complete template declaration.  The PARMS are
   the template parameters.  */

void
finish_template_decl (tree parms)
{
  if (parms)
    end_template_decl ();
  else
    end_specialization ();
}

/* Finish processing a template-id (which names a type) of the form
   NAME < ARGS >.  Return the TYPE_DECL for the type named by the
   template-id.  If ENTERING_SCOPE is nonzero we are about to enter
   the scope of template-id indicated.  */

tree
finish_template_type (tree name, tree args, int entering_scope)
{
  tree type;

  type = lookup_template_class (name, args,
				NULL_TREE, NULL_TREE, entering_scope,
				tf_warning_or_error | tf_user);
  if (type == error_mark_node)
    return type;
  else if (CLASS_TYPE_P (type) && !alias_type_or_template_p (type))
    return TYPE_STUB_DECL (type);
  else
    return TYPE_NAME (type);
}

/* Finish processing a BASE_CLASS with the indicated ACCESS_SPECIFIER.
   Return a TREE_LIST containing the ACCESS_SPECIFIER and the
   BASE_CLASS, or NULL_TREE if an error occurred.  The
   ACCESS_SPECIFIER is one of
   access_{default,public,protected_private}_node.  For a virtual base
   we set TREE_TYPE.  */

tree
finish_base_specifier (tree base, tree access, bool virtual_p)
{
  tree result;

  if (base == error_mark_node)
    {
      error ("invalid base-class specification");
      result = NULL_TREE;
    }
  else if (! MAYBE_CLASS_TYPE_P (base))
    {
      error ("%qT is not a class type", base);
      result = NULL_TREE;
    }
  else
    {
      if (cp_type_quals (base) != 0)
	{
	  /* DR 484: Can a base-specifier name a cv-qualified
	     class type?  Strip the qualifiers.  */
	  base = TYPE_MAIN_VARIANT (base);
	}
      result = build_tree_list (access, base);
      /* TREE_TYPE is (ab)used as the virtual-base flag on the list
	 node; integer_type_node is just a non-NULL marker.  */
      if (virtual_p)
	TREE_TYPE (result) = integer_type_node;
    }

  return result;
}

/* If FNS is a member function, a set of member functions, or a
   template-id referring to one or more member functions, return a
   BASELINK for FNS, incorporating the current access context.
   Otherwise, return FNS unchanged.  */

tree
baselink_for_fns (tree fns)
{
  tree scope;
  tree cl;

  if (BASELINK_P (fns)
      || error_operand_p (fns))
    return fns;

  scope = ovl_scope (fns);
  if (!CLASS_TYPE_P (scope))
    return fns;

  /* Prefer a currently-open derived class as the access context, so
     access checking sees the most-derived view.  */
  cl = currently_open_derived_class (scope);
  if (!cl)
    cl = scope;
  cl = TYPE_BINFO (cl);
  return build_baselink (cl, cl, fns, /*optype=*/NULL_TREE);
}

/* Returns true iff DECL is a variable from a function outside
   the current one.  */

static bool
outer_var_p (tree decl)
{
  return ((VAR_P (decl) || TREE_CODE (decl) == PARM_DECL)
	  && DECL_FUNCTION_SCOPE_P (decl)
	  && (DECL_CONTEXT (decl) != current_function_decl
	      /* An NSDMI is conceptually outside any function.  */
	      || parsing_nsdmi ()));
}

/* As above, but also checks that DECL is automatic.  */

bool
outer_automatic_var_p (tree decl)
{
  return (outer_var_p (decl)
	  && !TREE_STATIC (decl));
}

/* DECL satisfies outer_automatic_var_p.  Possibly complain about it or
   rewrite it for lambda capture.  Returns the (possibly captured)
   replacement for DECL, or error_mark_node on a diagnosed failure.  */

tree
process_outer_var_ref (tree decl, tsubst_flags_t complain)
{
  if (cp_unevaluated_operand)
    /* It's not a use (3.2) if we're in an unevaluated context.  */
    return decl;
  if (decl == error_mark_node)
    return decl;

  tree context = DECL_CONTEXT (decl);
  tree containing_function = current_function_decl;
  tree lambda_stack = NULL_TREE;
  tree lambda_expr = NULL_TREE;
  tree initializer = convert_from_reference (decl);

  /* Mark it as used now even if the use is ill-formed.  */
  if (!mark_used (decl, complain) && !(complain & tf_error))
    return error_mark_node;

  /* Core issue 696: "[At the July 2009 meeting] the CWG expressed
     support for an approach in which a reference to a local
     [constant] automatic variable in a nested class or lambda body
     would enter the expression as an rvalue, which would reduce
     the complexity of the problem"

     FIXME update for final resolution of core issue 696.  */
  if (decl_maybe_constant_var_p (decl))
    {
      if (processing_template_decl)
	/* In a template, the constant value may not be in a usable
	   form, so wait until instantiation time.  */
	return decl;
      else if (decl_constant_var_p (decl))
	{
	  tree t = maybe_constant_value (convert_from_reference (decl));
	  if (TREE_CONSTANT (t))
	    return t;
	}
    }

  if (parsing_nsdmi ())
    containing_function = NULL_TREE;
  else
    /* If we are in a lambda function, we can move out until we hit
       1. the context,
       2. a non-lambda function, or
       3. a non-default capturing lambda function.  */
    while (context != containing_function
	   && LAMBDA_FUNCTION_P (containing_function))
      {
	tree closure = DECL_CONTEXT (containing_function);
	lambda_expr = CLASSTYPE_LAMBDA_EXPR (closure);

	if (TYPE_CLASS_SCOPE_P (closure))
	  /* A lambda in an NSDMI (c++/64496).  */
	  break;

	if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr)
	    == CPLD_NONE)
	  break;

	/* Remember each enclosing default-capturing lambda so that
	   add_default_capture can capture through all of them.  */
	lambda_stack = tree_cons (NULL_TREE,
				  lambda_expr,
				  lambda_stack);

	containing_function
	  = decl_function_context (containing_function);
      }

  if (lambda_expr && TREE_CODE (decl) == VAR_DECL
      && DECL_ANON_UNION_VAR_P (decl))
    {
      if (complain & tf_error)
	error ("cannot capture member %qD of anonymous union", decl);
      return error_mark_node;
    }
  if (context == containing_function)
    {
      /* We reached DECL's own function through default-capturing
	 lambdas only; capture it.  */
      decl = add_default_capture (lambda_stack,
				  /*id=*/DECL_NAME (decl),
				  initializer);
    }
  else if (lambda_expr)
    {
      if (complain & tf_error)
	{
	  error ("%qD is not captured", decl);
	  tree closure = LAMBDA_EXPR_CLOSURE (lambda_expr);
	  if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr)
	      == CPLD_NONE)
	    inform (location_of (closure),
		    "the lambda has no capture-default");
	  else if (TYPE_CLASS_SCOPE_P (closure))
	    inform (0, "lambda in local class %q+T cannot "
		    "capture variables from the enclosing context",
		    TYPE_CONTEXT (closure));
	  inform (input_location, "%q+#D declared here", decl);
	}
      return error_mark_node;
    }
  else
    {
      if (complain & tf_error)
	error (VAR_P (decl)
	       ? G_("use of local variable with automatic storage from containing function")
	       : G_("use of parameter from containing function"));
      /* NOTE(review): this inform is issued even when tf_error is not
	 set, unlike the branch above — confirm whether that is
	 intended.  */
      inform (input_location, "%q+#D declared here", decl);
      return error_mark_node;
    }
  return decl;
}

/* ID_EXPRESSION is a representation of parsed, but unprocessed,
   id-expression.  (See cp_parser_id_expression for details.)  SCOPE,
   if non-NULL, is the type or namespace used to explicitly qualify
   ID_EXPRESSION.  DECL is the entity to which that name has been
   resolved.

   *CONSTANT_EXPRESSION_P is true if we are presently parsing a
   constant-expression.  In that case, *NON_CONSTANT_EXPRESSION_P will
   be set to true if this expression isn't permitted in a
   constant-expression, but it is otherwise not set by this function.
   *ALLOW_NON_CONSTANT_EXPRESSION_P is true if we are parsing a
   constant-expression, but a non-constant expression is also
   permissible.
   DONE is true if this expression is a complete
   postfix-expression; it is false if this expression is followed by
   '->', '[', '(', etc.  ADDRESS_P is true iff this expression is the
   operand of '&'.  TEMPLATE_P is true iff the qualified-id was of the
   form "A::template B".  TEMPLATE_ARG_P is true iff this qualified
   name appears as a template argument.

   If an error occurs, and it is the kind of error that might cause
   the parser to abort a tentative parse, *ERROR_MSG is filled in.  It
   is the caller's responsibility to issue the message.  *ERROR_MSG
   will be a string with static storage duration, so the caller need
   not "free" it.

   Return an expression for the entity, after issuing appropriate
   diagnostics.  This function is also responsible for transforming a
   reference to a non-static member into a COMPONENT_REF that makes
   the use of "this" explicit.

   Upon return, *IDK will be filled in appropriately.  */

tree
finish_id_expression (tree id_expression,
		      tree decl,
		      tree scope,
		      cp_id_kind *idk,
		      bool integral_constant_expression_p,
		      bool allow_non_integral_constant_expression_p,
		      bool *non_integral_constant_expression_p,
		      bool template_p,
		      bool done,
		      bool address_p,
		      bool template_arg_p,
		      const char **error_msg,
		      location_t location)
{
  decl = strip_using_decl (decl);

  /* Initialize the output parameters.  */
  *idk = CP_ID_KIND_NONE;
  *error_msg = NULL;

  if (id_expression == error_mark_node)
    return error_mark_node;
  /* If we have a template-id, then no further lookup is
     required.  If the template-id was for a template-class, we
     will sometimes have a TYPE_DECL at this point.  */
  else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
	   || TREE_CODE (decl) == TYPE_DECL)
    ;
  /* Look up the name.  */
  else
    {
      if (decl == error_mark_node)
	{
	  /* Name lookup failed.  */
	  if (scope
	      && (!TYPE_P (scope)
		  || (!dependent_type_p (scope)
		      && !(identifier_p (id_expression)
			   && IDENTIFIER_TYPENAME_P (id_expression)
			   && dependent_type_p (TREE_TYPE (id_expression))))))
	    {
	      /* If the qualifying type is non-dependent (and the name
		 does not name a conversion operator to a dependent
		 type), issue an error.  */
	      qualified_name_lookup_error (scope, id_expression, decl, location);
	      return error_mark_node;
	    }
	  else if (!scope)
	    {
	      /* It may be resolved via Koenig lookup.  */
	      *idk = CP_ID_KIND_UNQUALIFIED;
	      return id_expression;
	    }
	  else
	    decl = id_expression;
	}
      /* If DECL is a variable that would be out of scope under
	 ANSI/ISO rules, but in scope in the ARM, name lookup
	 will succeed.  Issue a diagnostic here.  */
      else
	decl = check_for_out_of_scope_variable (decl);

      /* Remember that the name was used in the definition of
	 the current class so that we can check later to see if
	 the meaning would have been different after the class
	 was entirely defined.  */
      if (!scope && decl != error_mark_node && identifier_p (id_expression))
	maybe_note_name_used_in_class (id_expression, decl);

      /* Disallow uses of local variables from containing functions, except
	 within lambda-expressions.  */
      if (outer_automatic_var_p (decl))
	{
	  decl = process_outer_var_ref (decl, tf_warning_or_error);
	  if (decl == error_mark_node)
	    return error_mark_node;
	}

      /* Also disallow uses of function parameters outside the function
	 body, except inside an unevaluated context (i.e. decltype).  */
      if (TREE_CODE (decl) == PARM_DECL
	  && DECL_CONTEXT (decl) == NULL_TREE
	  && !cp_unevaluated_operand)
	{
	  *error_msg = "use of parameter outside function body";
	  return error_mark_node;
	}
    }

  /* If we didn't find anything, or what we found was a type,
     then this wasn't really an id-expression.  */
  if (TREE_CODE (decl) == TEMPLATE_DECL
      && !DECL_FUNCTION_TEMPLATE_P (decl))
    {
      *error_msg = "missing template arguments";
      return error_mark_node;
    }
  else if (TREE_CODE (decl) == TYPE_DECL
	   || TREE_CODE (decl) == NAMESPACE_DECL)
    {
      *error_msg = "expected primary-expression";
      return error_mark_node;
    }

  /* If the name resolved to a template parameter, there is no
     need to look it up again later.  */
  if ((TREE_CODE (decl) == CONST_DECL && DECL_TEMPLATE_PARM_P (decl))
      || TREE_CODE (decl) == TEMPLATE_PARM_INDEX)
    {
      tree r;

      *idk = CP_ID_KIND_NONE;
      if (TREE_CODE (decl) == TEMPLATE_PARM_INDEX)
	decl = TEMPLATE_PARM_DECL (decl);
      /* The parameter's value lives in DECL_INITIAL.  */
      r = convert_from_reference (DECL_INITIAL (decl));

      if (integral_constant_expression_p
	  && !dependent_type_p (TREE_TYPE (decl))
	  && !(INTEGRAL_OR_ENUMERATION_TYPE_P (TREE_TYPE (r))))
	{
	  if (!allow_non_integral_constant_expression_p)
	    error ("template parameter %qD of type %qT is not allowed in "
		   "an integral constant expression because it is not of "
		   "integral or enumeration type", decl, TREE_TYPE (decl));
	  *non_integral_constant_expression_p = true;
	}
      return r;
    }
  else
    {
      bool dependent_p;

      /* If the declaration was explicitly qualified indicate
	 that.  The semantics of `A::f(3)' are different than
	 `f(3)' if `f' is virtual.  */
      *idk = (scope
	      ? CP_ID_KIND_QUALIFIED
	      : (TREE_CODE (decl) == TEMPLATE_ID_EXPR
		 ? CP_ID_KIND_TEMPLATE_ID
		 : CP_ID_KIND_UNQUALIFIED));

      /* [temp.dep.expr]

	 An id-expression is type-dependent if it contains an
	 identifier that was declared with a dependent type.

	 The standard is not very specific about an id-expression that
	 names a set of overloaded functions.  What if some of them
	 have dependent types and some of them do not?  Presumably,
	 such a name should be treated as a dependent name.  */
      /* Assume the name is not dependent.  */
      dependent_p = false;
      if (!processing_template_decl)
	/* No names are dependent outside a template.  */
	;
      else if (TREE_CODE (decl) == CONST_DECL)
	/* We don't want to treat enumerators as dependent.  */
	;
      /* A template-id where the name of the template was not resolved
	 is definitely dependent.  */
      else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
	       && (identifier_p (TREE_OPERAND (decl, 0))))
	dependent_p = true;
      /* For anything except an overloaded function, just check its
	 type.  */
      else if (!is_overloaded_fn (decl))
	dependent_p = dependent_type_p (TREE_TYPE (decl));
      /* For a set of overloaded functions, check each of the
	 functions.  */
      else
	{
	  tree fns = decl;

	  if (BASELINK_P (fns))
	    fns = BASELINK_FUNCTIONS (fns);

	  /* For a template-id, check to see if the template
	     arguments are dependent.  */
	  if (TREE_CODE (fns) == TEMPLATE_ID_EXPR)
	    {
	      tree args = TREE_OPERAND (fns, 1);
	      dependent_p = any_dependent_template_arguments_p (args);
	      /* The functions are those referred to by the
		 template-id.  */
	      fns = TREE_OPERAND (fns, 0);
	    }

	  /* If there are no dependent template arguments, go through
	     the overloaded functions.  */
	  while (fns && !dependent_p)
	    {
	      tree fn = OVL_CURRENT (fns);

	      /* Member functions of dependent classes are
		 dependent.  */
	      if (TREE_CODE (fn) == FUNCTION_DECL
		  && type_dependent_expression_p (fn))
		dependent_p = true;
	      else if (TREE_CODE (fn) == TEMPLATE_DECL
		       && dependent_template_p (fn))
		dependent_p = true;

	      fns = OVL_NEXT (fns);
	    }
	}

      /* If the name was dependent on a template parameter, we will
	 resolve the name at instantiation time.  */
      if (dependent_p)
	{
	  /* Create a SCOPE_REF for qualified names, if the scope is
	     dependent.  */
	  if (scope)
	    {
	      if (TYPE_P (scope))
		{
		  if (address_p && done)
		    decl = finish_qualified_id_expr (scope, decl,
						     done, address_p,
						     template_p,
						     template_arg_p,
						     tf_warning_or_error);
		  else
		    {
		      tree type = NULL_TREE;
		      if (DECL_P (decl) && !dependent_scope_p (scope))
			type = TREE_TYPE (decl);
		      decl = build_qualified_name (type,
						   scope,
						   id_expression,
						   template_p);
		    }
		}
	      if (TREE_TYPE (decl))
		decl = convert_from_reference (decl);
	      return decl;
	    }
	  /* A TEMPLATE_ID already contains all the information we
	     need.  */
	  if (TREE_CODE (id_expression) == TEMPLATE_ID_EXPR)
	    return id_expression;
	  *idk = CP_ID_KIND_UNQUALIFIED_DEPENDENT;
	  /* If we found a variable, then name lookup during the
	     instantiation will always resolve to the same VAR_DECL
	     (or an instantiation thereof).  */
	  if (VAR_P (decl)
	      || TREE_CODE (decl) == PARM_DECL)
	    {
	      mark_used (decl);
	      return convert_from_reference (decl);
	    }
	  /* The same is true for FIELD_DECL, but we also need to
	     make sure that the syntax is correct.  */
	  else if (TREE_CODE (decl) == FIELD_DECL)
	    {
	      /* Since SCOPE is NULL here, this is an unqualified name.
		 Access checking has been performed during name lookup
		 already.  Turn off checking to avoid duplicate errors.  */
	      push_deferring_access_checks (dk_no_check);
	      decl = finish_non_static_data_member
		       (decl, NULL_TREE,
			/*qualifying_scope=*/NULL_TREE);
	      pop_deferring_access_checks ();
	      return decl;
	    }
	  return id_expression;
	}

      if (TREE_CODE (decl) == NAMESPACE_DECL)
	{
	  error ("use of namespace %qD as expression", decl);
	  return error_mark_node;
	}
      else if (DECL_CLASS_TEMPLATE_P (decl))
	{
	  error ("use of class template %qT as expression", decl);
	  return error_mark_node;
	}
      else if (TREE_CODE (decl) == TREE_LIST)
	{
	  /* Ambiguous reference to base members.  */
	  error ("request for member %qD is ambiguous in "
		 "multiple inheritance lattice", id_expression);
	  print_candidates (decl);
	  return error_mark_node;
	}

      /* Mark variable-like entities as used.  Functions are similarly
	 marked either below or after overload resolution.  */
      if ((VAR_P (decl)
	   || TREE_CODE (decl) == PARM_DECL
	   || TREE_CODE (decl) == CONST_DECL
	   || TREE_CODE (decl) == RESULT_DECL)
	  && !mark_used (decl))
	return error_mark_node;

      /* Only certain kinds of names are allowed in constant
	 expression.  Template parameters have already
	 been handled above.  */
      if (! error_operand_p (decl)
	  && integral_constant_expression_p
	  && ! decl_constant_var_p (decl)
	  && TREE_CODE (decl) != CONST_DECL
	  && ! builtin_valid_in_constant_expr_p (decl))
	{
	  if (!allow_non_integral_constant_expression_p)
	    {
	      error ("%qD cannot appear in a constant-expression", decl);
	      return error_mark_node;
	    }
	  *non_integral_constant_expression_p = true;
	}

      tree wrap;
      if (VAR_P (decl)
	  && !cp_unevaluated_operand
	  && !processing_template_decl
	  && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	  && DECL_THREAD_LOCAL_P (decl)
	  && (wrap = get_tls_wrapper_fn (decl)))
	{
	  /* Replace an evaluated use of the thread_local variable with
	     a call to its wrapper.  */
	  decl = build_cxx_call (wrap, 0, NULL, tf_warning_or_error);
	}
      else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
	       && variable_template_p (TREE_OPERAND (decl, 0)))
	{
	  decl = finish_template_variable (decl);
	  mark_used (decl);
	  decl = convert_from_reference (decl);
	}
      else if (scope)
	{
	  decl = (adjust_result_of_qualified_name_lookup
		  (decl, scope, current_nonlambda_class_type()));

	  if (TREE_CODE (decl) == FUNCTION_DECL)
	    mark_used (decl);

	  if (TYPE_P (scope))
	    decl = finish_qualified_id_expr (scope,
					     decl,
					     done,
					     address_p,
					     template_p,
					     template_arg_p,
					     tf_warning_or_error);
	  else
	    decl = convert_from_reference (decl);
	}
      else if (TREE_CODE (decl) == FIELD_DECL)
	{
	  /* Since SCOPE is NULL here, this is an unqualified name.
	     Access checking has been performed during name lookup
	     already.  Turn off checking to avoid duplicate errors.  */
	  push_deferring_access_checks (dk_no_check);
	  decl = finish_non_static_data_member (decl, NULL_TREE,
						/*qualifying_scope=*/NULL_TREE);
	  pop_deferring_access_checks ();
	}
      else if (is_overloaded_fn (decl))
	{
	  tree first_fn;

	  first_fn = get_first_fn (decl);
	  if (TREE_CODE (first_fn) == TEMPLATE_DECL)
	    first_fn = DECL_TEMPLATE_RESULT (first_fn);

	  if (!really_overloaded_fn (decl)
	      && !mark_used (first_fn))
	    return error_mark_node;

	  if (!template_arg_p
	      && TREE_CODE (first_fn) == FUNCTION_DECL
	      && DECL_FUNCTION_MEMBER_P (first_fn)
	      && !shared_member_p (decl))
	    {
	      /* A set of member functions.  */
	      decl = maybe_dummy_object (DECL_CONTEXT (first_fn), 0);
	      return finish_class_member_access_expr (decl, id_expression,
						      /*template_p=*/false,
						      tf_warning_or_error);
	    }

	  decl = baselink_for_fns (decl);
	}
      else
	{
	  if (DECL_P (decl) && DECL_NONLOCAL (decl)
	      && DECL_CLASS_SCOPE_P (decl))
	    {
	      tree context = context_for_name_lookup (decl);
	      if (context != current_class_type)
		{
		  tree path = currently_open_derived_class (context);
		  perform_or_defer_access_check (TYPE_BINFO (path),
						 decl, decl,
						 tf_warning_or_error);
		}
	    }

	  decl = convert_from_reference (decl);
	}
    }

  /* Handle references (c++/56130).  Warn on the referenced entity,
     not the reference wrapper.  */
  tree t = REFERENCE_REF_P (decl) ? TREE_OPERAND (decl, 0) : decl;
  if (TREE_DEPRECATED (t))
    warn_deprecated_use (t, NULL_TREE);

  return decl;
}

/* Implement the __typeof keyword: Return the type of EXPR, suitable for
   use as a type-specifier.  */

tree
finish_typeof (tree expr)
{
  tree type;

  if (type_dependent_expression_p (expr))
    {
      /* Defer: build a TYPEOF_TYPE placeholder to be resolved at
	 instantiation time.  */
      type = cxx_make_type (TYPEOF_TYPE);
      TYPEOF_TYPE_EXPR (type) = expr;
      SET_TYPE_STRUCTURAL_EQUALITY (type);

      return type;
    }

  expr = mark_type_use (expr);

  type = unlowered_expr_type (expr);

  if (!type || type == unknown_type_node)
    {
      error ("type of %qE is unknown", expr);
      return error_mark_node;
    }

  return type;
}

/* Implement the __underlying_type keyword: Return the underlying
   type of TYPE, suitable for use as a type-specifier.  */

tree
finish_underlying_type (tree type)
{
  tree underlying_type;

  if (processing_template_decl)
    {
      /* Defer with an UNDERLYING_TYPE placeholder in templates.  */
      underlying_type = cxx_make_type (UNDERLYING_TYPE);
      UNDERLYING_TYPE_TYPE (underlying_type) = type;
      SET_TYPE_STRUCTURAL_EQUALITY (underlying_type);

      return underlying_type;
    }

  complete_type (type);

  if (TREE_CODE (type) != ENUMERAL_TYPE)
    {
      error ("%qT is not an enumeration type", type);
      return error_mark_node;
    }

  underlying_type = ENUM_UNDERLYING_TYPE (type);

  /* Fixup necessary in this case because ENUM_UNDERLYING_TYPE
     includes TYPE_MIN_VALUE and TYPE_MAX_VALUE information.
     See finish_enum_value_list for details.
*/
  if (!ENUM_FIXED_UNDERLYING_TYPE_P (type))
    underlying_type
      = c_common_type_for_mode (TYPE_MODE (underlying_type),
				TYPE_UNSIGNED (underlying_type));

  return underlying_type;
}

/* Implement the __direct_bases keyword: Return the direct base classes
   of TYPE as a TREE_VEC, virtual bases first (they are initialized
   first), then non-virtual bases in declaration order.  */

tree
calculate_direct_bases (tree type)
{
  vec<tree, va_gc> *vector = make_tree_vector ();
  tree bases_vec = NULL_TREE;
  vec<tree, va_gc> *base_binfos;
  tree binfo;
  unsigned i;

  complete_type (type);

  if (!NON_UNION_CLASS_TYPE_P (type))
    {
      /* Fix: return the scratch vector to the tree-vector cache on
	 this early-return path instead of leaking it.  */
      release_tree_vector (vector);
      return make_tree_vec (0);
    }

  base_binfos = BINFO_BASE_BINFOS (TYPE_BINFO (type));

  /* Virtual bases are initialized first */
  for (i = 0; base_binfos->iterate (i, &binfo); i++)
    {
      if (BINFO_VIRTUAL_P (binfo))
	{
	  vec_safe_push (vector, binfo);
	}
    }

  /* Now non-virtuals */
  for (i = 0; base_binfos->iterate (i, &binfo); i++)
    {
      if (!BINFO_VIRTUAL_P (binfo))
	{
	  vec_safe_push (vector, binfo);
	}
    }

  bases_vec = make_tree_vec (vector->length ());

  for (i = 0; i < vector->length (); ++i)
    {
      TREE_VEC_ELT (bases_vec, i) = BINFO_TYPE ((*vector)[i]);
    }

  /* Fix: the scratch vector was previously leaked here; release it as
     calculate_bases below already does.  */
  release_tree_vector (vector);
  return bases_vec;
}

/* Implement the __bases keyword: Return the base classes of type */

/* Find morally non-virtual base classes by walking binfo hierarchy */
/* Virtual base classes are handled separately in finish_bases */

static tree
dfs_calculate_bases_pre (tree binfo, void * /*data_*/)
{
  /* Don't walk bases of virtual bases */
  return BINFO_VIRTUAL_P (binfo) ?
dfs_skip_bases : NULL_TREE; } static tree dfs_calculate_bases_post (tree binfo, void *data_) { vec<tree, va_gc> **data = ((vec<tree, va_gc> **) data_); if (!BINFO_VIRTUAL_P (binfo)) { vec_safe_push (*data, BINFO_TYPE (binfo)); } return NULL_TREE; } /* Calculates the morally non-virtual base classes of a class */ static vec<tree, va_gc> * calculate_bases_helper (tree type) { vec<tree, va_gc> *vector = make_tree_vector(); /* Now add non-virtual base classes in order of construction */ dfs_walk_all (TYPE_BINFO (type), dfs_calculate_bases_pre, dfs_calculate_bases_post, &vector); return vector; } tree calculate_bases (tree type) { vec<tree, va_gc> *vector = make_tree_vector(); tree bases_vec = NULL_TREE; unsigned i; vec<tree, va_gc> *vbases; vec<tree, va_gc> *nonvbases; tree binfo; complete_type (type); if (!NON_UNION_CLASS_TYPE_P (type)) return make_tree_vec (0); /* First go through virtual base classes */ for (vbases = CLASSTYPE_VBASECLASSES (type), i = 0; vec_safe_iterate (vbases, i, &binfo); i++) { vec<tree, va_gc> *vbase_bases; vbase_bases = calculate_bases_helper (BINFO_TYPE (binfo)); vec_safe_splice (vector, vbase_bases); release_tree_vector (vbase_bases); } /* Now for the non-virtual bases */ nonvbases = calculate_bases_helper (type); vec_safe_splice (vector, nonvbases); release_tree_vector (nonvbases); /* Last element is entire class, so don't copy */ bases_vec = make_tree_vec (vector->length () - 1); for (i = 0; i < vector->length () - 1; ++i) { TREE_VEC_ELT (bases_vec, i) = (*vector)[i]; } release_tree_vector (vector); return bases_vec; } tree finish_bases (tree type, bool direct) { tree bases = NULL_TREE; if (!processing_template_decl) { /* Parameter packs can only be used in templates */ error ("Parameter pack __bases only valid in template declaration"); return error_mark_node; } bases = cxx_make_type (BASES); BASES_TYPE (bases) = type; BASES_DIRECT (bases) = direct; SET_TYPE_STRUCTURAL_EQUALITY (bases); return bases; } /* Perform C++-specific checks for 
   __builtin_offsetof before calling fold_offsetof.  */

tree
finish_offsetof (tree expr, location_t loc)
{
  /* If we're processing a template, we can't finish the semantics yet.
     Otherwise we can fold the entire expression now.  */
  if (processing_template_decl)
    {
      expr = build1 (OFFSETOF_EXPR, size_type_node, expr);
      SET_EXPR_LOCATION (expr, loc);
      return expr;
    }

  if (TREE_CODE (expr) == PSEUDO_DTOR_EXPR)
    {
      error ("cannot apply %<offsetof%> to destructor %<~%T%>",
	     TREE_OPERAND (expr, 2));
      return error_mark_node;
    }
  /* A function/method type (or unresolved overload) as the member
     designator is invalid.  */
  if (TREE_CODE (TREE_TYPE (expr)) == FUNCTION_TYPE
      || TREE_CODE (TREE_TYPE (expr)) == METHOD_TYPE
      || TREE_TYPE (expr) == unknown_type_node)
    {
      if (INDIRECT_REF_P (expr))
	error ("second operand of %<offsetof%> is neither a single "
	       "identifier nor a sequence of member accesses and "
	       "array references");
      else
	{
	  if (TREE_CODE (expr) == COMPONENT_REF
	      || TREE_CODE (expr) == COMPOUND_EXPR)
	    expr = TREE_OPERAND (expr, 1);
	  error ("cannot apply %<offsetof%> to member function %qD", expr);
	}
      return error_mark_node;
    }
  if (REFERENCE_REF_P (expr))
    expr = TREE_OPERAND (expr, 0);
  if (TREE_CODE (expr) == COMPONENT_REF)
    {
      tree object = TREE_OPERAND (expr, 0);
      if (!complete_type_or_else (TREE_TYPE (object), object))
	return error_mark_node;
      /* offsetof on a non-standard-layout class is conditionally
	 supported; warn under -Winvalid-offsetof.  */
      if (warn_invalid_offsetof
	  && CLASS_TYPE_P (TREE_TYPE (object))
	  && CLASSTYPE_NON_STD_LAYOUT (TREE_TYPE (object))
	  && cp_unevaluated_operand == 0)
	pedwarn (loc, OPT_Winvalid_offsetof,
		 "offsetof within non-standard-layout type %qT is undefined",
		 TREE_TYPE (object));
    }

  return fold_offsetof (expr);
}

/* Replace the AGGR_INIT_EXPR at *TP with an equivalent CALL_EXPR.  This
   function is broken out from the above for the benefit of the tree-ssa
   project.  */

void
simplify_aggr_init_expr (tree *tp)
{
  tree aggr_init_expr = *tp;

  /* Form an appropriate CALL_EXPR.  */
  tree fn = AGGR_INIT_EXPR_FN (aggr_init_expr);
  tree slot = AGGR_INIT_EXPR_SLOT (aggr_init_expr);
  tree type = TREE_TYPE (slot);

  tree call_expr;
  enum style_t { ctor, arg, pcc } style;

  /* Decide how the result object is produced: via a constructor call,
     via the PCC static-buffer return convention, or via the named
     return slot.  */
  if (AGGR_INIT_VIA_CTOR_P (aggr_init_expr))
    style = ctor;
#ifdef PCC_STATIC_STRUCT_RETURN
  else if (1)
    style = pcc;
#endif
  else
    {
      gcc_assert (TREE_ADDRESSABLE (type));
      style = arg;
    }

  call_expr = build_call_array_loc (input_location,
				    TREE_TYPE (TREE_TYPE (TREE_TYPE (fn))),
				    fn,
				    aggr_init_expr_nargs (aggr_init_expr),
				    AGGR_INIT_EXPR_ARGP (aggr_init_expr));
  TREE_NOTHROW (call_expr) = TREE_NOTHROW (aggr_init_expr);
  CALL_EXPR_LIST_INIT_P (call_expr) = CALL_EXPR_LIST_INIT_P (aggr_init_expr);

  if (style == ctor)
    {
      /* Replace the first argument to the ctor with the address of the
	 slot.  */
      cxx_mark_addressable (slot);
      CALL_EXPR_ARG (call_expr, 0) =
	build1 (ADDR_EXPR, build_pointer_type (type), slot);
    }
  else if (style == arg)
    {
      /* Just mark it addressable here, and leave the rest to
	 expand_call{,_inline}.  */
      cxx_mark_addressable (slot);
      CALL_EXPR_RETURN_SLOT_OPT (call_expr) = true;
      call_expr = build2 (INIT_EXPR, TREE_TYPE (call_expr), slot, call_expr);
    }
  else if (style == pcc)
    {
      /* If we're using the non-reentrant PCC calling convention, then we
	 need to copy the returned value out of the static buffer into the
	 SLOT.  */
      push_deferring_access_checks (dk_no_check);
      call_expr = build_aggr_init (slot, call_expr,
				   DIRECT_BIND | LOOKUP_ONLYCONVERTING,
				   tf_warning_or_error);
      pop_deferring_access_checks ();
      call_expr = build2 (COMPOUND_EXPR, TREE_TYPE (slot), call_expr, slot);
    }

  if (AGGR_INIT_ZERO_FIRST (aggr_init_expr))
    {
      /* Zero-initialize the slot before running the call.  */
      tree init = build_zero_init (type, NULL_TREE,
				   /*static_storage_p=*/false);
      init = build2 (INIT_EXPR, void_type_node, slot, init);
      call_expr = build2 (COMPOUND_EXPR, TREE_TYPE (call_expr),
			  init, call_expr);
    }

  *tp = call_expr;
}

/* Emit all thunks to FN that should be emitted when FN is emitted.
*/

void
emit_associated_thunks (tree fn)
{
  /* When we use vcall offsets, we emit thunks with the virtual
     functions to which they thunk.  The whole point of vcall offsets
     is so that you can know statically the entire set of thunks that
     will ever be needed for a given virtual function, thereby
     enabling you to output all the thunks with the function itself.  */
  if (DECL_VIRTUAL_P (fn)
      /* Do not emit thunks for extern template instantiations.  */
      && ! DECL_REALLY_EXTERN (fn))
    {
      tree thunk;

      for (thunk = DECL_THUNKS (fn); thunk; thunk = DECL_CHAIN (thunk))
	{
	  if (!THUNK_ALIAS (thunk))
	    {
	      use_thunk (thunk, /*emit_p=*/1);
	      /* A result-adjusting thunk may itself have thunks
		 chained off it; emit those as well.  */
	      if (DECL_RESULT_THUNK_P (thunk))
		{
		  tree probe;

		  for (probe = DECL_THUNKS (thunk);
		       probe; probe = DECL_CHAIN (probe))
		    use_thunk (probe, /*emit_p=*/1);
		}
	    }
	  else
	    /* An aliased thunk must not carry its own thunk chain.  */
	    gcc_assert (!DECL_THUNKS (thunk));
	}
    }
}

/* Generate RTL for FN.  Returns true if the caller should hand FN to
   the callgraph machinery for actual expansion; returns false when FN
   needs no further processing here (template context, deferred ctor/dtor
   clone, or -fsyntax-only).  */

bool
expand_or_defer_fn_1 (tree fn)
{
  /* When the parser calls us after finishing the body of a template
     function, we don't really want to expand the body.  */
  if (processing_template_decl)
    {
      /* Normally, collection only occurs in rest_of_compilation.  So,
	 if we don't collect here, we never collect junk generated
	 during the processing of templates until we hit a
	 non-template function.  It's not safe to do this inside a
	 nested class, though, as the parser may have local state that
	 is not a GC root.  */
      if (!function_depth)
	ggc_collect ();
      return false;
    }

  gcc_assert (DECL_SAVED_TREE (fn));

  /* We make a decision about linkage for these functions at the end
     of the compilation.  Until that point, we do not want the back
     end to output them -- but we do want it to see the bodies of
     these functions so that it can inline them as appropriate.  */
  if (DECL_DECLARED_INLINE_P (fn) || DECL_IMPLICIT_INSTANTIATION (fn))
    {
      if (DECL_INTERFACE_KNOWN (fn))
	/* We've already made a decision as to how this function will
	   be handled.  */;
      else if (!at_eof)
	tentative_decl_linkage (fn);
      else
	import_export_decl (fn);

      /* If the user wants us to keep all inline functions, then mark
	 this function as needed so that finish_file will make sure to
	 output it later.  Similarly, all dllexport'd functions must
	 be emitted; there may be callers in other DLLs.  */
      if (DECL_DECLARED_INLINE_P (fn)
	  && !DECL_REALLY_EXTERN (fn)
	  && (flag_keep_inline_functions
	      || (flag_keep_inline_dllexport
		  && lookup_attribute ("dllexport", DECL_ATTRIBUTES (fn)))))
	{
	  mark_needed (fn);
	  DECL_EXTERNAL (fn) = 0;
	}
    }

  /* If this is a constructor or destructor body, we have to clone
     it.  */
  if (maybe_clone_body (fn))
    {
      /* We don't want to process FN again, so pretend we've written
	 it out, even though we haven't.  */
      TREE_ASM_WRITTEN (fn) = 1;
      /* If this is a constexpr function, keep DECL_SAVED_TREE.  */
      if (!DECL_DECLARED_CONSTEXPR_P (fn))
	DECL_SAVED_TREE (fn) = NULL_TREE;
      return false;
    }

  /* There's no reason to do any of the work here if we're only doing
     semantic analysis; this code just generates RTL.  */
  if (flag_syntax_only)
    return false;

  return true;
}

/* Expand FN now, or record it with the callgraph so it is expanded (or
   discarded) later, at the compilation-unit manager's discretion.  */

void
expand_or_defer_fn (tree fn)
{
  if (expand_or_defer_fn_1 (fn))
    {
      function_depth++;

      /* Expand or defer, at the whim of the compilation unit
	 manager.  */
      cgraph_node::finalize_function (fn, function_depth > 1);
      emit_associated_thunks (fn);

      function_depth--;
    }
}

/* Walk-state for finalize_nrv_r: the local variable being replaced, the
   RESULT_DECL replacing it, and the set of trees already visited.  */

struct nrv_data
{
  nrv_data () : visited (37) {}

  /* The named local variable subject to the NRV optimization.  */
  tree var;
  /* The function's RESULT_DECL, substituted for VAR.  */
  tree result;
  /* Trees already walked, to avoid revisiting shared subtrees.  */
  hash_table<pointer_hash <tree_node> > visited;
};

/* Helper function for walk_tree, used by finalize_nrv below.  */

static tree
finalize_nrv_r (tree* tp, int* walk_subtrees, void* data)
{
  struct nrv_data *dp = (struct nrv_data *)data;
  tree_node **slot;

  /* No need to walk into types.  There wouldn't be any need to walk into
     non-statements, except that we have to consider STMT_EXPRs.  */
  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  /* Change all returns to just refer to the RESULT_DECL; this is a nop,
     but differs from using NULL_TREE in that it indicates that we care
     about the value of the RESULT_DECL.  */
  else if (TREE_CODE (*tp) == RETURN_EXPR)
    TREE_OPERAND (*tp, 0) = dp->result;
  /* Change all cleanups for the NRV to only run when an exception is
     thrown.  */
  else if (TREE_CODE (*tp) == CLEANUP_STMT
	   && CLEANUP_DECL (*tp) == dp->var)
    CLEANUP_EH_ONLY (*tp) = 1;
  /* Replace the DECL_EXPR for the NRV with an initialization of the
     RESULT_DECL, if needed.  */
  else if (TREE_CODE (*tp) == DECL_EXPR
	   && DECL_EXPR_DECL (*tp) == dp->var)
    {
      tree init;
      if (DECL_INITIAL (dp->var)
	  && DECL_INITIAL (dp->var) != error_mark_node)
	init = build2 (INIT_EXPR, void_type_node, dp->result,
		       DECL_INITIAL (dp->var));
      else
	init = build_empty_stmt (EXPR_LOCATION (*tp));
      DECL_INITIAL (dp->var) = NULL_TREE;
      SET_EXPR_LOCATION (init, EXPR_LOCATION (*tp));
      *tp = init;
    }
  /* And replace all uses of the NRV with the RESULT_DECL.  */
  else if (*tp == dp->var)
    *tp = dp->result;

  /* Avoid walking into the same tree more than once.  Unfortunately, we
     can't just use walk_tree_without duplicates because it would only
     call us for the first occurrence of dp->var in the function
     body.  */
  slot = dp->visited.find_slot (*tp, INSERT);
  if (*slot)
    *walk_subtrees = 0;
  else
    *slot = *tp;

  /* Keep iterating.  */
  return NULL_TREE;
}

/* Called from finish_function to implement the named return value
   optimization by overriding all the RETURN_EXPRs and pertinent
   CLEANUP_STMTs and replacing all occurrences of VAR with RESULT, the
   RESULT_DECL for the function.  */

void
finalize_nrv (tree *tp, tree var, tree result)
{
  struct nrv_data data;

  /* Copy name from VAR to RESULT.  */
  DECL_NAME (result) = DECL_NAME (var);
  /* Don't forget that we take its address.  */
  TREE_ADDRESSABLE (result) = TREE_ADDRESSABLE (var);
  /* Finally set DECL_VALUE_EXPR to avoid assigning a stack slot at -O0
     for the original var and debug info uses RESULT location for
     VAR.  */
  SET_DECL_VALUE_EXPR (var, result);
  DECL_HAS_VALUE_EXPR_P (var) = 1;

  data.var = var;
  data.result = result;
  cp_walk_tree (tp, finalize_nrv_r, &data, 0);
}

/* Create CP_OMP_CLAUSE_INFO for clause C.  Returns true if it is
   invalid (i.e. an error was reported while looking up the required
   special member functions).  */

bool
cxx_omp_create_clause_info (tree c, tree type, bool need_default_ctor,
			    bool need_copy_ctor, bool need_copy_assignment,
			    bool need_dtor)
{
  int save_errorcount = errorcount;
  tree info, t;

  /* Always allocate 3 elements for simplicity.  These are the
     function decls for the ctor, dtor, and assignment op.
     This layout is known to the three lang hooks,
     cxx_omp_clause_default_init, cxx_omp_clause_copy_init,
     and cxx_omp_clause_assign_op.  */
  info = make_tree_vec (3);
  CP_OMP_CLAUSE_INFO (c) = info;

  if (need_default_ctor || need_copy_ctor)
    {
      if (need_default_ctor)
	t = get_default_ctor (type);
      else
	t = get_copy_ctor (type, tf_warning_or_error);

      /* Only record non-trivial members; trivial ones need no call.  */
      if (t && !trivial_fn_p (t))
	TREE_VEC_ELT (info, 0) = t;
    }

  if (need_dtor && TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type))
    TREE_VEC_ELT (info, 1) = get_dtor (type, tf_warning_or_error);

  if (need_copy_assignment)
    {
      t = get_copy_assign (type);

      if (t && !trivial_fn_p (t))
	TREE_VEC_ELT (info, 2) = t;
    }

  /* If the lookups above emitted diagnostics, the clause is invalid.  */
  return errorcount != save_errorcount;
}

/* Helper function for handle_omp_array_sections.  Called recursively
   to handle multiple array-section-subscripts.  C is the clause,
   T current expression (initially OMP_CLAUSE_DECL), which is either
   a TREE_LIST for array-section-subscript (TREE_PURPOSE is low-bound
   expression if specified, TREE_VALUE length expression if specified,
   TREE_CHAIN is what it has been specified after, or some decl.
   TYPES vector is populated with array section types, MAYBE_ZERO_LEN
   set to true if any of the array-section-subscript could have length
   of zero (explicit or implicit), FIRST_NON_ONE is the index of the
   first array-section-subscript which is known not to have length
   of one.  Given say:
   map(a[:b][2:1][:c][:2][:d][e:f][2:5])
   FIRST_NON_ONE will be 3, array-section-subscript [:b], [2:1] and [:c]
   all are or may have length of 1, array-section-subscript [:2] is the
   first one known not to have length 1.  For array-section-subscript
   <= FIRST_NON_ONE we diagnose non-contiguous arrays if low bound isn't
   0 or length isn't the array domain max + 1, for > FIRST_NON_ONE we
   can if MAYBE_ZERO_LEN is false.  MAYBE_ZERO_LEN will be true in the
   above case though, as some lengths could be zero.  */

static tree
handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
			     bool &maybe_zero_len, unsigned int &first_non_one)
{
  tree ret, low_bound, length, type;

  /* Base case: T is the underlying decl, not an
     array-section-subscript list node.  */
  if (TREE_CODE (t) != TREE_LIST)
    {
      if (error_operand_p (t))
	return error_mark_node;
      if (TREE_CODE (t) != VAR_DECL && TREE_CODE (t) != PARM_DECL)
	{
	  if (processing_template_decl)
	    return NULL_TREE;
	  if (DECL_P (t))
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "%qD is not a variable in %qs clause", t,
		      omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  else
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "%qE is not a variable in %qs clause", t,
		      omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      else if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
	       && TREE_CODE (t) == VAR_DECL && DECL_THREAD_LOCAL_P (t))
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "%qD is threadprivate variable in %qs clause", t,
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      if (type_dependent_expression_p (t))
	return NULL_TREE;
      t = convert_from_reference (t);
      return t;
    }

  /* Recurse on the rest of the subscript chain first, so the
     outermost dimensions are validated before this one.  */
  ret = handle_omp_array_sections_1 (c, TREE_CHAIN (t), types,
				     maybe_zero_len, first_non_one);
  if (ret == error_mark_node || ret == NULL_TREE)
    return ret;

  type = TREE_TYPE (ret);
  low_bound = TREE_PURPOSE (t);
  length = TREE_VALUE (t);

  if ((low_bound && type_dependent_expression_p (low_bound))
      || (length && type_dependent_expression_p (length)))
    return NULL_TREE;

  if (low_bound == error_mark_node || length == error_mark_node)
    return error_mark_node;

  if (low_bound && !INTEGRAL_TYPE_P (TREE_TYPE (low_bound)))
    {
      error_at (OMP_CLAUSE_LOCATION (c),
		"low bound %qE of array section does not have integral type",
		low_bound);
      return error_mark_node;
    }
  if (length && !INTEGRAL_TYPE_P (TREE_TYPE (length)))
    {
      error_at (OMP_CLAUSE_LOCATION (c),
		"length %qE of array section does not have integral type",
		length);
      return error_mark_node;
    }
  if (low_bound)
    low_bound = mark_rvalue_use (low_bound);
  if (length)
    length = mark_rvalue_use (length);
  /* Narrow over-wide integer constants to sizetype before they enter
     the size computations below.  */
  if (low_bound
      && TREE_CODE (low_bound) == INTEGER_CST
      && TYPE_PRECISION (TREE_TYPE (low_bound))
	 > TYPE_PRECISION (sizetype))
    low_bound = fold_convert (sizetype, low_bound);
  if (length
      && TREE_CODE (length) == INTEGER_CST
      && TYPE_PRECISION (TREE_TYPE (length))
	 > TYPE_PRECISION (sizetype))
    length = fold_convert (sizetype, length);
  /* An omitted low bound defaults to zero.  */
  if (low_bound == NULL_TREE)
    low_bound = integer_zero_node;

  if (length != NULL_TREE)
    {
      if (!integer_nonzerop (length))
	maybe_zero_len = true;
      /* A non-constant or constant-one length keeps pushing
	 FIRST_NON_ONE past this dimension.  */
      if (first_non_one == types.length ()
	  && (TREE_CODE (length) != INTEGER_CST || integer_onep (length)))
	first_non_one++;
    }
  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      if (length == NULL_TREE
	  && (TYPE_DOMAIN (type) == NULL_TREE
	      || TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE))
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "for unknown bound array type length expression must "
		    "be specified");
	  return error_mark_node;
	}
      if (TREE_CODE (low_bound) == INTEGER_CST
	  && tree_int_cst_sgn (low_bound) == -1)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "negative low bound in array section in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      if (length != NULL_TREE
	  && TREE_CODE (length) == INTEGER_CST
	  && tree_int_cst_sgn (length) == -1)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "negative length in array section in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      /* When the array domain is a known constant, range-check the
	 constant bounds against SIZE = domain max + 1.  */
      if (TYPE_DOMAIN (type)
	  && TYPE_MAX_VALUE (TYPE_DOMAIN (type))
	  && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) == INTEGER_CST)
	{
	  tree size = size_binop (PLUS_EXPR,
				  TYPE_MAX_VALUE (TYPE_DOMAIN (type)),
				  size_one_node);
	  if (TREE_CODE (low_bound) == INTEGER_CST)
	    {
	      if (tree_int_cst_lt (size, low_bound))
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "low bound %qE above array section size "
			    "in %qs clause", low_bound,
			    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		  return error_mark_node;
		}
	      if (tree_int_cst_equal (size, low_bound))
		maybe_zero_len = true;
	      else if (length == NULL_TREE
		       && first_non_one == types.length ()
		       && tree_int_cst_equal
			    (TYPE_MAX_VALUE (TYPE_DOMAIN (type)),
			     low_bound))
		first_non_one++;
	    }
	  else if (length == NULL_TREE)
	    {
	      maybe_zero_len = true;
	      if (first_non_one == types.length ())
		first_non_one++;
	    }
	  if (length && TREE_CODE (length) == INTEGER_CST)
	    {
	      if (tree_int_cst_lt (size, length))
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "length %qE above array section size "
			    "in %qs clause", length,
			    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		  return error_mark_node;
		}
	      if (TREE_CODE (low_bound) == INTEGER_CST)
		{
		  tree lbpluslen
		    = size_binop (PLUS_EXPR,
				  fold_convert (sizetype, low_bound),
				  fold_convert (sizetype, length));
		  if (TREE_CODE (lbpluslen) == INTEGER_CST
		      && tree_int_cst_lt (size, lbpluslen))
		    {
		      error_at (OMP_CLAUSE_LOCATION (c),
				"high bound %qE above array section size "
				"in %qs clause", lbpluslen,
				omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		      return error_mark_node;
		    }
		}
	    }
	}
      else if (length == NULL_TREE)
	{
	  maybe_zero_len = true;
	  if (first_non_one == types.length ())
	    first_non_one++;
	}

      /* For [lb:] we will need to evaluate lb more than once.  */
      if (length == NULL_TREE && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
	{
	  tree lb = cp_save_expr (low_bound);
	  if (lb != low_bound)
	    {
	      TREE_PURPOSE (t) = lb;
	      low_bound = lb;
	    }
	}
    }
  else if (TREE_CODE (type) == POINTER_TYPE)
    {
      if (length == NULL_TREE)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "for pointer type length expression must be specified");
	  return error_mark_node;
	}
      /* If there is a pointer type anywhere but in the very first
	 array-section-subscript, the array section can't be
	 contiguous.  */
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
	  && TREE_CODE (TREE_CHAIN (t)) == TREE_LIST)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "array section is not contiguous in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
    }
  else
    {
      error_at (OMP_CLAUSE_LOCATION (c),
		"%qE does not have pointer or array type", ret);
      return error_mark_node;
    }
  if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
    types.safe_push (TREE_TYPE (ret));
  /* We will need to evaluate lb more than once.  */
  tree lb = cp_save_expr (low_bound);
  if (lb != low_bound)
    {
      TREE_PURPOSE (t) = lb;
      low_bound = lb;
    }
  ret = grok_array_decl (OMP_CLAUSE_LOCATION (c), ret, low_bound, false);
  return ret;
}

/* Handle array sections for clause C.  Returns true if an error was
   diagnosed (and the clause should be removed), false otherwise.  */

static bool
handle_omp_array_sections (tree c)
{
  bool maybe_zero_len = false;
  unsigned int first_non_one = 0;
  auto_vec<tree> types;
  tree first = handle_omp_array_sections_1 (c, OMP_CLAUSE_DECL (c), types,
					    maybe_zero_len, first_non_one);
  if (first == error_mark_node)
    return true;
  if (first == NULL_TREE)
    return false;
  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
    {
      tree t = OMP_CLAUSE_DECL (c);
      tree tem = NULL_TREE;
      if (processing_template_decl)
	return false;
      /* Need to evaluate side effects in the length expressions
	 if any.  */
      while (TREE_CODE (t) == TREE_LIST)
	{
	  if (TREE_VALUE (t) && TREE_SIDE_EFFECTS (TREE_VALUE (t)))
	    {
	      if (tem == NULL_TREE)
		tem = TREE_VALUE (t);
	      else
		tem = build2 (COMPOUND_EXPR, TREE_TYPE (tem),
			      TREE_VALUE (t), tem);
	    }
	  t = TREE_CHAIN (t);
	}
      if (tem)
	first = build2 (COMPOUND_EXPR, TREE_TYPE (first), tem, first);
      OMP_CLAUSE_DECL (c) = first;
    }
  else
    {
      unsigned int num = types.length (), i;
      tree t, side_effects = NULL_TREE, size = NULL_TREE;
      tree condition = NULL_TREE;

      if (int_size_in_bytes (TREE_TYPE (first)) <= 0)
	maybe_zero_len = true;
      if (processing_template_decl && maybe_zero_len)
	return false;

      /* Walk the subscript list innermost-first (I counts down),
	 checking contiguity and accumulating the total SIZE in
	 bytes.  */
      for (i = num, t = OMP_CLAUSE_DECL (c); i > 0;
	   t = TREE_CHAIN (t))
	{
	  tree low_bound = TREE_PURPOSE (t);
	  tree length = TREE_VALUE (t);

	  i--;
	  if (low_bound
	      && TREE_CODE (low_bound) == INTEGER_CST
	      && TYPE_PRECISION (TREE_TYPE (low_bound))
		 > TYPE_PRECISION (sizetype))
	    low_bound = fold_convert (sizetype, low_bound);
	  if (length
	      && TREE_CODE (length) == INTEGER_CST
	      && TYPE_PRECISION (TREE_TYPE (length))
		 > TYPE_PRECISION (sizetype))
	    length = fold_convert (sizetype, length);
	  if (low_bound == NULL_TREE)
	    low_bound = integer_zero_node;
	  if (!maybe_zero_len && i > first_non_one)
	    {
	      /* Dimensions past FIRST_NON_ONE must cover the whole
		 array (low bound 0, length == domain size) or the
		 section is non-contiguous.  */
	      if (integer_nonzerop (low_bound))
		goto do_warn_noncontiguous;
	      if (length != NULL_TREE
		  && TREE_CODE (length) == INTEGER_CST
		  && TYPE_DOMAIN (types[i])
		  && TYPE_MAX_VALUE (TYPE_DOMAIN (types[i]))
		  && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])))
		     == INTEGER_CST)
		{
		  tree size;
		  size = size_binop (PLUS_EXPR,
				     TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])),
				     size_one_node);
		  if (!tree_int_cst_equal (length, size))
		    {
		     do_warn_noncontiguous:
		      error_at (OMP_CLAUSE_LOCATION (c),
				"array section is not contiguous in %qs "
				"clause",
				omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		      return true;
		    }
		}
	      if (!processing_template_decl
		  && length != NULL_TREE
		  && TREE_SIDE_EFFECTS (length))
		{
		  if (side_effects == NULL_TREE)
		    side_effects = length;
		  else
		    side_effects = build2 (COMPOUND_EXPR,
					   TREE_TYPE (side_effects),
					   length, side_effects);
		}
	    }
	  else if (processing_template_decl)
	    continue;
	  else
	    {
	      tree l;

	      if (i > first_non_one && length && integer_nonzerop (length))
		continue;
	      if (length)
		l = fold_convert (sizetype, length);
	      else
		{
		  l = size_binop (PLUS_EXPR,
				  TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])),
				  size_one_node);
		  l = size_binop (MINUS_EXPR, l,
				  fold_convert (sizetype, low_bound));
		}
	      if (i > first_non_one)
		{
		  /* Collect a runtime condition that all the maybe-one
		     lengths are non-zero.  */
		  l = fold_build2 (NE_EXPR, boolean_type_node, l,
				   size_zero_node);
		  if (condition == NULL_TREE)
		    condition = l;
		  else
		    condition = fold_build2 (BIT_AND_EXPR, boolean_type_node,
					     l, condition);
		}
	      else if (size == NULL_TREE)
		{
		  size = size_in_bytes (TREE_TYPE (types[i]));
		  size = size_binop (MULT_EXPR, size, l);
		  if (condition)
		    size = fold_build3 (COND_EXPR, sizetype, condition,
					size, size_zero_node);
		}
	      else
		size = size_binop (MULT_EXPR, size, l);
	    }
	}
      if (!processing_template_decl)
	{
	  if (side_effects)
	    size = build2 (COMPOUND_EXPR, sizetype, side_effects, size);
	  OMP_CLAUSE_DECL (c) = first;
	  OMP_CLAUSE_SIZE (c) = size;
	  if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP)
	    return false;
	  /* For map clauses, chain an extra GOMP_MAP_POINTER clause C2
	     after C carrying the base pointer and the offset of the
	     section start from it.  */
	  tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
				      OMP_CLAUSE_MAP);
	  OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_POINTER);
	  if (!cxx_mark_addressable (t))
	    return false;
	  OMP_CLAUSE_DECL (c2) = t;
	  t = build_fold_addr_expr (first);
	  t = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
				ptrdiff_type_node, t);
	  tree ptr = OMP_CLAUSE_DECL (c2);
	  ptr = convert_from_reference (ptr);
	  if (!POINTER_TYPE_P (TREE_TYPE (ptr)))
	    ptr = build_fold_addr_expr (ptr);
	  t = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR,
			       ptrdiff_type_node, t,
			       fold_convert_loc (OMP_CLAUSE_LOCATION (c),
						 ptrdiff_type_node, ptr));
	  OMP_CLAUSE_SIZE (c2) = t;
	  OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (c);
	  OMP_CLAUSE_CHAIN (c) = c2;
	  ptr = OMP_CLAUSE_DECL (c2);
	  /* A reference to pointer needs yet another GOMP_MAP_POINTER
	     clause C3 for the reference itself.  */
	  if (TREE_CODE (TREE_TYPE (ptr)) == REFERENCE_TYPE
	      && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (ptr))))
	    {
	      tree c3 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
					  OMP_CLAUSE_MAP);
	      OMP_CLAUSE_SET_MAP_KIND (c3, GOMP_MAP_POINTER);
	      OMP_CLAUSE_DECL (c3) = ptr;
	      OMP_CLAUSE_DECL (c2) = convert_from_reference (ptr);
	      OMP_CLAUSE_SIZE (c3) = size_zero_node;
	      OMP_CLAUSE_CHAIN (c3) = OMP_CLAUSE_CHAIN (c2);
	      OMP_CLAUSE_CHAIN (c2) = c3;
	    }
	}
    }
  return false;
}

/* Return identifier to look up for omp declare reduction.  */

tree
omp_reduction_id (enum tree_code reduction_code, tree reduction_id, tree type)
{
  const char *p = NULL;
  const char *m = NULL;
  switch (reduction_code)
    {
    case PLUS_EXPR:
    case MULT_EXPR:
    case MINUS_EXPR:
    case BIT_AND_EXPR:
    case BIT_XOR_EXPR:
    case BIT_IOR_EXPR:
    case TRUTH_ANDIF_EXPR:
    case TRUTH_ORIF_EXPR:
      /* Operator reductions use the operator's mangled name.  */
      reduction_id = ansi_opname (reduction_code);
      break;
    case MIN_EXPR:
      p = "min";
      break;
    case MAX_EXPR:
      p = "max";
      break;
    default:
      break;
    }

  if (p == NULL)
    {
      if (TREE_CODE (reduction_id) != IDENTIFIER_NODE)
	return error_mark_node;
      p = IDENTIFIER_POINTER (reduction_id);
    }

  if (type != NULL_TREE)
    m = mangle_type_string (TYPE_MAIN_VARIANT (type));

  /* If P already starts with the prefix, do not prepend it again
     (LENP == 1 means copy no prefix below).  */
  const char prefix[] = "omp declare reduction ";
  size_t lenp = sizeof (prefix);
  if (strncmp (p, prefix, lenp - 1) == 0)
    lenp = 1;
  size_t len = strlen (p);
  size_t lenm = m ? strlen (m) + 1 : 0;
  char *name = XALLOCAVEC (char, lenp + len + lenm);
  if (lenp > 1)
    memcpy (name, prefix, lenp - 1);
  memcpy (name + lenp - 1, p, len + 1);
  if (m)
    {
      /* '~' separates the reduction name from the mangled type.  */
      name[lenp + len - 1] = '~';
      memcpy (name + lenp + len, m, lenm);
    }
  return get_identifier (name);
}

/* Lookup OpenMP UDR ID for TYPE, return the corresponding artificial
   FUNCTION_DECL or NULL_TREE if not found.
*/

static tree
omp_reduction_lookup (location_t loc, tree id, tree type, tree *baselinkp,
		      vec<tree> *ambiguousp)
{
  tree orig_id = id;
  tree baselink = NULL_TREE;
  if (identifier_p (id))
    {
      cp_id_kind idk;
      bool nonint_cst_expression_p;
      const char *error_msg;
      /* Build the "omp declare reduction ..." identifier and resolve
	 it; fall back to argument-dependent lookup on a reference to
	 TYPE if ordinary lookup finds nothing.  */
      id = omp_reduction_id (ERROR_MARK, id, type);
      tree decl = lookup_name (id);
      if (decl == NULL_TREE)
	decl = error_mark_node;
      id = finish_id_expression (id, decl, NULL_TREE, &idk, false, true,
				 &nonint_cst_expression_p, false, true, false,
				 false, &error_msg, loc);
      if (idk == CP_ID_KIND_UNQUALIFIED
	  && identifier_p (id))
	{
	  vec<tree, va_gc> *args = NULL;
	  vec_safe_push (args, build_reference_type (type));
	  id = perform_koenig_lookup (id, args, tf_none);
	}
    }
  else if (TREE_CODE (id) == SCOPE_REF)
    id = lookup_qualified_name (TREE_OPERAND (id, 0),
				omp_reduction_id (ERROR_MARK,
						  TREE_OPERAND (id, 1),
						  type),
				false, false);
  tree fns = id;
  if (id && is_overloaded_fn (id))
    id = get_fns (id);
  /* Pick the overload whose (reference) parameter type matches
     TYPE exactly.  */
  for (; id; id = OVL_NEXT (id))
    {
      tree fndecl = OVL_CURRENT (id);
      if (TREE_CODE (fndecl) == FUNCTION_DECL)
	{
	  tree argtype = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
	  if (same_type_p (TREE_TYPE (argtype), type))
	    break;
	}
    }
  if (id && BASELINK_P (fns))
    {
      if (baselinkp)
	*baselinkp = fns;
      else
	baselink = fns;
    }
  /* Not found directly: for class types, search all direct bases,
     collecting any ambiguous candidates into AMBIGUOUSP.  */
  if (id == NULL_TREE && CLASS_TYPE_P (type) && TYPE_BINFO (type))
    {
      vec<tree> ambiguous = vNULL;
      tree binfo = TYPE_BINFO (type), base_binfo, ret = NULL_TREE;
      unsigned int ix;
      if (ambiguousp == NULL)
	ambiguousp = &ambiguous;
      for (ix = 0; BINFO_BASE_ITERATE (binfo, ix, base_binfo); ix++)
	{
	  id = omp_reduction_lookup (loc, orig_id, BINFO_TYPE (base_binfo),
				     baselinkp ? baselinkp : &baselink,
				     ambiguousp);
	  if (id == NULL_TREE)
	    continue;
	  if (!ambiguousp->is_empty ())
	    ambiguousp->safe_push (id);
	  else if (ret != NULL_TREE)
	    {
	      /* Second distinct match: record both and report
		 ambiguity at the outermost recursion level.  */
	      ambiguousp->safe_push (ret);
	      ambiguousp->safe_push (id);
	      ret = NULL_TREE;
	    }
	  else
	    ret = id;
	}
      /* Only the outermost call (owning AMBIGUOUS) diagnoses.  */
      if (ambiguousp != &ambiguous)
	return ret;
      if (!ambiguous.is_empty ())
	{
	  const char *str = _("candidates are:");
	  unsigned int idx;
	  tree udr;
	  error_at (loc, "user defined reduction lookup is ambiguous");
	  FOR_EACH_VEC_ELT (ambiguous, idx, udr)
	    {
	      inform (DECL_SOURCE_LOCATION (udr), "%s %#D", str, udr);
	      if (idx == 0)
		str = get_spaces (str);
	    }
	  ambiguous.release ();
	  ret = error_mark_node;
	  baselink = NULL_TREE;
	}
      id = ret;
    }
  if (id && baselink)
    perform_or_defer_access_check (BASELINK_BINFO (baselink),
				   id, id, tf_warning_or_error);
  return id;
}

/* Helper function for cp_parser_omp_declare_reduction_exprs and
   tsubst_omp_udr.  Remove CLEANUP_STMT for data (omp_priv variable).
   Also append INIT_EXPR for DECL_INITIAL of omp_priv after its
   DECL_EXPR.  */

tree
cp_remove_omp_priv_cleanup_stmt (tree *tp, int *walk_subtrees, void *data)
{
  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  else if (TREE_CODE (*tp) == CLEANUP_STMT && CLEANUP_DECL (*tp) == (tree) data)
    /* Strip the cleanup, keeping only its body.  */
    *tp = CLEANUP_BODY (*tp);
  else if (TREE_CODE (*tp) == DECL_EXPR)
    {
      tree decl = DECL_EXPR_DECL (*tp);
      if (!processing_template_decl
	  && decl == (tree) data
	  && DECL_INITIAL (decl)
	  && DECL_INITIAL (decl) != error_mark_node)
	{
	  /* Turn "T omp_priv = init;" into the DECL_EXPR followed by
	     an explicit INIT_EXPR, clearing DECL_INITIAL.  */
	  tree list = NULL_TREE;
	  append_to_statement_list_force (*tp, &list);
	  tree init_expr = build2 (INIT_EXPR, void_type_node,
				   decl, DECL_INITIAL (decl));
	  DECL_INITIAL (decl) = NULL_TREE;
	  append_to_statement_list_force (init_expr, &list);
	  *tp = list;
	}
    }
  return NULL_TREE;
}

/* Data passed from cp_check_omp_declare_reduction to
   cp_check_omp_declare_reduction_r.  */

struct cp_check_omp_declare_reduction_data
{
  /* Location of the UDR, for diagnostics.  */
  location_t loc;
  /* The up-to-7 toplevel statements of the UDR body (combiner decls
     and expression, then initializer decls and expression).  */
  tree stmts[7];
  /* True while checking the combiner, false for the initializer.  */
  bool combiner_p;
};

/* Helper function for cp_check_omp_declare_reduction, called
   via cp_walk_tree.
*/

static tree
cp_check_omp_declare_reduction_r (tree *tp, int *, void *data)
{
  struct cp_check_omp_declare_reduction_data *udr_data
    = (struct cp_check_omp_declare_reduction_data *) data;
  /* stmts[0]/stmts[1] declare omp_out/omp_in, stmts[3]/stmts[4]
     declare omp_priv/omp_orig; any other non-artificial variable
     reference is an error.  */
  if (SSA_VAR_P (*tp)
      && !DECL_ARTIFICIAL (*tp)
      && *tp != DECL_EXPR_DECL (udr_data->stmts[udr_data->combiner_p ? 0 : 3])
      && *tp != DECL_EXPR_DECL (udr_data->stmts[udr_data->combiner_p ? 1 : 4]))
    {
      location_t loc = udr_data->loc;
      if (udr_data->combiner_p)
	error_at (loc, "%<#pragma omp declare reduction%> combiner refers to "
		       "variable %qD which is not %<omp_out%> nor %<omp_in%>",
		  *tp);
      else
	error_at (loc, "%<#pragma omp declare reduction%> initializer refers "
		       "to variable %qD which is not %<omp_priv%> nor "
		       "%<omp_orig%>",
		  *tp);
      return *tp;
    }
  return NULL_TREE;
}

/* Diagnose violation of OpenMP #pragma omp declare reduction
   restrictions.  */

void
cp_check_omp_declare_reduction (tree udr)
{
  tree type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (udr)));
  gcc_assert (TREE_CODE (type) == REFERENCE_TYPE);
  type = TREE_TYPE (type);
  int i;
  location_t loc = DECL_SOURCE_LOCATION (udr);

  if (type == error_mark_node)
    return;
  if (ARITHMETIC_TYPE_P (type))
    {
      /* Reductions for these operators on arithmetic types are
	 predeclared by OpenMP and may not be redefined.  */
      static enum tree_code predef_codes[]
	= { PLUS_EXPR, MULT_EXPR, MINUS_EXPR, BIT_AND_EXPR, BIT_XOR_EXPR,
	    BIT_IOR_EXPR, TRUTH_ANDIF_EXPR, TRUTH_ORIF_EXPR };
      for (i = 0; i < 8; i++)
	{
	  tree id = omp_reduction_id (predef_codes[i], NULL_TREE, NULL_TREE);
	  const char *n1 = IDENTIFIER_POINTER (DECL_NAME (udr));
	  const char *n2 = IDENTIFIER_POINTER (id);
	  if (strncmp (n1, n2, IDENTIFIER_LENGTH (id)) == 0
	      && (n1[IDENTIFIER_LENGTH (id)] == '~'
		  || n1[IDENTIFIER_LENGTH (id)] == '\0'))
	    break;
	}

      /* Also reject "min"/"max" for non-complex arithmetic types.
	 NOTE(review): TREE_CODE (type) is a type code, so it can never
	 equal COMPLEX_EXPR (an expression code) and this condition is
	 always true; COMPLEX_TYPE looks intended — confirm against
	 upstream before changing, since fixing it alters behavior.  */
      if (i == 8
	  && TREE_CODE (type) != COMPLEX_EXPR)
	{
	  const char prefix_minmax[] = "omp declare reduction m";
	  size_t prefix_size = sizeof (prefix_minmax) - 1;
	  const char *n = IDENTIFIER_POINTER (DECL_NAME (udr));
	  if (strncmp (IDENTIFIER_POINTER (DECL_NAME (udr)),
		       prefix_minmax, prefix_size) == 0
	      && ((n[prefix_size] == 'i' && n[prefix_size + 1] == 'n')
		  || (n[prefix_size] == 'a' && n[prefix_size + 1] == 'x'))
	      && (n[prefix_size + 2] == '~' || n[prefix_size + 2] == '\0'))
	    i = 0;
	}
      if (i < 8)
	{
	  error_at (loc, "predeclared arithmetic type %qT in "
			 "%<#pragma omp declare reduction%>", type);
	  return;
	}
    }
  else if (TREE_CODE (type) == FUNCTION_TYPE
	   || TREE_CODE (type) == METHOD_TYPE
	   || TREE_CODE (type) == ARRAY_TYPE)
    {
      error_at (loc, "function or array type %qT in "
		     "%<#pragma omp declare reduction%>", type);
      return;
    }
  else if (TREE_CODE (type) == REFERENCE_TYPE)
    {
      error_at (loc, "reference type %qT in %<#pragma omp declare reduction%>",
		type);
      return;
    }
  else if (TYPE_QUALS_NO_ADDR_SPACE (type))
    {
      error_at (loc, "const, volatile or __restrict qualified type %qT in "
		     "%<#pragma omp declare reduction%>", type);
      return;
    }

  tree body = DECL_SAVED_TREE (udr);
  if (body == NULL_TREE || TREE_CODE (body) != STATEMENT_LIST)
    return;

  /* Collect the UDR body's toplevel statements and walk the combiner
     (stmts[2]) and initializer (stmts[5] and omp_priv's initial value)
     for references to disallowed variables.  TREE_NO_WARNING on the
     omp_out decl records that a diagnostic was already given.  */
  tree_stmt_iterator tsi;
  struct cp_check_omp_declare_reduction_data data;
  memset (data.stmts, 0, sizeof data.stmts);
  for (i = 0, tsi = tsi_start (body);
       i < 7 && !tsi_end_p (tsi);
       i++, tsi_next (&tsi))
    data.stmts[i] = tsi_stmt (tsi);
  data.loc = loc;
  gcc_assert (tsi_end_p (tsi));
  if (i >= 3)
    {
      gcc_assert (TREE_CODE (data.stmts[0]) == DECL_EXPR
		  && TREE_CODE (data.stmts[1]) == DECL_EXPR);
      if (TREE_NO_WARNING (DECL_EXPR_DECL (data.stmts[0])))
	return;
      data.combiner_p = true;
      if (cp_walk_tree (&data.stmts[2], cp_check_omp_declare_reduction_r,
			&data, NULL))
	TREE_NO_WARNING (DECL_EXPR_DECL (data.stmts[0])) = 1;
    }
  if (i >= 6)
    {
      gcc_assert (TREE_CODE (data.stmts[3]) == DECL_EXPR
		  && TREE_CODE (data.stmts[4]) == DECL_EXPR);
      data.combiner_p = false;
      if (cp_walk_tree (&data.stmts[5], cp_check_omp_declare_reduction_r,
			&data, NULL)
	  || cp_walk_tree (&DECL_INITIAL (DECL_EXPR_DECL (data.stmts[3])),
			   cp_check_omp_declare_reduction_r, &data, NULL))
	TREE_NO_WARNING (DECL_EXPR_DECL (data.stmts[0])) = 1;
      if (i == 7)
	gcc_assert (TREE_CODE (data.stmts[6]) == DECL_EXPR);
    }
}

/* Helper function of finish_omp_clauses.
   Clone STMT as if we were making an inline call.  But, remap
   the OMP_DECL1 VAR_DECL (omp_out resp. omp_orig) to PLACEHOLDER
   and OMP_DECL2 VAR_DECL (omp_in resp. omp_priv) to DECL.  */

static tree
clone_omp_udr (tree stmt, tree omp_decl1, tree omp_decl2,
	       tree decl, tree placeholder)
{
  copy_body_data id;
  hash_map<tree, tree> decl_map;

  /* Map the UDR's formal variables to the substitutes requested by
     the caller; copy_tree_body_r performs the substitution.  */
  decl_map.put (omp_decl1, placeholder);
  decl_map.put (omp_decl2, decl);
  memset (&id, 0, sizeof (id));
  id.src_fn = DECL_CONTEXT (omp_decl1);
  id.dst_fn = current_function_decl;
  id.src_cfun = DECL_STRUCT_FUNCTION (id.src_fn);
  id.decl_map = &decl_map;

  id.copy_decl = copy_decl_no_change;
  id.transform_call_graph_edges = CB_CGE_DUPLICATE;
  id.transform_new_cfg = true;
  id.transform_return_to_modify = false;
  id.transform_lang_insert_block = NULL;
  id.eh_lp_nr = 0;
  walk_tree (&stmt, copy_tree_body_r, &id, NULL);
  return stmt;
}

/* Helper function of finish_omp_clauses, called via cp_walk_tree.
   Find OMP_CLAUSE_PLACEHOLDER (passed in DATA) in *TP.  */

static tree
find_omp_placeholder_r (tree *tp, int *, void *data)
{
  if (*tp == (tree) data)
    return *tp;
  return NULL_TREE;
}

/* Helper function of finish_omp_clauses.  Handle OMP_CLAUSE_REDUCTION C.
   Return true if there is some error and the clause should be removed.
*/

static bool
finish_omp_reduction_clause (tree c, bool *need_default_ctor, bool *need_dtor)
{
  tree t = OMP_CLAUSE_DECL (c);
  bool predefined = false;
  tree type = TREE_TYPE (t);
  if (TREE_CODE (type) == REFERENCE_TYPE)
    type = TREE_TYPE (type);
  if (type == error_mark_node)
    return true;
  else if (ARITHMETIC_TYPE_P (type))
    /* Decide whether this operator/type combination is one of the
       OpenMP-predefined reductions; those need no UDR lookup.  */
    switch (OMP_CLAUSE_REDUCTION_CODE (c))
      {
      case PLUS_EXPR:
      case MULT_EXPR:
      case MINUS_EXPR:
	predefined = true;
	break;
      case MIN_EXPR:
      case MAX_EXPR:
	if (TREE_CODE (type) == COMPLEX_TYPE)
	  break;
	predefined = true;
	break;
      case BIT_AND_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
	if (FLOAT_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
	  break;
	predefined = true;
	break;
      case TRUTH_ANDIF_EXPR:
      case TRUTH_ORIF_EXPR:
	if (FLOAT_TYPE_P (type))
	  break;
	predefined = true;
	break;
      default:
	break;
      }
  else if (TREE_CODE (type) == ARRAY_TYPE || TYPE_READONLY (type))
    {
      error ("%qE has invalid type for %<reduction%>", t);
      return true;
    }
  else if (!processing_template_decl)
    {
      t = require_complete_type (t);
      if (t == error_mark_node)
	return true;
      OMP_CLAUSE_DECL (c) = t;
    }

  if (predefined)
    {
      OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL_TREE;
      return false;
    }
  else if (processing_template_decl)
    return false;

  tree id = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);

  type = TYPE_MAIN_VARIANT (TREE_TYPE (t));
  if (TREE_CODE (type) == REFERENCE_TYPE)
    type = TREE_TYPE (type);
  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL_TREE;
  if (id == NULL_TREE)
    id = omp_reduction_id (OMP_CLAUSE_REDUCTION_CODE (c),
			   NULL_TREE, NULL_TREE);
  id = omp_reduction_lookup (OMP_CLAUSE_LOCATION (c), id, type, NULL, NULL);
  if (id)
    {
      if (id == error_mark_node)
	return true;
      id = OVL_CURRENT (id);
      mark_used (id);
      tree body = DECL_SAVED_TREE (id);
      if (!body)
	return true;
      if (TREE_CODE (body) == STATEMENT_LIST)
	{
	  tree_stmt_iterator tsi;
	  tree placeholder = NULL_TREE;
	  int i;
	  tree stmts[7];
	  tree atype = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (id)));
	  atype = TREE_TYPE (atype);
	  /* The UDR may have been found for a base class ATYPE of
	     TYPE; then accesses go through a static_cast.  */
	  bool need_static_cast = !same_type_p (type, atype);
	  memset (stmts, 0, sizeof stmts);
	  /* Layout of the UDR body (see clone_omp_udr's comment):
	     stmts[0]/[1] declare omp_out/omp_in, stmts[2] is the
	     combiner; stmts[3]/[4] declare omp_priv/omp_orig,
	     stmts[5] is the initializer.  */
	  for (i = 0, tsi = tsi_start (body);
	       i < 7 && !tsi_end_p (tsi);
	       i++, tsi_next (&tsi))
	    stmts[i] = tsi_stmt (tsi);
	  gcc_assert (tsi_end_p (tsi));

	  if (i >= 3)
	    {
	      gcc_assert (TREE_CODE (stmts[0]) == DECL_EXPR
			  && TREE_CODE (stmts[1]) == DECL_EXPR);
	      placeholder = build_lang_decl (VAR_DECL, NULL_TREE, type);
	      DECL_ARTIFICIAL (placeholder) = 1;
	      DECL_IGNORED_P (placeholder) = 1;
	      OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = placeholder;
	      if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[0])))
		cxx_mark_addressable (placeholder);
	      if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[1]))
		  && TREE_CODE (TREE_TYPE (OMP_CLAUSE_DECL (c)))
		     != REFERENCE_TYPE)
		cxx_mark_addressable (OMP_CLAUSE_DECL (c));
	      tree omp_out = placeholder;
	      tree omp_in = convert_from_reference (OMP_CLAUSE_DECL (c));
	      if (need_static_cast)
		{
		  tree rtype = build_reference_type (atype);
		  omp_out = build_static_cast (rtype, omp_out,
					       tf_warning_or_error);
		  omp_in = build_static_cast (rtype, omp_in,
					      tf_warning_or_error);
		  if (omp_out == error_mark_node || omp_in == error_mark_node)
		    return true;
		  omp_out = convert_from_reference (omp_out);
		  omp_in = convert_from_reference (omp_in);
		}
	      OMP_CLAUSE_REDUCTION_MERGE (c)
		= clone_omp_udr (stmts[2], DECL_EXPR_DECL (stmts[0]),
				 DECL_EXPR_DECL (stmts[1]),
				 omp_in, omp_out);
	    }
	  if (i >= 6)
	    {
	      gcc_assert (TREE_CODE (stmts[3]) == DECL_EXPR
			  && TREE_CODE (stmts[4]) == DECL_EXPR);
	      if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[3])))
		cxx_mark_addressable (OMP_CLAUSE_DECL (c));
	      if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[4])))
		cxx_mark_addressable (placeholder);
	      tree omp_priv = convert_from_reference (OMP_CLAUSE_DECL (c));
	      tree omp_orig = placeholder;
	      if (need_static_cast)
		{
		  if (i == 7)
		    {
		      error_at (OMP_CLAUSE_LOCATION (c),
				"user defined reduction with constructor "
				"initializer for base class %qT", atype);
		      return true;
		    }
		  tree rtype = build_reference_type (atype);
		  omp_priv = build_static_cast (rtype, omp_priv,
						tf_warning_or_error);
		  omp_orig = build_static_cast (rtype, omp_orig,
						tf_warning_or_error);
		  if (omp_priv == error_mark_node
		      || omp_orig == error_mark_node)
		    return true;
		  omp_priv = convert_from_reference (omp_priv);
		  omp_orig = convert_from_reference (omp_orig);
		}
	      if (i == 6)
		*need_default_ctor = true;
	      OMP_CLAUSE_REDUCTION_INIT (c)
		= clone_omp_udr (stmts[5], DECL_EXPR_DECL (stmts[4]),
				 DECL_EXPR_DECL (stmts[3]),
				 omp_priv, omp_orig);
	      /* Record whether the initializer references omp_orig.  */
	      if (cp_walk_tree (&OMP_CLAUSE_REDUCTION_INIT (c),
				find_omp_placeholder_r, placeholder, NULL))
		OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c) = 1;
	    }
	  else if (i >= 3)
	    {
	      /* No initializer clause: default-construct class types,
		 zero-initialize everything else.  */
	      if (CLASS_TYPE_P (type) && !pod_type_p (type))
		*need_default_ctor = true;
	      else
		{
		  tree init;
		  tree v = convert_from_reference (t);
		  if (AGGREGATE_TYPE_P (TREE_TYPE (v)))
		    init = build_constructor (TREE_TYPE (v), NULL);
		  else
		    init = fold_convert (TREE_TYPE (v), integer_zero_node);
		  OMP_CLAUSE_REDUCTION_INIT (c)
		    = build2 (INIT_EXPR, TREE_TYPE (v), v, init);
		}
	    }
	}
    }
  if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
    *need_dtor = true;
  else
    {
      error ("user defined reduction not found for %qD", t);
      return true;
    }
  return false;
}

/* For all elements of CLAUSES, validate them vs OpenMP constraints.
   Remove any elements from the list that are invalid.
*/
/* Validate CLAUSES against the OpenMP/OpenACC constraints: diagnose
   invalid clauses and splice them out of the chain; return the pruned
   chain.  Works in two passes: pass 1 does per-clause syntactic/type
   checks and duplicate detection (via the DECL_UID bitmaps below);
   pass 2 does per-variable semantic checks (required special member
   functions, predetermined sharing, reduction handling).  */
tree finish_omp_clauses (tree clauses) { bitmap_head generic_head, firstprivate_head, lastprivate_head; bitmap_head aligned_head; tree c, t, *pc; bool branch_seen = false; bool copyprivate_seen = false; bitmap_obstack_initialize (NULL); bitmap_initialize (&generic_head, &bitmap_default_obstack); bitmap_initialize (&firstprivate_head, &bitmap_default_obstack); bitmap_initialize (&lastprivate_head, &bitmap_default_obstack); bitmap_initialize (&aligned_head, &bitmap_default_obstack);
/* Pass 1: per-clause checks.  PC trails C so that a removed clause can
   be unlinked in place.  */
for (pc = &clauses, c = clauses; c ; c = *pc) { bool remove = false; switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_SHARED: goto check_dup_generic; case OMP_CLAUSE_PRIVATE: goto check_dup_generic; case OMP_CLAUSE_REDUCTION: goto check_dup_generic; case OMP_CLAUSE_COPYPRIVATE: copyprivate_seen = true; goto check_dup_generic; case OMP_CLAUSE_COPYIN: goto check_dup_generic; case OMP_CLAUSE_LINEAR: t = OMP_CLAUSE_DECL (c); if ((VAR_P (t) || TREE_CODE (t) == PARM_DECL) && !type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t)) && TREE_CODE (TREE_TYPE (t)) != POINTER_TYPE) { error ("linear clause applied to non-integral non-pointer " "variable with %qT type", TREE_TYPE (t)); remove = true; break; } t = OMP_CLAUSE_LINEAR_STEP (c); if (t == NULL_TREE) t = integer_one_node; if (t == error_mark_node) { remove = true; break; } else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("linear step expression must be integral"); remove = true; break; } else { t = mark_rvalue_use (t); if (!processing_template_decl && (VAR_P (OMP_CLAUSE_DECL (c)) || TREE_CODE (OMP_CLAUSE_DECL (c)) == PARM_DECL)) { if (TREE_CODE (OMP_CLAUSE_DECL (c)) == PARM_DECL) t = maybe_constant_value (t); t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); if (TREE_CODE (TREE_TYPE (OMP_CLAUSE_DECL (c))) == POINTER_TYPE) {
/* For a pointer iterator the step is lowered to
   (decl + step) - decl, i.e. a sizetype byte offset.  */
t = pointer_int_sum (OMP_CLAUSE_LOCATION (c), PLUS_EXPR, OMP_CLAUSE_DECL (c), t); t = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR, sizetype, t,
OMP_CLAUSE_DECL (c)); if (t == error_mark_node) { remove = true; break; } } else t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (c)), t); } OMP_CLAUSE_LINEAR_STEP (c) = t; } goto check_dup_generic;
/* Shared tail: the clause operand must be a VAR_DECL or PARM_DECL and
   may appear at most once across the data clauses tracked by the
   generic/firstprivate/lastprivate bitmaps.  */
check_dup_generic: t = OMP_CLAUSE_DECL (c); if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (DECL_P (t)) error ("%qD is not a variable in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else error ("%qE is not a variable in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t)) || bitmap_bit_p (&lastprivate_head, DECL_UID (t))) { error ("%qD appears more than once in data clauses", t); remove = true; } else bitmap_set_bit (&generic_head, DECL_UID (t)); break; case OMP_CLAUSE_FIRSTPRIVATE: t = OMP_CLAUSE_DECL (c); if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (DECL_P (t)) error ("%qD is not a variable in clause %<firstprivate%>", t); else error ("%qE is not a variable in clause %<firstprivate%>", t); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t))) { error ("%qD appears more than once in data clauses", t); remove = true; } else bitmap_set_bit (&firstprivate_head, DECL_UID (t)); break; case OMP_CLAUSE_LASTPRIVATE: t = OMP_CLAUSE_DECL (c); if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (DECL_P (t)) error ("%qD is not a variable in clause %<lastprivate%>", t); else error ("%qE is not a variable in clause %<lastprivate%>", t); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&lastprivate_head, DECL_UID (t))) { error ("%qD appears more than once in data clauses", t); remove = true; } else bitmap_set_bit (&lastprivate_head, DECL_UID (t)); break; case OMP_CLAUSE_IF: t = OMP_CLAUSE_IF_EXPR (c); t = maybe_convert_cond
(t); if (t == error_mark_node) remove = true; else if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_IF_EXPR (c) = t; break; case OMP_CLAUSE_FINAL: t = OMP_CLAUSE_FINAL_EXPR (c); t = maybe_convert_cond (t); if (t == error_mark_node) remove = true; else if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_FINAL_EXPR (c) = t; break; case OMP_CLAUSE_NUM_THREADS: t = OMP_CLAUSE_NUM_THREADS_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("num_threads expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_NUM_THREADS_EXPR (c) = t; } break; case OMP_CLAUSE_SCHEDULE: t = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c); if (t == NULL) ; else if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && (OMP_CLAUSE_SCHEDULE_KIND (c) != OMP_CLAUSE_SCHEDULE_CILKFOR) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("schedule chunk size expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_CILKFOR) { t = convert_to_integer (long_integer_type_node, t); if (t == error_mark_node) { remove = true; break; } } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t; } break; case OMP_CLAUSE_SIMDLEN: case OMP_CLAUSE_SAFELEN: t = OMP_CLAUSE_OPERAND (c, 0); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%qs length expression must be integral", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else { t = mark_rvalue_use (t); t = maybe_constant_value (t); if (!processing_template_decl) { if (TREE_CODE (t) != INTEGER_CST || tree_int_cst_sgn (t)
!= 1) { error ("%qs length expression must be positive constant" " integer expression", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } } OMP_CLAUSE_OPERAND (c, 0) = t; } break; case OMP_CLAUSE_NUM_TEAMS: t = OMP_CLAUSE_NUM_TEAMS_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<num_teams%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_NUM_TEAMS_EXPR (c) = t; } break; case OMP_CLAUSE_ASYNC: t = OMP_CLAUSE_ASYNC_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<async%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_ASYNC_EXPR (c) = t; } break; case OMP_CLAUSE_VECTOR_LENGTH: t = OMP_CLAUSE_VECTOR_LENGTH_EXPR (c); t = maybe_convert_cond (t); if (t == error_mark_node) remove = true; else if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_VECTOR_LENGTH_EXPR (c) = t; break; case OMP_CLAUSE_WAIT: t = OMP_CLAUSE_WAIT_EXPR (c); if (t == error_mark_node) remove = true; else if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_WAIT_EXPR (c) = t; break; case OMP_CLAUSE_THREAD_LIMIT: t = OMP_CLAUSE_THREAD_LIMIT_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<thread_limit%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_THREAD_LIMIT_EXPR (c) = t; } break; case OMP_CLAUSE_DEVICE: t = OMP_CLAUSE_DEVICE_ID (c); if (t ==
error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<device%> id must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_DEVICE_ID (c) = t; } break; case OMP_CLAUSE_DIST_SCHEDULE: t = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (c); if (t == NULL) ; else if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<dist_schedule%> chunk size expression must be " "integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (c) = t; } break; case OMP_CLAUSE_ALIGNED: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) != VAR_DECL && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (DECL_P (t)) error ("%qD is not a variable in %<aligned%> clause", t); else error ("%qE is not a variable in %<aligned%> clause", t); remove = true; } else if (!type_dependent_expression_p (t) && TREE_CODE (TREE_TYPE (t)) != POINTER_TYPE && TREE_CODE (TREE_TYPE (t)) != ARRAY_TYPE && (TREE_CODE (TREE_TYPE (t)) != REFERENCE_TYPE || (!POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (t))) && (TREE_CODE (TREE_TYPE (TREE_TYPE (t))) != ARRAY_TYPE)))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE in %<aligned%> clause is neither a pointer nor " "an array nor a reference to pointer or array", t); remove = true; } else if (bitmap_bit_p (&aligned_head, DECL_UID (t))) { error ("%qD appears more than once in %<aligned%> clauses", t); remove = true; } else bitmap_set_bit (&aligned_head, DECL_UID (t)); t = OMP_CLAUSE_ALIGNED_ALIGNMENT (c); if (t == error_mark_node) remove = true; else if (t == NULL_TREE) break; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<aligned%> clause alignment expression must " "be integral"); remove = true; } else { t = mark_rvalue_use (t); t = maybe_constant_value (t); if (!processing_template_decl) { if (TREE_CODE (t) != INTEGER_CST || tree_int_cst_sgn (t) != 1) { error ("%<aligned%> clause alignment expression must be " "positive constant integer expression"); remove = true; } } OMP_CLAUSE_ALIGNED_ALIGNMENT (c) = t; } break; case OMP_CLAUSE_DEPEND: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) == TREE_LIST) { if (handle_omp_array_sections (c)) remove = true; break; } if (t == error_mark_node) remove = true; else if (TREE_CODE (t) != VAR_DECL && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (DECL_P (t)) error ("%qD is not a variable in %<depend%> clause", t); else error ("%qE is not a variable in %<depend%> clause", t); remove = true; } else if (!processing_template_decl && !cxx_mark_addressable (t)) remove = true; break; case OMP_CLAUSE_MAP: case OMP_CLAUSE_TO: case OMP_CLAUSE_FROM: case OMP_CLAUSE__CACHE_: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) == TREE_LIST) { if (handle_omp_array_sections (c)) remove = true; else { t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) != TREE_LIST && !type_dependent_expression_p (t) && !cp_omp_mappable_type (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "array section does not have mappable type " "in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } } break; } if (t == error_mark_node) remove = true; else if (TREE_CODE (t) != VAR_DECL && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER) break; if (DECL_P (t)) error ("%qD is not a variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else error ("%qE is not a variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (TREE_CODE (t) == VAR_DECL && DECL_THREAD_LOCAL_P (t)) { error ("%qD is threadprivate variable in %qs clause", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (!processing_template_decl && TREE_CODE (TREE_TYPE (t)) != REFERENCE_TYPE && !cxx_mark_addressable (t)) remove = true; else if (!(OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER) && !type_dependent_expression_p (t) && !cp_omp_mappable_type ((TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE) ? TREE_TYPE (TREE_TYPE (t)) : TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD does not have a mappable type in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t))) { if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP) error ("%qD appears more than once in motion clauses", t); else error ("%qD appears more than once in map clauses", t); remove = true; } else bitmap_set_bit (&generic_head, DECL_UID (t)); break; case OMP_CLAUSE_UNIFORM: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (DECL_P (t)) error ("%qD is not an argument in %<uniform%> clause", t); else error ("%qE is not an argument in %<uniform%> clause", t); remove = true; break; } goto check_dup_generic; case OMP_CLAUSE_NOWAIT: case OMP_CLAUSE_ORDERED: case OMP_CLAUSE_DEFAULT: case OMP_CLAUSE_UNTIED: case OMP_CLAUSE_COLLAPSE: case OMP_CLAUSE_MERGEABLE: case OMP_CLAUSE_PARALLEL: case OMP_CLAUSE_FOR: case OMP_CLAUSE_SECTIONS: case OMP_CLAUSE_TASKGROUP: case OMP_CLAUSE_PROC_BIND: case OMP_CLAUSE__CILK_FOR_COUNT_: break; case OMP_CLAUSE_INBRANCH: case OMP_CLAUSE_NOTINBRANCH: if (branch_seen) { error ("%<inbranch%> clause is incompatible with " "%<notinbranch%>"); remove = true; } branch_seen = true; break; default: gcc_unreachable (); } if (remove) *pc = OMP_CLAUSE_CHAIN (c); else pc = &OMP_CLAUSE_CHAIN (c); }
/* Pass 2: per-variable semantic checks.  Each data-sharing clause kind
   determines which special member functions the variable's class type
   must provide; results are cached via cxx_omp_create_clause_info.  */
for (pc = &clauses, c = clauses; c ; c = *pc) { enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c); bool remove = false; bool need_complete_non_reference = false; bool need_default_ctor = false;
bool need_copy_ctor = false; bool need_copy_assignment = false; bool need_implicitly_determined = false; bool need_dtor = false; tree type, inner_type; switch (c_kind) { case OMP_CLAUSE_SHARED: need_implicitly_determined = true; break; case OMP_CLAUSE_PRIVATE: need_complete_non_reference = true; need_default_ctor = true; need_dtor = true; need_implicitly_determined = true; break; case OMP_CLAUSE_FIRSTPRIVATE: need_complete_non_reference = true; need_copy_ctor = true; need_dtor = true; need_implicitly_determined = true; break; case OMP_CLAUSE_LASTPRIVATE: need_complete_non_reference = true; need_copy_assignment = true; need_implicitly_determined = true; break; case OMP_CLAUSE_REDUCTION: need_implicitly_determined = true; break; case OMP_CLAUSE_COPYPRIVATE: need_copy_assignment = true; break; case OMP_CLAUSE_COPYIN: need_copy_assignment = true; break; case OMP_CLAUSE_NOWAIT: if (copyprivate_seen) { error_at (OMP_CLAUSE_LOCATION (c), "%<nowait%> clause must not be used together " "with %<copyprivate%>"); *pc = OMP_CLAUSE_CHAIN (c); continue; } /* FALLTHRU */ default: pc = &OMP_CLAUSE_CHAIN (c); continue; } t = OMP_CLAUSE_DECL (c); if (processing_template_decl && !VAR_P (t) && TREE_CODE (t) != PARM_DECL) { pc = &OMP_CLAUSE_CHAIN (c); continue; } switch (c_kind) { case OMP_CLAUSE_LASTPRIVATE: if (!bitmap_bit_p (&firstprivate_head, DECL_UID (t))) { need_default_ctor = true; need_dtor = true; } break; case OMP_CLAUSE_REDUCTION: if (finish_omp_reduction_clause (c, &need_default_ctor, &need_dtor)) remove = true; else t = OMP_CLAUSE_DECL (c); break; case OMP_CLAUSE_COPYIN: if (!VAR_P (t) || !DECL_THREAD_LOCAL_P (t)) { error ("%qE must be %<threadprivate%> for %<copyin%>", t); remove = true; } break; default: break; } if (need_complete_non_reference || need_copy_assignment) { t = require_complete_type (t); if (t == error_mark_node) remove = true; else if (TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE && need_complete_non_reference) { error ("%qE has reference type for %qs", t,
omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } } if (need_implicitly_determined) { const char *share_name = NULL; if (VAR_P (t) && DECL_THREAD_LOCAL_P (t)) share_name = "threadprivate"; else switch (cxx_omp_predetermined_sharing (t)) { case OMP_CLAUSE_DEFAULT_UNSPECIFIED: break; case OMP_CLAUSE_DEFAULT_SHARED: /* const vars may be specified in firstprivate clause. */ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE && cxx_omp_const_qual_no_mutable (t)) break; share_name = "shared"; break; case OMP_CLAUSE_DEFAULT_PRIVATE: share_name = "private"; break; default: gcc_unreachable (); } if (share_name) { error ("%qE is predetermined %qs for %qs", t, share_name, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } } /* We're interested in the base element, not arrays. */ inner_type = type = TREE_TYPE (t); while (TREE_CODE (inner_type) == ARRAY_TYPE) inner_type = TREE_TYPE (inner_type); if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION && TREE_CODE (inner_type) == REFERENCE_TYPE) inner_type = TREE_TYPE (inner_type); /* Check for special function availability by building a call to one. Save the results, because later we won't be in the right context for making these queries. */ if (CLASS_TYPE_P (inner_type) && COMPLETE_TYPE_P (inner_type) && (need_default_ctor || need_copy_ctor || need_copy_assignment || need_dtor) && !type_dependent_expression_p (t) && cxx_omp_create_clause_info (c, inner_type, need_default_ctor, need_copy_ctor, need_copy_assignment, need_dtor)) remove = true; if (remove) *pc = OMP_CLAUSE_CHAIN (c); else pc = &OMP_CLAUSE_CHAIN (c); } bitmap_obstack_release (NULL); return clauses; } /* For all variables in the tree_list VARS, mark them as thread local. */ void finish_omp_threadprivate (tree vars) { tree t; /* Mark every variable in VARS to be assigned thread local storage.
*/ for (t = vars; t; t = TREE_CHAIN (t)) { tree v = TREE_PURPOSE (t); if (error_operand_p (v)) ; else if (!VAR_P (v)) error ("%<threadprivate%> %qD is not file, namespace " "or block scope variable", v); /* If V had already been marked threadprivate, it doesn't matter whether it had been used prior to this point. */ else if (TREE_USED (v) && (DECL_LANG_SPECIFIC (v) == NULL || !CP_DECL_THREADPRIVATE_P (v))) error ("%qE declared %<threadprivate%> after first use", v); else if (! TREE_STATIC (v) && ! DECL_EXTERNAL (v)) error ("automatic variable %qE cannot be %<threadprivate%>", v); else if (! COMPLETE_TYPE_P (complete_type (TREE_TYPE (v)))) error ("%<threadprivate%> %qE has incomplete type", v); else if (TREE_STATIC (v) && TYPE_P (CP_DECL_CONTEXT (v)) && CP_DECL_CONTEXT (v) != current_class_type) error ("%<threadprivate%> %qE directive not " "in %qT definition", v, CP_DECL_CONTEXT (v)); else { /* Allocate a LANG_SPECIFIC structure for V, if needed. */ if (DECL_LANG_SPECIFIC (v) == NULL) { retrofit_lang_decl (v); /* Make sure that DECL_DISCRIMINATOR_P continues to be true after the allocation of the lang_decl structure. */ if (DECL_DISCRIMINATOR_P (v)) DECL_LANG_SPECIFIC (v)->u.base.u2sel = 1; } if (! DECL_THREAD_LOCAL_P (v)) { set_decl_tls_model (v, decl_default_tls_model (v)); /* If rtl has been already set for this var, call make_decl_rtl once again, so that encode_section_info has a chance to look at the new decl flags. */ if (DECL_RTL_SET_P (v)) make_decl_rtl (v); } CP_DECL_THREADPRIVATE_P (v) = 1; } } } /* Build an OpenMP structured block. */ tree begin_omp_structured_block (void) { return do_pushlevel (sk_omp); } tree finish_omp_structured_block (tree block) { return do_poplevel (block); } /* Generate OACC_DATA, with CLAUSES and BLOCK as its compound statement. LOC is the location of the OACC_DATA. 
*/ tree finish_oacc_data (tree clauses, tree block) { tree stmt; block = finish_omp_structured_block (block); stmt = make_node (OACC_DATA); TREE_TYPE (stmt) = void_type_node; OACC_DATA_CLAUSES (stmt) = clauses; OACC_DATA_BODY (stmt) = block; return add_stmt (stmt); } /* Generate OACC_KERNELS, with CLAUSES and BLOCK as its compound statement. LOC is the location of the OACC_KERNELS. */ tree finish_oacc_kernels (tree clauses, tree block) { tree stmt; block = finish_omp_structured_block (block); stmt = make_node (OACC_KERNELS); TREE_TYPE (stmt) = void_type_node; OACC_KERNELS_CLAUSES (stmt) = clauses; OACC_KERNELS_BODY (stmt) = block; return add_stmt (stmt); } /* Generate OACC_PARALLEL, with CLAUSES and BLOCK as its compound statement. LOC is the location of the OACC_PARALLEL. */ tree finish_oacc_parallel (tree clauses, tree block) { tree stmt; block = finish_omp_structured_block (block); stmt = make_node (OACC_PARALLEL); TREE_TYPE (stmt) = void_type_node; OACC_PARALLEL_CLAUSES (stmt) = clauses; OACC_PARALLEL_BODY (stmt) = block; return add_stmt (stmt); } /* Similarly, except force the retention of the BLOCK. */ tree begin_omp_parallel (void) { keep_next_level (true); return begin_omp_structured_block (); } tree finish_omp_parallel (tree clauses, tree body) { tree stmt; body = finish_omp_structured_block (body); stmt = make_node (OMP_PARALLEL); TREE_TYPE (stmt) = void_type_node; OMP_PARALLEL_CLAUSES (stmt) = clauses; OMP_PARALLEL_BODY (stmt) = body; return add_stmt (stmt); } tree begin_omp_task (void) { keep_next_level (true); return begin_omp_structured_block (); } tree finish_omp_task (tree clauses, tree body) { tree stmt; body = finish_omp_structured_block (body); stmt = make_node (OMP_TASK); TREE_TYPE (stmt) = void_type_node; OMP_TASK_CLAUSES (stmt) = clauses; OMP_TASK_BODY (stmt) = body; return add_stmt (stmt); } /* Helper function for finish_omp_for. Convert Ith random access iterator into integral iterator. Return FALSE if successful. 
*/
/* Rewrites the Ith collapsed loop of an OMP for, whose iteration
   variable is of class type, into a loop over an integral trip
   counter.  DECLV/INITV/CONDV/INCRV hold the per-loop operand vectors
   and are updated in place; *BODY and *PRE_BODY are rewritten, and the
   temporary tracking the last counter value is returned via *LASTP.
   Returns true on error (after diagnosing), false on success.  */
static bool handle_omp_for_class_iterator (int i, location_t locus, tree declv, tree initv, tree condv, tree incrv, tree *body, tree *pre_body, tree clauses, tree *lastp) { tree diff, iter_init, iter_incr = NULL, last; tree incr_var = NULL, orig_pre_body, orig_body, c; tree decl = TREE_VEC_ELT (declv, i); tree init = TREE_VEC_ELT (initv, i); tree cond = TREE_VEC_ELT (condv, i); tree incr = TREE_VEC_ELT (incrv, i); tree iter = decl; location_t elocus = locus; if (init && EXPR_HAS_LOCATION (init)) elocus = EXPR_LOCATION (init);
/* Canonicalize the controlling predicate so ITER is the left operand,
   and verify the comparison is well-formed.  */
switch (TREE_CODE (cond)) { case GT_EXPR: case GE_EXPR: case LT_EXPR: case LE_EXPR: case NE_EXPR: if (TREE_OPERAND (cond, 1) == iter) cond = build2 (swap_tree_comparison (TREE_CODE (cond)), TREE_TYPE (cond), iter, TREE_OPERAND (cond, 0)); if (TREE_OPERAND (cond, 0) != iter) cond = error_mark_node; else { tree tem = build_x_binary_op (EXPR_LOCATION (cond), TREE_CODE (cond), iter, ERROR_MARK, TREE_OPERAND (cond, 1), ERROR_MARK, NULL, tf_warning_or_error); if (error_operand_p (tem)) return true; } break; default: cond = error_mark_node; break; } if (cond == error_mark_node) { error_at (elocus, "invalid controlling predicate"); return true; }
/* The trip count is (limit - iter), which must have integer type.  */
diff = build_x_binary_op (elocus, MINUS_EXPR, TREE_OPERAND (cond, 1), ERROR_MARK, iter, ERROR_MARK, NULL, tf_warning_or_error); if (error_operand_p (diff)) return true; if (TREE_CODE (TREE_TYPE (diff)) != INTEGER_TYPE) { error_at (elocus, "difference between %qE and %qD does not have integer type", TREE_OPERAND (cond, 1), iter); return true; }
/* Decompose the increment expression: extract the integral step INCR
   and, where possible, an expression ITER_INCR advancing the iterator.  */
switch (TREE_CODE (incr)) { case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: if (TREE_OPERAND (incr, 0) != iter) { incr = error_mark_node; break; } iter_incr = build_x_unary_op (EXPR_LOCATION (incr), TREE_CODE (incr), iter, tf_warning_or_error); if (error_operand_p (iter_incr)) return true; else if (TREE_CODE (incr) == PREINCREMENT_EXPR || TREE_CODE (incr) == POSTINCREMENT_EXPR) incr = integer_one_node; else incr =
integer_minus_one_node; break; case MODIFY_EXPR: if (TREE_OPERAND (incr, 0) != iter) incr = error_mark_node; else if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR || TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR) { tree rhs = TREE_OPERAND (incr, 1); if (TREE_OPERAND (rhs, 0) == iter) { if (TREE_CODE (TREE_TYPE (TREE_OPERAND (rhs, 1))) != INTEGER_TYPE) incr = error_mark_node; else { iter_incr = build_x_modify_expr (EXPR_LOCATION (rhs), iter, TREE_CODE (rhs), TREE_OPERAND (rhs, 1), tf_warning_or_error); if (error_operand_p (iter_incr)) return true; incr = TREE_OPERAND (rhs, 1); incr = cp_convert (TREE_TYPE (diff), incr, tf_warning_or_error); if (TREE_CODE (rhs) == MINUS_EXPR) { incr = build1 (NEGATE_EXPR, TREE_TYPE (diff), incr); incr = fold_if_not_in_template (incr); } if (TREE_CODE (incr) != INTEGER_CST && (TREE_CODE (incr) != NOP_EXPR || (TREE_CODE (TREE_OPERAND (incr, 0)) != INTEGER_CST))) iter_incr = NULL; } } else if (TREE_OPERAND (rhs, 1) == iter) { if (TREE_CODE (TREE_TYPE (TREE_OPERAND (rhs, 0))) != INTEGER_TYPE || TREE_CODE (rhs) != PLUS_EXPR) incr = error_mark_node; else { iter_incr = build_x_binary_op (EXPR_LOCATION (rhs), PLUS_EXPR, TREE_OPERAND (rhs, 0), ERROR_MARK, iter, ERROR_MARK, NULL, tf_warning_or_error); if (error_operand_p (iter_incr)) return true; iter_incr = build_x_modify_expr (EXPR_LOCATION (rhs), iter, NOP_EXPR, iter_incr, tf_warning_or_error); if (error_operand_p (iter_incr)) return true; incr = TREE_OPERAND (rhs, 0); iter_incr = NULL; } } else incr = error_mark_node; } else incr = error_mark_node; break; default: incr = error_mark_node; break; } if (incr == error_mark_node) { error_at (elocus, "invalid increment expression"); return true; } incr = cp_convert (TREE_TYPE (diff), incr, tf_warning_or_error);
/* Look for a lastprivate clause naming ITER; if present, its update
   statement is filled in below.  */
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE && OMP_CLAUSE_DECL (c) == iter) break;
/* Lower the loop: DECL counts from 0 towards DIFF, LAST remembers the
   previous counter value so ITER can be advanced by the delta inside
   the body.  */
decl = create_temporary_var (TREE_TYPE (diff)); pushdecl (decl); add_decl_expr (decl); last =
create_temporary_var (TREE_TYPE (diff)); pushdecl (last); add_decl_expr (last); if (c && iter_incr == NULL) { incr_var = create_temporary_var (TREE_TYPE (diff)); pushdecl (incr_var); add_decl_expr (incr_var); } gcc_assert (stmts_are_full_exprs_p ()); orig_pre_body = *pre_body; *pre_body = push_stmt_list (); if (orig_pre_body) add_stmt (orig_pre_body); if (init != NULL) finish_expr_stmt (build_x_modify_expr (elocus, iter, NOP_EXPR, init, tf_warning_or_error)); init = build_int_cst (TREE_TYPE (diff), 0); if (c && iter_incr == NULL) { finish_expr_stmt (build_x_modify_expr (elocus, incr_var, NOP_EXPR, incr, tf_warning_or_error)); incr = incr_var; iter_incr = build_x_modify_expr (elocus, iter, PLUS_EXPR, incr, tf_warning_or_error); } finish_expr_stmt (build_x_modify_expr (elocus, last, NOP_EXPR, init, tf_warning_or_error)); *pre_body = pop_stmt_list (*pre_body); cond = cp_build_binary_op (elocus, TREE_CODE (cond), decl, diff, tf_warning_or_error); incr = build_modify_expr (elocus, decl, NULL_TREE, PLUS_EXPR, elocus, incr, NULL_TREE); orig_body = *body; *body = push_stmt_list (); iter_init = build2 (MINUS_EXPR, TREE_TYPE (diff), decl, last); iter_init = build_x_modify_expr (elocus, iter, PLUS_EXPR, iter_init, tf_warning_or_error); if (iter_init != error_mark_node) iter_init = build1 (NOP_EXPR, void_type_node, iter_init); finish_expr_stmt (iter_init); finish_expr_stmt (build_x_modify_expr (elocus, last, NOP_EXPR, decl, tf_warning_or_error)); add_stmt (orig_body); *body = pop_stmt_list (*body); if (c) { OMP_CLAUSE_LASTPRIVATE_STMT (c) = push_stmt_list (); finish_expr_stmt (iter_incr); OMP_CLAUSE_LASTPRIVATE_STMT (c) = pop_stmt_list (OMP_CLAUSE_LASTPRIVATE_STMT (c)); } TREE_VEC_ELT (declv, i) = decl; TREE_VEC_ELT (initv, i) = init; TREE_VEC_ELT (condv, i) = cond; TREE_VEC_ELT (incrv, i) = incr; *lastp = last; return false; } /* Build and validate an OMP_FOR statement. CLAUSES, BODY, COND, INCR are directly for their associated operands in the statement.
DECL and INIT are a combo; if DECL is NULL then INIT ought to be a MODIFY_EXPR, and the DECL should be extracted. PRE_BODY are optional statements that need to go before the loop into its sk_omp scope. */
/* Returns the statement added to the current statement list, or NULL
   after diagnosing an invalid loop form.  CODE distinguishes OMP_FOR,
   OMP_SIMD, CILK_FOR, etc.  */
tree finish_omp_for (location_t locus, enum tree_code code, tree declv, tree initv, tree condv, tree incrv, tree body, tree pre_body, tree clauses) { tree omp_for = NULL, orig_incr = NULL; tree decl = NULL, init, cond, incr, orig_decl = NULL_TREE, block = NULL_TREE; tree last = NULL_TREE; location_t elocus; int i; gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv)); gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv)); gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
/* First pass over the collapsed loops: recover DECL from INIT where
   necessary and require a predicate and an increment for each loop.  */
for (i = 0; i < TREE_VEC_LENGTH (declv); i++) { decl = TREE_VEC_ELT (declv, i); init = TREE_VEC_ELT (initv, i); cond = TREE_VEC_ELT (condv, i); incr = TREE_VEC_ELT (incrv, i); elocus = locus; if (decl == NULL) { if (init != NULL) switch (TREE_CODE (init)) { case MODIFY_EXPR: decl = TREE_OPERAND (init, 0); init = TREE_OPERAND (init, 1); break; case MODOP_EXPR: if (TREE_CODE (TREE_OPERAND (init, 1)) == NOP_EXPR) { decl = TREE_OPERAND (init, 0); init = TREE_OPERAND (init, 2); } break; default: break; } if (decl == NULL) { error_at (locus, "expected iteration declaration or initialization"); return NULL; } } if (init && EXPR_HAS_LOCATION (init)) elocus = EXPR_LOCATION (init); if (cond == NULL) { error_at (elocus, "missing controlling predicate"); return NULL; } if (incr == NULL) { error_at (elocus, "missing increment expression"); return NULL; } TREE_VEC_ELT (declv, i) = decl; TREE_VEC_ELT (initv, i) = init; }
/* Inside a template with dependent operands, just record a bare node
   to be re-decomposed at instantiation time.  */
if (dependent_omp_for_p (declv, initv, condv, incrv)) { tree stmt; stmt = make_node (code); for (i = 0; i < TREE_VEC_LENGTH (declv); i++) { /* This is really just a place-holder. We'll be decomposing this again and going through the cp_build_modify_expr path below when we instantiate the thing.
*/ TREE_VEC_ELT (initv, i) = build2 (MODIFY_EXPR, void_type_node, TREE_VEC_ELT (declv, i), TREE_VEC_ELT (initv, i)); } TREE_TYPE (stmt) = void_type_node; OMP_FOR_INIT (stmt) = initv; OMP_FOR_COND (stmt) = condv; OMP_FOR_INCR (stmt) = incrv; OMP_FOR_BODY (stmt) = body; OMP_FOR_PRE_BODY (stmt) = pre_body; OMP_FOR_CLAUSES (stmt) = clauses; SET_EXPR_LOCATION (stmt, locus); return add_stmt (stmt); } if (processing_template_decl) orig_incr = make_tree_vec (TREE_VEC_LENGTH (incrv));
/* Second pass: fold each loop's operands; class-type iterators are
   rewritten to integral counters (which restarts the iteration for
   that index, hence no i++ in that branch).  */
for (i = 0; i < TREE_VEC_LENGTH (declv); ) { decl = TREE_VEC_ELT (declv, i); init = TREE_VEC_ELT (initv, i); cond = TREE_VEC_ELT (condv, i); incr = TREE_VEC_ELT (incrv, i); if (orig_incr) TREE_VEC_ELT (orig_incr, i) = incr; elocus = locus; if (init && EXPR_HAS_LOCATION (init)) elocus = EXPR_LOCATION (init); if (!DECL_P (decl)) { error_at (elocus, "expected iteration declaration or initialization"); return NULL; } if (incr && TREE_CODE (incr) == MODOP_EXPR) { if (orig_incr) TREE_VEC_ELT (orig_incr, i) = incr; incr = cp_build_modify_expr (TREE_OPERAND (incr, 0), TREE_CODE (TREE_OPERAND (incr, 1)), TREE_OPERAND (incr, 2), tf_warning_or_error); } if (CLASS_TYPE_P (TREE_TYPE (decl))) { if (code == OMP_SIMD) { error_at (elocus, "%<#pragma omp simd%> used with class " "iteration variable %qE", decl); return NULL; } if (code == CILK_FOR && i == 0) orig_decl = decl; if (handle_omp_for_class_iterator (i, locus, declv, initv, condv, incrv, &body, &pre_body, clauses, &last)) return NULL; continue; } if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)) && !TYPE_PTR_P (TREE_TYPE (decl))) { error_at (elocus, "invalid type for iteration variable %qE", decl); return NULL; } if (!processing_template_decl) { init = fold_build_cleanup_point_expr (TREE_TYPE (init), init); init = cp_build_modify_expr (decl, NOP_EXPR, init, tf_warning_or_error); } else init = build2 (MODIFY_EXPR, void_type_node, decl, init); if (cond && TREE_SIDE_EFFECTS (cond) && COMPARISON_CLASS_P (cond) && !processing_template_decl) { tree t = TREE_OPERAND
(cond, 0); if (TREE_SIDE_EFFECTS (t) && t != decl && (TREE_CODE (t) != NOP_EXPR || TREE_OPERAND (t, 0) != decl)) TREE_OPERAND (cond, 0) = fold_build_cleanup_point_expr (TREE_TYPE (t), t); t = TREE_OPERAND (cond, 1); if (TREE_SIDE_EFFECTS (t) && t != decl && (TREE_CODE (t) != NOP_EXPR || TREE_OPERAND (t, 0) != decl)) TREE_OPERAND (cond, 1) = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } if (decl == error_mark_node || init == error_mark_node) return NULL; TREE_VEC_ELT (declv, i) = decl; TREE_VEC_ELT (initv, i) = init; TREE_VEC_ELT (condv, i) = cond; TREE_VEC_ELT (incrv, i) = incr; i++; } if (IS_EMPTY_STMT (pre_body)) pre_body = NULL;
/* For a non-template Cilk for loop, collect the lowered statements so
   they can be wrapped in an OMP_PARALLEL below.  */
if (code == CILK_FOR && !processing_template_decl) block = push_stmt_list (); omp_for = c_finish_omp_for (locus, code, declv, initv, condv, incrv, body, pre_body); if (omp_for == NULL) { if (block) pop_stmt_list (block); return NULL; }
/* Wrap side-effecting increment sub-expressions in cleanup points,
   then restore the original (unfolded) increments in a template.  */
for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INCR (omp_for)); i++) { decl = TREE_OPERAND (TREE_VEC_ELT (OMP_FOR_INIT (omp_for), i), 0); incr = TREE_VEC_ELT (OMP_FOR_INCR (omp_for), i); if (TREE_CODE (incr) != MODIFY_EXPR) continue; if (TREE_SIDE_EFFECTS (TREE_OPERAND (incr, 1)) && BINARY_CLASS_P (TREE_OPERAND (incr, 1)) && !processing_template_decl) { tree t = TREE_OPERAND (TREE_OPERAND (incr, 1), 0); if (TREE_SIDE_EFFECTS (t) && t != decl && (TREE_CODE (t) != NOP_EXPR || TREE_OPERAND (t, 0) != decl)) TREE_OPERAND (TREE_OPERAND (incr, 1), 0) = fold_build_cleanup_point_expr (TREE_TYPE (t), t); t = TREE_OPERAND (TREE_OPERAND (incr, 1), 1); if (TREE_SIDE_EFFECTS (t) && t != decl && (TREE_CODE (t) != NOP_EXPR || TREE_OPERAND (t, 0) != decl)) TREE_OPERAND (TREE_OPERAND (incr, 1), 1) = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } if (orig_incr) TREE_VEC_ELT (OMP_FOR_INCR (omp_for), i) = TREE_VEC_ELT (orig_incr, i); } OMP_FOR_CLAUSES (omp_for) = clauses;
/* BLOCK is non-null only for a non-template Cilk for: build the
   enclosing OMP_PARALLEL, hoisting non-constant bound/step/init
   expressions into firstprivate temporaries.  */
if (block) { tree omp_par = make_node (OMP_PARALLEL); TREE_TYPE (omp_par) = void_type_node; OMP_PARALLEL_CLAUSES (omp_par) = NULL_TREE; tree bind =
build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL); TREE_SIDE_EFFECTS (bind) = 1; BIND_EXPR_BODY (bind) = pop_stmt_list (block); OMP_PARALLEL_BODY (omp_par) = bind; if (OMP_FOR_PRE_BODY (omp_for)) { add_stmt (OMP_FOR_PRE_BODY (omp_for)); OMP_FOR_PRE_BODY (omp_for) = NULL_TREE; } init = TREE_VEC_ELT (OMP_FOR_INIT (omp_for), 0); decl = TREE_OPERAND (init, 0); cond = TREE_VEC_ELT (OMP_FOR_COND (omp_for), 0); incr = TREE_VEC_ELT (OMP_FOR_INCR (omp_for), 0); tree t = TREE_OPERAND (cond, 1), c, clauses, *pc; clauses = OMP_FOR_CLAUSES (omp_for); OMP_FOR_CLAUSES (omp_for) = NULL_TREE; for (pc = &clauses; *pc; ) if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_SCHEDULE) { gcc_assert (OMP_FOR_CLAUSES (omp_for) == NULL_TREE); OMP_FOR_CLAUSES (omp_for) = *pc; *pc = OMP_CLAUSE_CHAIN (*pc); OMP_CLAUSE_CHAIN (OMP_FOR_CLAUSES (omp_for)) = NULL_TREE; } else { gcc_assert (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_FIRSTPRIVATE); pc = &OMP_CLAUSE_CHAIN (*pc); } if (TREE_CODE (t) != INTEGER_CST) { TREE_OPERAND (cond, 1) = get_temp_regvar (TREE_TYPE (t), t); c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = TREE_OPERAND (cond, 1); OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } if (TREE_CODE (incr) == MODIFY_EXPR) { t = TREE_OPERAND (TREE_OPERAND (incr, 1), 1); if (TREE_CODE (t) != INTEGER_CST) { TREE_OPERAND (TREE_OPERAND (incr, 1), 1) = get_temp_regvar (TREE_TYPE (t), t); c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = TREE_OPERAND (TREE_OPERAND (incr, 1), 1); OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } } t = TREE_OPERAND (init, 1); if (TREE_CODE (t) != INTEGER_CST) { TREE_OPERAND (init, 1) = get_temp_regvar (TREE_TYPE (t), t); c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = TREE_OPERAND (init, 1); OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } if (orig_decl && orig_decl != decl) { c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = orig_decl;
OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } if (last) { c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = last; OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } c = build_omp_clause (input_location, OMP_CLAUSE_PRIVATE); OMP_CLAUSE_DECL (c) = decl; OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; c = build_omp_clause (input_location, OMP_CLAUSE__CILK_FOR_COUNT_); OMP_CLAUSE_OPERAND (c, 0) = cilk_for_number_of_iterations (omp_for); OMP_CLAUSE_CHAIN (c) = clauses; OMP_PARALLEL_CLAUSES (omp_par) = finish_omp_clauses (c); add_stmt (omp_par); return omp_par; } else if (code == CILK_FOR && processing_template_decl) { tree c, clauses = OMP_FOR_CLAUSES (omp_for); if (orig_decl && orig_decl != decl) { c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = orig_decl; OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } if (last) { c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = last; OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } OMP_FOR_CLAUSES (omp_for) = clauses; } return omp_for; } void finish_omp_atomic (enum tree_code code, enum tree_code opcode, tree lhs, tree rhs, tree v, tree lhs1, tree rhs1, bool seq_cst) { tree orig_lhs; tree orig_rhs; tree orig_v; tree orig_lhs1; tree orig_rhs1; bool dependent_p; tree stmt; orig_lhs = lhs; orig_rhs = rhs; orig_v = v; orig_lhs1 = lhs1; orig_rhs1 = rhs1; dependent_p = false; stmt = NULL_TREE; /* Even in a template, we can detect invalid uses of the atomic pragma if neither LHS nor RHS is type-dependent.
*/ if (processing_template_decl) { dependent_p = (type_dependent_expression_p (lhs) || (rhs && type_dependent_expression_p (rhs)) || (v && type_dependent_expression_p (v)) || (lhs1 && type_dependent_expression_p (lhs1)) || (rhs1 && type_dependent_expression_p (rhs1))); if (!dependent_p) { lhs = build_non_dependent_expr (lhs); if (rhs) rhs = build_non_dependent_expr (rhs); if (v) v = build_non_dependent_expr (v); if (lhs1) lhs1 = build_non_dependent_expr (lhs1); if (rhs1) rhs1 = build_non_dependent_expr (rhs1); } } if (!dependent_p) { bool swapped = false; if (rhs1 && cp_tree_equal (lhs, rhs)) { tree tem = rhs; rhs = rhs1; rhs1 = tem; swapped = !commutative_tree_code (opcode); } if (rhs1 && !cp_tree_equal (lhs, rhs1)) { if (code == OMP_ATOMIC) error ("%<#pragma omp atomic update%> uses two different " "expressions for memory"); else error ("%<#pragma omp atomic capture%> uses two different " "expressions for memory"); return; } if (lhs1 && !cp_tree_equal (lhs, lhs1)) { if (code == OMP_ATOMIC) error ("%<#pragma omp atomic update%> uses two different " "expressions for memory"); else error ("%<#pragma omp atomic capture%> uses two different " "expressions for memory"); return; } stmt = c_finish_omp_atomic (input_location, code, opcode, lhs, rhs, v, lhs1, rhs1, swapped, seq_cst); if (stmt == error_mark_node) return; } if (processing_template_decl) { if (code == OMP_ATOMIC_READ) { stmt = build_min_nt_loc (EXPR_LOCATION (orig_lhs), OMP_ATOMIC_READ, orig_lhs); OMP_ATOMIC_SEQ_CST (stmt) = seq_cst; stmt = build2 (MODIFY_EXPR, void_type_node, orig_v, stmt); } else { if (opcode == NOP_EXPR) stmt = build2 (MODIFY_EXPR, void_type_node, orig_lhs, orig_rhs); else stmt = build2 (opcode, void_type_node, orig_lhs, orig_rhs); if (orig_rhs1) stmt = build_min_nt_loc (EXPR_LOCATION (orig_rhs1), COMPOUND_EXPR, orig_rhs1, stmt); if (code != OMP_ATOMIC) { stmt = build_min_nt_loc (EXPR_LOCATION (orig_lhs1), code, orig_lhs1, stmt); OMP_ATOMIC_SEQ_CST (stmt) = seq_cst; stmt = build2 
(MODIFY_EXPR, void_type_node, orig_v, stmt); } } stmt = build2 (OMP_ATOMIC, void_type_node, integer_zero_node, stmt); OMP_ATOMIC_SEQ_CST (stmt) = seq_cst; } finish_expr_stmt (stmt); } void finish_omp_barrier (void) { tree fn = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER); vec<tree, va_gc> *vec = make_tree_vector (); tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error); release_tree_vector (vec); finish_expr_stmt (stmt); } void finish_omp_flush (void) { tree fn = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE); vec<tree, va_gc> *vec = make_tree_vector (); tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error); release_tree_vector (vec); finish_expr_stmt (stmt); } void finish_omp_taskwait (void) { tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT); vec<tree, va_gc> *vec = make_tree_vector (); tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error); release_tree_vector (vec); finish_expr_stmt (stmt); } void finish_omp_taskyield (void) { tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD); vec<tree, va_gc> *vec = make_tree_vector (); tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error); release_tree_vector (vec); finish_expr_stmt (stmt); } void finish_omp_cancel (tree clauses) { tree fn = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL); int mask = 0; if (find_omp_clause (clauses, OMP_CLAUSE_PARALLEL)) mask = 1; else if (find_omp_clause (clauses, OMP_CLAUSE_FOR)) mask = 2; else if (find_omp_clause (clauses, OMP_CLAUSE_SECTIONS)) mask = 4; else if (find_omp_clause (clauses, OMP_CLAUSE_TASKGROUP)) mask = 8; else { error ("%<#pragma omp cancel must specify one of " "%<parallel%>, %<for%>, %<sections%> or %<taskgroup%> clauses"); return; } vec<tree, va_gc> *vec = make_tree_vector (); tree ifc = find_omp_clause (clauses, OMP_CLAUSE_IF); if (ifc != NULL_TREE) { tree type = TREE_TYPE (OMP_CLAUSE_IF_EXPR (ifc)); ifc = fold_build2_loc (OMP_CLAUSE_LOCATION (ifc), NE_EXPR, 
boolean_type_node, OMP_CLAUSE_IF_EXPR (ifc), build_zero_cst (type)); } else ifc = boolean_true_node; vec->quick_push (build_int_cst (integer_type_node, mask)); vec->quick_push (ifc); tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error); release_tree_vector (vec); finish_expr_stmt (stmt); } void finish_omp_cancellation_point (tree clauses) { tree fn = builtin_decl_explicit (BUILT_IN_GOMP_CANCELLATION_POINT); int mask = 0; if (find_omp_clause (clauses, OMP_CLAUSE_PARALLEL)) mask = 1; else if (find_omp_clause (clauses, OMP_CLAUSE_FOR)) mask = 2; else if (find_omp_clause (clauses, OMP_CLAUSE_SECTIONS)) mask = 4; else if (find_omp_clause (clauses, OMP_CLAUSE_TASKGROUP)) mask = 8; else { error ("%<#pragma omp cancellation point must specify one of " "%<parallel%>, %<for%>, %<sections%> or %<taskgroup%> clauses"); return; } vec<tree, va_gc> *vec = make_tree_vector_single (build_int_cst (integer_type_node, mask)); tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error); release_tree_vector (vec); finish_expr_stmt (stmt); } /* Begin a __transaction_atomic or __transaction_relaxed statement. If PCOMPOUND is non-null, this is for a function-transaction-block, and we should create an extra compound stmt. */ tree begin_transaction_stmt (location_t loc, tree *pcompound, int flags) { tree r; if (pcompound) *pcompound = begin_compound_stmt (0); r = build_stmt (loc, TRANSACTION_EXPR, NULL_TREE); /* Only add the statement to the function if support enabled. */ if (flag_tm) add_stmt (r); else error_at (loc, ((flags & TM_STMT_ATTR_RELAXED) != 0 ? G_("%<__transaction_relaxed%> without " "transactional memory support enabled") : G_("%<__transaction_atomic%> without " "transactional memory support enabled"))); TRANSACTION_EXPR_BODY (r) = push_stmt_list (); TREE_SIDE_EFFECTS (r) = 1; return r; } /* End a __transaction_atomic or __transaction_relaxed statement. 
If COMPOUND_STMT is non-null, this is for a function-transaction-block, and we should end the compound. If NOEX is non-NULL, we wrap the body in a MUST_NOT_THROW_EXPR with NOEX as condition. */ void finish_transaction_stmt (tree stmt, tree compound_stmt, int flags, tree noex) { TRANSACTION_EXPR_BODY (stmt) = pop_stmt_list (TRANSACTION_EXPR_BODY (stmt)); TRANSACTION_EXPR_OUTER (stmt) = (flags & TM_STMT_ATTR_OUTER) != 0; TRANSACTION_EXPR_RELAXED (stmt) = (flags & TM_STMT_ATTR_RELAXED) != 0; TRANSACTION_EXPR_IS_STMT (stmt) = 1; /* noexcept specifications are not allowed for function transactions. */ gcc_assert (!(noex && compound_stmt)); if (noex) { tree body = build_must_not_throw_expr (TRANSACTION_EXPR_BODY (stmt), noex); /* This may not be true when the STATEMENT_LIST is empty. */ if (EXPR_P (body)) SET_EXPR_LOCATION (body, EXPR_LOCATION (TRANSACTION_EXPR_BODY (stmt))); TREE_SIDE_EFFECTS (body) = 1; TRANSACTION_EXPR_BODY (stmt) = body; } if (compound_stmt) finish_compound_stmt (compound_stmt); } /* Build a __transaction_atomic or __transaction_relaxed expression. If NOEX is non-NULL, we wrap the body in a MUST_NOT_THROW_EXPR with NOEX as condition. */ tree build_transaction_expr (location_t loc, tree expr, int flags, tree noex) { tree ret; if (noex) { expr = build_must_not_throw_expr (expr, noex); if (EXPR_P (expr)) SET_EXPR_LOCATION (expr, loc); TREE_SIDE_EFFECTS (expr) = 1; } ret = build1 (TRANSACTION_EXPR, TREE_TYPE (expr), expr); if (flags & TM_STMT_ATTR_RELAXED) TRANSACTION_EXPR_RELAXED (ret) = 1; TREE_SIDE_EFFECTS (ret) = 1; SET_EXPR_LOCATION (ret, loc); return ret; } void init_cp_semantics (void) { } /* Build a STATIC_ASSERT for a static assertion with the condition CONDITION and the message text MESSAGE. LOCATION is the location of the static assertion in the source code. When MEMBER_P, this static assertion is a member of a class. 
 */
void
finish_static_assert (tree condition, tree message, location_t location,
		      bool member_p)
{
  /* Quietly give up on operands that were already diagnosed.  */
  if (message == NULL_TREE
      || message == error_mark_node
      || condition == NULL_TREE
      || condition == error_mark_node)
    return;

  if (check_for_bare_parameter_packs (condition))
    condition = error_mark_node;

  if (type_dependent_expression_p (condition)
      || value_dependent_expression_p (condition))
    {
      /* We're in a template; build a STATIC_ASSERT and put it in
	 the right place. */
      tree assertion;

      assertion = make_node (STATIC_ASSERT);
      STATIC_ASSERT_CONDITION (assertion) = condition;
      STATIC_ASSERT_MESSAGE (assertion) = message;
      STATIC_ASSERT_SOURCE_LOCATION (assertion) = location;

      if (member_p)
	maybe_add_class_template_decl_list (current_class_type,
					    assertion,
					    /*friend_p=*/0);
      else
	add_stmt (assertion);

      return;
    }

  /* Fold the expression and convert it to a boolean value. */
  condition = instantiate_non_dependent_expr (condition);
  condition = cp_convert (boolean_type_node, condition, tf_warning_or_error);
  condition = maybe_constant_value (condition);

  if (TREE_CODE (condition) == INTEGER_CST && !integer_zerop (condition))
    /* Do nothing; the condition is satisfied. */
    ;
  else
    {
      /* Temporarily point diagnostics at the assertion itself, and
	 restore input_location before returning.  */
      location_t saved_loc = input_location;

      input_location = location;
      if (TREE_CODE (condition) == INTEGER_CST
	  && integer_zerop (condition))
	/* Report the error. */
	error ("static assertion failed: %s", TREE_STRING_POINTER (message));
      else if (condition && condition != error_mark_node)
	{
	  error ("non-constant condition for static assertion");
	  if (require_potential_rvalue_constant_expression (condition))
	    cxx_constant_value (condition);
	}
      input_location = saved_loc;
    }
}

/* Implements the C++0x decltype keyword. Returns the type of EXPR,
   suitable for use as a type-specifier.

   ID_EXPRESSION_OR_MEMBER_ACCESS_P is true when EXPR was parsed as an
   id-expression or a class member access, FALSE when it was parsed as
   a full expression.
 */
tree
finish_decltype_type (tree expr, bool id_expression_or_member_access_p,
		      tsubst_flags_t complain)
{
  tree type = NULL_TREE;

  if (!expr || error_operand_p (expr))
    return error_mark_node;

  /* decltype takes an expression, never a type.  */
  if (TYPE_P (expr)
      || TREE_CODE (expr) == TYPE_DECL
      || (TREE_CODE (expr) == BIT_NOT_EXPR
	  && TYPE_P (TREE_OPERAND (expr, 0))))
    {
      if (complain & tf_error)
	error ("argument to decltype must be an expression");
      return error_mark_node;
    }

  /* Depending on the resolution of DR 1172, we may later need to distinguish
     instantiation-dependent but not type-dependent expressions so that, say,
     A<decltype(sizeof(T))>::U doesn't require 'typename'.  */
  if (instantiation_dependent_expression_p (expr))
    {
      /* Defer: wrap the expression in a DECLTYPE_TYPE placeholder to be
	 resolved at instantiation time.  */
      type = cxx_make_type (DECLTYPE_TYPE);
      DECLTYPE_TYPE_EXPR (type) = expr;
      DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P (type)
	= id_expression_or_member_access_p;
      SET_TYPE_STRUCTURAL_EQUALITY (type);

      return type;
    }

  /* The type denoted by decltype(e) is defined as follows:  */

  expr = resolve_nondeduced_context (expr, complain);

  if (invalid_nonstatic_memfn_p (expr, complain))
    return error_mark_node;

  if (type_unknown_p (expr))
    {
      if (complain & tf_error)
	error ("decltype cannot resolve address of overloaded function");
      return error_mark_node;
    }

  /* To get the size of a static data member declared as an array of
     unknown bound, we need to instantiate it.  */
  if (VAR_P (expr)
      && VAR_HAD_UNKNOWN_BOUND (expr)
      && DECL_TEMPLATE_INSTANTIATION (expr))
    instantiate_decl (expr, /*defer_ok*/true, /*expl_inst_mem*/false);

  if (id_expression_or_member_access_p)
    {
      /* If e is an id-expression or a class member access (5.2.5
	 [expr.ref]), decltype(e) is defined as the type of the entity
	 named by e. If there is no such entity, or e names a set of
	 overloaded functions, the program is ill-formed.  */
      if (identifier_p (expr))
	expr = lookup_name (expr);

      if (INDIRECT_REF_P (expr))
	/* This can happen when the expression is, e.g., "a.b". Just
	   look at the underlying operand.  */
	expr = TREE_OPERAND (expr, 0);

      if (TREE_CODE (expr) == OFFSET_REF
	  || TREE_CODE (expr) == MEMBER_REF
	  || TREE_CODE (expr) == SCOPE_REF)
	/* We're only interested in the field itself. If it is a
	   BASELINK, we will need to see through it in the next
	   step.  */
	expr = TREE_OPERAND (expr, 1);

      if (BASELINK_P (expr))
	/* See through BASELINK nodes to the underlying function.  */
	expr = BASELINK_FUNCTIONS (expr);

      switch (TREE_CODE (expr))
	{
	case FIELD_DECL:
	  if (DECL_BIT_FIELD_TYPE (expr))
	    {
	      type = DECL_BIT_FIELD_TYPE (expr);
	      break;
	    }
	  /* Fall through for fields that aren't bitfields.  */

	case FUNCTION_DECL:
	case VAR_DECL:
	case CONST_DECL:
	case PARM_DECL:
	case RESULT_DECL:
	case TEMPLATE_PARM_INDEX:
	  expr = mark_type_use (expr);
	  type = TREE_TYPE (expr);
	  break;

	case ERROR_MARK:
	  type = error_mark_node;
	  break;

	case COMPONENT_REF:
	case COMPOUND_EXPR:
	  mark_type_use (expr);
	  type = is_bitfield_expr_with_lowered_type (expr);
	  if (!type)
	    type = TREE_TYPE (TREE_OPERAND (expr, 1));
	  break;

	case BIT_FIELD_REF:
	  gcc_unreachable ();

	case INTEGER_CST:
	case PTRMEM_CST:
	  /* We can get here when the id-expression refers to an
	     enumerator or non-type template parameter.  */
	  type = TREE_TYPE (expr);
	  break;

	default:
	  /* Handle instantiated template non-type arguments.  */
	  type = TREE_TYPE (expr);
	  break;
	}
    }
  else
    {
      /* Within a lambda-expression:

	 Every occurrence of decltype((x)) where x is a possibly
	 parenthesized id-expression that names an entity of
	 automatic storage duration is treated as if x were
	 transformed into an access to a corresponding data member
	 of the closure type that would have been declared if x
	 were a use of the denoted entity.  */
      if (outer_automatic_var_p (expr)
	  && current_function_decl
	  && LAMBDA_FUNCTION_P (current_function_decl))
	type = capture_decltype (expr);
      else if (error_operand_p (expr))
	type = error_mark_node;
      else if (expr == current_class_ptr)
	/* If the expression is just "this", we want the
	   cv-unqualified pointer for the "this" type.  */
	type = TYPE_MAIN_VARIANT (TREE_TYPE (expr));
      else
	{
	  /* Otherwise, where T is the type of e, if e is an lvalue,
	     decltype(e) is defined as T&; if an xvalue, T&&; otherwise,
	     T. */
	  cp_lvalue_kind clk = lvalue_kind (expr);
	  type = unlowered_expr_type (expr);
	  gcc_assert (TREE_CODE (type) != REFERENCE_TYPE);

	  /* For vector types, pick a non-opaque variant.  */
	  if (TREE_CODE (type) == VECTOR_TYPE)
	    type = strip_typedefs (type);

	  if (clk != clk_none && !(clk & clk_class))
	    type = cp_build_reference_type (type, (clk & clk_rvalueref));
	}
    }

  return type;
}

/* Called from trait_expr_value to evaluate either __has_nothrow_assign or
   __has_nothrow_copy, depending on assign_p.  */

static bool
classtype_has_nothrow_assign_or_copy_p (tree type, bool assign_p)
{
  tree fns;

  if (assign_p)
    {
      int ix;
      ix = lookup_fnfields_1 (type, ansi_assopname (NOP_EXPR));
      if (ix < 0)
	return false;
      fns = (*CLASSTYPE_METHOD_VEC (type))[ix];
    }
  else if (TYPE_HAS_COPY_CTOR (type))
    {
      /* If construction of the copy constructor was postponed, create
	 it now.  */
      if (CLASSTYPE_LAZY_COPY_CTOR (type))
	lazily_declare_fn (sfk_copy_constructor, type);
      if (CLASSTYPE_LAZY_MOVE_CTOR (type))
	lazily_declare_fn (sfk_move_constructor, type);
      fns = CLASSTYPE_CONSTRUCTORS (type);
    }
  else
    return false;

  /* Every relevant assignment operator / copy constructor must be
     known not to throw.  */
  for (; fns; fns = OVL_NEXT (fns))
    {
      tree fn = OVL_CURRENT (fns);

      if (assign_p)
	{
	  if (copy_fn_p (fn) == 0)
	    continue;
	}
      else if (copy_fn_p (fn) <= 0)
	continue;

      maybe_instantiate_noexcept (fn);
      if (!TYPE_NOTHROW_P (TREE_TYPE (fn)))
	return false;
    }

  return true;
}

/* Actually evaluates the trait.
 */
static bool
trait_expr_value (cp_trait_kind kind, tree type1, tree type2)
{
  enum tree_code type_code1;
  tree t;

  type_code1 = TREE_CODE (type1);

  switch (kind)
    {
    case CPTK_HAS_NOTHROW_ASSIGN:
      type1 = strip_array_types (type1);
      return (!CP_TYPE_CONST_P (type1) && type_code1 != REFERENCE_TYPE
	      && (trait_expr_value (CPTK_HAS_TRIVIAL_ASSIGN, type1, type2)
		  || (CLASS_TYPE_P (type1)
		      && classtype_has_nothrow_assign_or_copy_p (type1,
								 true))));

    case CPTK_HAS_TRIVIAL_ASSIGN:
      /* ??? The standard seems to be missing the "or array of such a class
	 type" wording for this trait.  */
      type1 = strip_array_types (type1);
      return (!CP_TYPE_CONST_P (type1) && type_code1 != REFERENCE_TYPE
	      && (trivial_type_p (type1)
		    || (CLASS_TYPE_P (type1)
			&& TYPE_HAS_TRIVIAL_COPY_ASSIGN (type1))));

    case CPTK_HAS_NOTHROW_CONSTRUCTOR:
      type1 = strip_array_types (type1);
      /* Either trivially default-constructible, or the located default
	 constructor is declared not to throw.  */
      return (trait_expr_value (CPTK_HAS_TRIVIAL_CONSTRUCTOR, type1, type2)
	      || (CLASS_TYPE_P (type1)
		  && (t = locate_ctor (type1))
		  && (maybe_instantiate_noexcept (t),
		      TYPE_NOTHROW_P (TREE_TYPE (t)))));

    case CPTK_HAS_TRIVIAL_CONSTRUCTOR:
      type1 = strip_array_types (type1);
      return (trivial_type_p (type1)
	      || (CLASS_TYPE_P (type1) && TYPE_HAS_TRIVIAL_DFLT (type1)));

    case CPTK_HAS_NOTHROW_COPY:
      type1 = strip_array_types (type1);
      return (trait_expr_value (CPTK_HAS_TRIVIAL_COPY, type1, type2)
	      || (CLASS_TYPE_P (type1)
		  && classtype_has_nothrow_assign_or_copy_p (type1, false)));

    case CPTK_HAS_TRIVIAL_COPY:
      /* ??? The standard seems to be missing the "or array of such a class
	 type" wording for this trait.
 */
      type1 = strip_array_types (type1);
      return (trivial_type_p (type1) || type_code1 == REFERENCE_TYPE
	      || (CLASS_TYPE_P (type1) && TYPE_HAS_TRIVIAL_COPY_CTOR (type1)));

    case CPTK_HAS_TRIVIAL_DESTRUCTOR:
      type1 = strip_array_types (type1);
      return (trivial_type_p (type1) || type_code1 == REFERENCE_TYPE
	      || (CLASS_TYPE_P (type1)
		  && TYPE_HAS_TRIVIAL_DESTRUCTOR (type1)));

    case CPTK_HAS_VIRTUAL_DESTRUCTOR:
      return type_has_virtual_destructor (type1);

    case CPTK_IS_ABSTRACT:
      return (ABSTRACT_CLASS_TYPE_P (type1));

    case CPTK_IS_BASE_OF:
      return (NON_UNION_CLASS_TYPE_P (type1) && NON_UNION_CLASS_TYPE_P (type2)
	      && (same_type_ignoring_top_level_qualifiers_p (type1, type2)
		  || DERIVED_FROM_P (type1, type2)));

    case CPTK_IS_CLASS:
      return (NON_UNION_CLASS_TYPE_P (type1));

    case CPTK_IS_EMPTY:
      return (NON_UNION_CLASS_TYPE_P (type1) && CLASSTYPE_EMPTY_P (type1));

    case CPTK_IS_ENUM:
      return (type_code1 == ENUMERAL_TYPE);

    case CPTK_IS_FINAL:
      return (CLASS_TYPE_P (type1) && CLASSTYPE_FINAL (type1));

    case CPTK_IS_LITERAL_TYPE:
      return (literal_type_p (type1));

    case CPTK_IS_POD:
      return (pod_type_p (type1));

    case CPTK_IS_POLYMORPHIC:
      return (CLASS_TYPE_P (type1) && TYPE_POLYMORPHIC_P (type1));

    case CPTK_IS_STD_LAYOUT:
      return (std_layout_type_p (type1));

    case CPTK_IS_TRIVIAL:
      return (trivial_type_p (type1));

    case CPTK_IS_TRIVIALLY_ASSIGNABLE:
      return is_trivially_xible (MODIFY_EXPR, type1, type2);

    case CPTK_IS_TRIVIALLY_CONSTRUCTIBLE:
      return is_trivially_xible (INIT_EXPR, type1, type2);

    case CPTK_IS_TRIVIALLY_COPYABLE:
      return (trivially_copyable_p (type1));

    case CPTK_IS_UNION:
      return (type_code1 == UNION_TYPE);

    default:
      gcc_unreachable ();
      return false;
    }
}

/* If TYPE is an array of unknown bound, or (possibly cv-qualified)
   void, or a complete type, returns true, otherwise false.
 */
static bool
check_trait_type (tree type)
{
  if (type == NULL_TREE)
    return true;

  /* For a TREE_LIST of types, every element must pass.  */
  if (TREE_CODE (type) == TREE_LIST)
    return (check_trait_type (TREE_VALUE (type))
	    && check_trait_type (TREE_CHAIN (type)));

  if (TREE_CODE (type) == ARRAY_TYPE && !TYPE_DOMAIN (type)
      && COMPLETE_TYPE_P (TREE_TYPE (type)))
    return true;

  if (VOID_TYPE_P (type))
    return true;

  return !!complete_type_or_else (strip_array_types (type), NULL_TREE);
}

/* Process a trait expression.  */

tree
finish_trait_expr (cp_trait_kind kind, tree type1, tree type2)
{
  if (type1 == error_mark_node
      || type2 == error_mark_node)
    return error_mark_node;

  if (processing_template_decl)
    {
      /* Defer evaluation: build a TRAIT_EXPR node for instantiation.  */
      tree trait_expr = make_node (TRAIT_EXPR);
      TREE_TYPE (trait_expr) = boolean_type_node;
      TRAIT_EXPR_TYPE1 (trait_expr) = type1;
      TRAIT_EXPR_TYPE2 (trait_expr) = type2;
      TRAIT_EXPR_KIND (trait_expr) = kind;
      return trait_expr;
    }

  /* Most traits require their operand types to be complete (or an array
     of unknown bound / void); diagnose otherwise.  */
  switch (kind)
    {
    case CPTK_HAS_NOTHROW_ASSIGN:
    case CPTK_HAS_TRIVIAL_ASSIGN:
    case CPTK_HAS_NOTHROW_CONSTRUCTOR:
    case CPTK_HAS_TRIVIAL_CONSTRUCTOR:
    case CPTK_HAS_NOTHROW_COPY:
    case CPTK_HAS_TRIVIAL_COPY:
    case CPTK_HAS_TRIVIAL_DESTRUCTOR:
    case CPTK_HAS_VIRTUAL_DESTRUCTOR:
    case CPTK_IS_ABSTRACT:
    case CPTK_IS_EMPTY:
    case CPTK_IS_FINAL:
    case CPTK_IS_LITERAL_TYPE:
    case CPTK_IS_POD:
    case CPTK_IS_POLYMORPHIC:
    case CPTK_IS_STD_LAYOUT:
    case CPTK_IS_TRIVIAL:
    case CPTK_IS_TRIVIALLY_COPYABLE:
      if (!check_trait_type (type1))
	return error_mark_node;
      break;

    case CPTK_IS_TRIVIALLY_ASSIGNABLE:
    case CPTK_IS_TRIVIALLY_CONSTRUCTIBLE:
      if (!check_trait_type (type1)
	  || !check_trait_type (type2))
	return error_mark_node;
      break;

    case CPTK_IS_BASE_OF:
      if (NON_UNION_CLASS_TYPE_P (type1) && NON_UNION_CLASS_TYPE_P (type2)
	  && !same_type_ignoring_top_level_qualifiers_p (type1, type2)
	  && !complete_type_or_else (type2, NULL_TREE))
	/* We already issued an error.  */
	return error_mark_node;
      break;

    case CPTK_IS_CLASS:
    case CPTK_IS_ENUM:
    case CPTK_IS_UNION:
      break;

    default:
      gcc_unreachable ();
    }

  return (trait_expr_value (kind, type1, type2)
	  ? boolean_true_node : boolean_false_node);
}

/* Do-nothing variants of functions to handle pragma FLOAT_CONST_DECIMAL64,
   which is ignored for C++.  */

void
set_float_const_decimal64 (void)
{
}

void
clear_float_const_decimal64 (void)
{
}

bool
float_const_decimal64_p (void)
{
  return 0;
}


/* Return true if T designates the implied `this' parameter.  */

bool
is_this_parameter (tree t)
{
  if (!DECL_P (t) || DECL_NAME (t) != this_identifier)
    return false;
  gcc_assert (TREE_CODE (t) == PARM_DECL || is_capture_proxy (t));
  return true;
}

/* Insert the deduced return type for an auto function.  */

void
apply_deduced_return_type (tree fco, tree return_type)
{
  tree result;

  if (return_type == error_mark_node)
    return;

  if (LAMBDA_FUNCTION_P (fco))
    {
      /* Record the deduced type on the enclosing lambda as well.  */
      tree lambda = CLASSTYPE_LAMBDA_EXPR (current_class_type);
      LAMBDA_EXPR_RETURN_TYPE (lambda) = return_type;
    }

  if (DECL_CONV_FN_P (fco))
    DECL_NAME (fco) = mangle_conv_op_name_for_type (return_type);

  TREE_TYPE (fco) = change_return_type (return_type, TREE_TYPE (fco));

  result = DECL_RESULT (fco);
  if (result == NULL_TREE)
    return;
  if (TREE_TYPE (result) == return_type)
    return;

  if (!processing_template_decl && !VOID_TYPE_P (return_type)
      && !complete_type_or_else (return_type, NULL_TREE))
    return;

  /* We already have a DECL_RESULT from start_preparsed_function.
     Now we need to redo the work it and allocate_struct_function
     did to reflect the new type.  */
  gcc_assert (current_function_decl == fco);
  result = build_decl (input_location, RESULT_DECL, NULL_TREE,
		       TYPE_MAIN_VARIANT (return_type));
  DECL_ARTIFICIAL (result) = 1;
  DECL_IGNORED_P (result) = 1;
  cp_apply_type_quals_to_decl (cp_type_quals (return_type),
			       result);

  DECL_RESULT (fco) = result;

  if (!processing_template_decl)
    {
      bool aggr = aggregate_value_p (result, fco);
#ifdef PCC_STATIC_STRUCT_RETURN
      cfun->returns_pcc_struct = aggr;
#endif
      cfun->returns_struct = aggr;
    }

}

/* DECL is a local variable or parameter from the surrounding scope of a
   lambda-expression.
   Returns the decltype for a use of the capture field for DECL even if it
   hasn't been captured yet.  */

static tree
capture_decltype (tree decl)
{
  tree lam = CLASSTYPE_LAMBDA_EXPR (DECL_CONTEXT (current_function_decl));
  /* FIXME do lookup instead of list walk? */
  tree cap = value_member (decl, LAMBDA_EXPR_CAPTURE_LIST (lam));
  tree type;

  if (cap)
    /* Already captured: use the type of the existing capture field.  */
    type = TREE_TYPE (TREE_PURPOSE (cap));
  else
    switch (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lam))
      {
      case CPLD_NONE:
	error ("%qD is not captured", decl);
	return error_mark_node;

      case CPLD_COPY:
	type = TREE_TYPE (decl);
	if (TREE_CODE (type) == REFERENCE_TYPE
	    && TREE_CODE (TREE_TYPE (type)) != FUNCTION_TYPE)
	  type = TREE_TYPE (type);
	break;

      case CPLD_REFERENCE:
	type = TREE_TYPE (decl);
	if (TREE_CODE (type) != REFERENCE_TYPE)
	  type = build_reference_type (TREE_TYPE (decl));
	break;

      default:
	gcc_unreachable ();
      }

  if (TREE_CODE (type) != REFERENCE_TYPE)
    {
      /* A by-copy capture of a non-mutable lambda is const-qualified,
	 and the member access yields an lvalue.  */
      if (!LAMBDA_EXPR_MUTABLE_P (lam))
	type = cp_build_qualified_type (type, (cp_type_quals (type)
					       |TYPE_QUAL_CONST));
      type = build_reference_type (type);
    }
  return type;
}

#include "gt-cp-semantics.h"
InternalTransferCircuit.h
#ifndef _INTERNAL_TRANSFER_CIRCUIT_H_ #define _INTERNAL_TRANSFER_CIRCUIT_H_ #include "Circuit.h" #include "../Utils/Constants.h" #include "../Utils/Data.h" #include "../Utils/Utils.h" #include "../Gadgets/AccountGadgets.h" #include "../Gadgets/TradingHistoryGadgets.h" #include "ethsnarks.hpp" #include "utils.hpp" using namespace ethsnarks; namespace Loopring { class InternalTransferGadget : public GadgetT { public: const Constants& constants; // User From state BalanceGadget balanceFBefore_From; BalanceGadget balanceTBefore_From; AccountGadget accountBefore_From; // User To state BalanceGadget balanceTBefore_To; AccountGadget accountBefore_To; // Operator state BalanceGadget balanceBefore_O; // Inputs DualVariableGadget accountID_From; DualVariableGadget accountID_To; DualVariableGadget tokenID; DualVariableGadget amount; DualVariableGadget feeTokenID; DualVariableGadget fee; DualVariableGadget type; // Signature Poseidon_gadget_T<9, 1, 6, 53, 8, 1> hash; SignatureVerifier signatureVerifier; // Type NotGadget signatureInvalid; UnsafeAddGadget numConditionalTransfersAfter; RequireEqualGadget type_eq_signatureInvalid; // User To account check RequireNotZeroGadget publicKeyX_notZero; // Fee as float FloatGadget fFee; RequireAccuracyGadget requireAccuracyFee; // Amount as float FloatGadget fAmount; RequireAccuracyGadget requireAccuracyAmount; // Fee payment from From to the operator subadd_gadget feePayment; // Transfer from From to To subadd_gadget transferPayment; // Increase the nonce of From by 1 AddGadget nonce_From_after; // Update User From UpdateBalanceGadget updateBalanceF_From; UpdateBalanceGadget updateBalanceT_From; UpdateAccountGadget updateAccount_From; // Update User To UpdateBalanceGadget updateBalanceT_To; UpdateAccountGadget updateAccount_To; // Update Operator UpdateBalanceGadget updateBalanceF_O; InternalTransferGadget( ProtoboardT &pb, const jubjub::Params& params, const Constants& _constants, const VariableT& accountsMerkleRoot, const VariableT& 
operatorBalancesRoot, const VariableT& blockExchangeID, const VariableT& numConditionalTransfersBefore, const std::string &prefix ) : GadgetT(pb, prefix), constants(_constants), // User From state balanceFBefore_From(pb, FMT(prefix, "balanceFBefore_From")), balanceTBefore_From(pb, FMT(prefix, "balanceTBefore_From")), accountBefore_From(pb, FMT(prefix, "accountBefore_From")), // User To state balanceTBefore_To(pb, FMT(prefix, "balanceTBefore_To")), accountBefore_To(pb, FMT(prefix, "accountBefore_To")), // Operator state balanceBefore_O(pb, FMT(prefix, "balanceBefore_O")), // Inputs accountID_From(pb, NUM_BITS_ACCOUNT, FMT(prefix, ".accountID_From")), accountID_To(pb, NUM_BITS_ACCOUNT, FMT(prefix, ".accountID_To")), tokenID(pb, NUM_BITS_TOKEN, FMT(prefix, ".tokenID")), amount(pb, NUM_BITS_AMOUNT, FMT(prefix, ".amount")), feeTokenID(pb, NUM_BITS_TOKEN, FMT(prefix, ".feeTokenID")), fee(pb, NUM_BITS_AMOUNT, FMT(prefix, ".fee")), type(pb, NUM_BITS_TYPE, FMT(prefix, ".type")), // Signature hash(pb, var_array({blockExchangeID, accountID_From.packed, accountID_To.packed, tokenID.packed, amount.packed, feeTokenID.packed, fee.packed, accountBefore_From.nonce}), FMT(this->annotation_prefix, ".hash")), signatureVerifier(pb, params, constants, accountBefore_From.publicKey, hash.result(), FMT(prefix, ".signatureVerifier"), false), // Type signatureInvalid(pb, signatureVerifier.result(), ".signatureInvalid"), numConditionalTransfersAfter(pb, numConditionalTransfersBefore, signatureInvalid.result(), ".numConditionalTransfersAfter"), type_eq_signatureInvalid(pb, type.packed, signatureInvalid.result(), ".type_eq_signatureInvalid"), // User To account check publicKeyX_notZero(pb, accountBefore_To.publicKey.x, FMT(prefix, ".publicKeyX_notZero")), // Fee as float fFee(pb, constants, Float16Encoding, FMT(prefix, ".fFee")), requireAccuracyFee(pb, fFee.value(), fee.packed, Float16Accuracy, NUM_BITS_AMOUNT, FMT(prefix, ".requireAccuracyFee")), // Amount as float fAmount(pb, constants, 
Float24Encoding, FMT(prefix, ".fTansAmount")), requireAccuracyAmount(pb, fAmount.value(), amount.packed, Float24Accuracy, NUM_BITS_AMOUNT, FMT(prefix, ".requireAccuracyAmount")), // Fee payment from From to the operator feePayment(pb, NUM_BITS_AMOUNT, balanceFBefore_From.balance, balanceBefore_O.balance, fFee.value(), FMT(prefix, ".feePayment")), // Transfer from From to To transferPayment(pb, NUM_BITS_AMOUNT, balanceTBefore_From.balance, balanceTBefore_To.balance, fAmount.value(), FMT(prefix, ".transferPayment")), // Increase the nonce of From by 1 (unless it's a conditional transfer) nonce_From_after(pb, accountBefore_From.nonce, signatureVerifier.result(), NUM_BITS_NONCE, FMT(prefix, ".nonce_From_after")), // Update User From updateBalanceF_From(pb, accountBefore_From.balancesRoot, feeTokenID.bits, {balanceFBefore_From.balance, balanceFBefore_From.tradingHistory}, {feePayment.X, balanceFBefore_From.tradingHistory}, FMT(prefix, ".updateBalanceF_From")), updateBalanceT_From(pb, updateBalanceF_From.result(), tokenID.bits, {balanceTBefore_From.balance, balanceTBefore_From.tradingHistory}, {transferPayment.X, balanceTBefore_From.tradingHistory}, FMT(prefix, ".updateBalanceT_From")), updateAccount_From(pb, accountsMerkleRoot, accountID_From.bits, {accountBefore_From.publicKey.x, accountBefore_From.publicKey.y, accountBefore_From.nonce, accountBefore_From.balancesRoot}, {accountBefore_From.publicKey.x, accountBefore_From.publicKey.y, nonce_From_after.result(), updateBalanceT_From.result()}, FMT(prefix, ".updateAccount_From")), // Update User To updateBalanceT_To(pb, accountBefore_To.balancesRoot, tokenID.bits, {balanceTBefore_To.balance, balanceTBefore_To.tradingHistory}, {transferPayment.Y, balanceTBefore_To.tradingHistory}, FMT(prefix, ".updateBalanceT_To")), updateAccount_To(pb, updateAccount_From.result(), accountID_To.bits, {accountBefore_To.publicKey.x, accountBefore_To.publicKey.y, accountBefore_To.nonce, accountBefore_To.balancesRoot}, 
{accountBefore_To.publicKey.x, accountBefore_To.publicKey.y, accountBefore_To.nonce, updateBalanceT_To.result()}, FMT(prefix, ".updateAccount_To")), // Update Operator updateBalanceF_O(pb, operatorBalancesRoot, feeTokenID.bits, {balanceBefore_O.balance, balanceBefore_O.tradingHistory}, {feePayment.Y, balanceBefore_O.tradingHistory}, FMT(prefix, ".updateBalanceF_O")) { } void generate_r1cs_witness(const InternalTransfer& transfer) { // User From state balanceFBefore_From.generate_r1cs_witness(transfer.balanceUpdateF_From.before); balanceTBefore_From.generate_r1cs_witness(transfer.balanceUpdateT_From.before); accountBefore_From.generate_r1cs_witness(transfer.accountUpdate_From.before); // User To state balanceTBefore_To.generate_r1cs_witness(transfer.balanceUpdateT_To.before); accountBefore_To.generate_r1cs_witness(transfer.accountUpdate_To.before); // Operator state balanceBefore_O.generate_r1cs_witness(transfer.balanceUpdateF_O.before); // Inputs accountID_From.generate_r1cs_witness(pb, transfer.accountUpdate_From.accountID); accountID_To.generate_r1cs_witness(pb, transfer.accountUpdate_To.accountID); tokenID.generate_r1cs_witness(pb, transfer.balanceUpdateT_From.tokenID); amount.generate_r1cs_witness(pb, transfer.amount); feeTokenID.generate_r1cs_witness(pb, transfer.balanceUpdateF_From.tokenID); fee.generate_r1cs_witness(pb, transfer.fee); type.generate_r1cs_witness(pb, transfer.type); // Signature hash.generate_r1cs_witness(); signatureVerifier.generate_r1cs_witness(transfer.signature); // Type signatureInvalid.generate_r1cs_witness(); pb.val(numConditionalTransfersAfter.sum) = transfer.numConditionalTransfersAfter; type_eq_signatureInvalid.generate_r1cs_witness(); // User To account check publicKeyX_notZero.generate_r1cs_witness(); // Fee as float fFee.generate_r1cs_witness(toFloat(transfer.fee, Float16Encoding)); requireAccuracyFee.generate_r1cs_witness(); // Amount as float fAmount.generate_r1cs_witness(toFloat(transfer.amount, Float24Encoding)); 
requireAccuracyAmount.generate_r1cs_witness(); // Fee payment from From to the operator feePayment.generate_r1cs_witness(); // Transfer from From to To transferPayment.generate_r1cs_witness(); // Increase the nonce of From by 1 nonce_From_after.generate_r1cs_witness(); // Update User From updateBalanceF_From.generate_r1cs_witness(transfer.balanceUpdateF_From.proof); updateBalanceT_From.generate_r1cs_witness(transfer.balanceUpdateT_From.proof); updateAccount_From.generate_r1cs_witness(transfer.accountUpdate_From.proof); // Update User To updateBalanceT_To.generate_r1cs_witness(transfer.balanceUpdateT_To.proof); updateAccount_To.generate_r1cs_witness(transfer.accountUpdate_To.proof); // Update Operator updateBalanceF_O.generate_r1cs_witness(transfer.balanceUpdateF_O.proof); } void generate_r1cs_constraints() { // Inputs accountID_From.generate_r1cs_constraints(true); accountID_To.generate_r1cs_constraints(true); tokenID.generate_r1cs_constraints(true); amount.generate_r1cs_constraints(true); feeTokenID.generate_r1cs_constraints(true); fee.generate_r1cs_constraints(true); type.generate_r1cs_constraints(true); // Signature hash.generate_r1cs_constraints(); signatureVerifier.generate_r1cs_constraints(); // Type signatureInvalid.generate_r1cs_constraints(); numConditionalTransfersAfter.generate_r1cs_constraints(); type_eq_signatureInvalid.generate_r1cs_constraints(); // User To account check publicKeyX_notZero.generate_r1cs_constraints(); // Fee as float fFee.generate_r1cs_constraints(); requireAccuracyFee.generate_r1cs_constraints(); // Amount as float fAmount.generate_r1cs_constraints(); requireAccuracyAmount.generate_r1cs_constraints(); // Fee payment from From to the operator feePayment.generate_r1cs_constraints(); // Transfer from From to To transferPayment.generate_r1cs_constraints(); // Increase the nonce of From by 1 nonce_From_after.generate_r1cs_constraints(); // Update User From updateBalanceF_From.generate_r1cs_constraints(); 
updateBalanceT_From.generate_r1cs_constraints(); updateAccount_From.generate_r1cs_constraints(); // Update User To updateBalanceT_To.generate_r1cs_constraints(); updateAccount_To.generate_r1cs_constraints(); // Update Operator updateBalanceF_O.generate_r1cs_constraints(); } const std::vector<VariableArrayT> getPublicData() const { return {type.bits, accountID_From.bits, accountID_To.bits, VariableArrayT(2, constants.zero), tokenID.bits, VariableArrayT(2, constants.zero), feeTokenID.bits, fAmount.bits(), fFee.bits()}; } const VariableT& getNewAccountsRoot() const { return updateAccount_To.result(); } const VariableT& getNewOperatorBalancesRoot() const { return updateBalanceF_O.result(); } const VariableT& getNewNumConditionalTransfers() const { return numConditionalTransfersAfter.result(); } }; class InternalTransferCircuit : public Circuit { public: PublicDataGadget publicData; Constants constants; jubjub::Params params; // State AccountGadget accountBefore_O; // Inputs DualVariableGadget exchangeID; DualVariableGadget merkleRootBefore; DualVariableGadget merkleRootAfter; std::unique_ptr<libsnark::dual_variable_gadget<FieldT>> numConditionalTransfers; DualVariableGadget operatorAccountID; // Operator account check RequireNotZeroGadget publicKeyX_notZero; // Internal transfers bool onchainDataAvailability; unsigned int numTransfers; std::vector<InternalTransferGadget> transfers; // Update Operator std::unique_ptr<UpdateAccountGadget> updateAccount_O; InternalTransferCircuit(ProtoboardT &pb, const std::string &prefix) : Circuit(pb, prefix), publicData(pb, FMT(prefix, ".publicData")), constants(pb, FMT(prefix, ".constants")), // State accountBefore_O(pb, FMT(prefix, ".accountBefore_O")), // Inputs exchangeID(pb, NUM_BITS_EXCHANGE_ID, FMT(prefix, ".exchangeID")), merkleRootBefore(pb, 256, FMT(prefix, ".merkleRootBefore")), merkleRootAfter(pb, 256, FMT(prefix, ".merkleRootAfter")), operatorAccountID(pb, NUM_BITS_ACCOUNT, FMT(prefix, ".operatorAccountID")), // Operator 
account check publicKeyX_notZero(pb, accountBefore_O.publicKey.x, FMT(prefix, ".publicKeyX_notZero")) { } void generateConstraints(bool onchainDataAvailability, unsigned int blockSize) override { this->onchainDataAvailability = onchainDataAvailability; this->numTransfers = blockSize; constants.generate_r1cs_constraints(); // Inputs exchangeID.generate_r1cs_constraints(true); merkleRootBefore.generate_r1cs_constraints(true); merkleRootAfter.generate_r1cs_constraints(true); operatorAccountID.generate_r1cs_constraints(true); // Operator account check publicKeyX_notZero.generate_r1cs_constraints(); // Internal transfers transfers.reserve(numTransfers); for (size_t j = 0; j < numTransfers; j++) { VariableT transAccountsRoot = (j == 0) ? merkleRootBefore.packed : transfers.back().getNewAccountsRoot(); VariableT transOperatorBalancesRoot = (j == 0) ? accountBefore_O.balancesRoot : transfers.back().getNewOperatorBalancesRoot(); transfers.emplace_back( pb, params, constants, transAccountsRoot, transOperatorBalancesRoot, exchangeID.packed, (j == 0) ? 
constants.zero : transfers.back().getNewNumConditionalTransfers(), std::string("transfer_") + std::to_string(j)); transfers.back().generate_r1cs_constraints(); } // Update Operator updateAccount_O.reset(new UpdateAccountGadget(pb, transfers.back().getNewAccountsRoot(), operatorAccountID.bits, {accountBefore_O.publicKey.x, accountBefore_O.publicKey.y, accountBefore_O.nonce, accountBefore_O.balancesRoot}, {accountBefore_O.publicKey.x, accountBefore_O.publicKey.y, accountBefore_O.nonce, transfers.back().getNewOperatorBalancesRoot()}, FMT(annotation_prefix, ".updateAccount_O"))); updateAccount_O->generate_r1cs_constraints(); // Num conditional transfers numConditionalTransfers.reset(new libsnark::dual_variable_gadget<FieldT>( pb, transfers.back().getNewNumConditionalTransfers(), 32, ".numConditionalTransfers") ); numConditionalTransfers->generate_r1cs_constraints(true); // Public data publicData.add(exchangeID.bits); publicData.add(merkleRootBefore.bits); publicData.add(merkleRootAfter.bits); publicData.add(numConditionalTransfers->bits); if (onchainDataAvailability) { publicData.add(operatorAccountID.bits); for (const InternalTransferGadget& transfer : transfers) { publicData.add(transfer.getPublicData()); } } publicData.generate_r1cs_constraints(); // Check the new merkle root requireEqual(pb, updateAccount_O->result(), merkleRootAfter.packed, "newMerkleRoot"); } bool generateWitness(const Loopring::InternalTransferBlock &block) { constants.generate_r1cs_witness(); // State accountBefore_O.generate_r1cs_witness(block.accountUpdate_O.before); // Inputs exchangeID.generate_r1cs_witness(pb, block.exchangeID); merkleRootBefore.generate_r1cs_witness(pb, block.merkleRootBefore); merkleRootAfter.generate_r1cs_witness(pb, block.merkleRootAfter); operatorAccountID.generate_r1cs_witness(pb, block.operatorAccountID); // Operator account check publicKeyX_notZero.generate_r1cs_witness(); // Internal transfers #ifdef MULTICORE #pragma omp parallel for #endif for (unsigned int i = 
0; i < block.transfers.size(); i++) { transfers[i].generate_r1cs_witness(block.transfers[i]); } // Update operator updateAccount_O->generate_r1cs_witness(block.accountUpdate_O.proof); // Num conditional transfers numConditionalTransfers->generate_r1cs_witness_from_packed(); // Public data publicData.generate_r1cs_witness(); return true; } bool generateWitness(const json& input) override { return generateWitness(input.get<Loopring::InternalTransferBlock>()); } BlockType getBlockType() override { return BlockType::InternalTransfer; } unsigned int getBlockSize() override { return numTransfers; } void printInfo() override { std::cout << pb.num_constraints() << " constraints (" << (pb.num_constraints() / numTransfers) << "/transfer)" << std::endl; } }; } // namespace Loopring #endif
datatypes.h
#ifndef DATATYPES_H_
#define DATATYPES_H_

#include <stdbool.h>
#include "../tools.h"
#include "PlyDict.h"
#include "ObjDict.h"

/*! @brief Token separating the header from the body in serialized messages
    (presumably — confirm against the serialization code). */
#define MSG_HEAD_SEP "YGG_MSG_HEAD"
/*! @brief Size of COMM buffer. */
#define COMMBUFFSIZ 2000
/*! @brief Maximum length of a format string. */
#define FMT_LEN 100

#ifdef __cplusplus /* If this is a C++ compiler, use C linkage */
extern "C" {
#endif

/* NOTE(review): `static` in a header gives every including translation unit
   (and, with the OpenMP directive, every thread) its own copy of
   prefix_char — confirm this duplication is intended. */
static char prefix_char = '#';
#ifdef _OPENMP
#pragma omp threadprivate(prefix_char)
#endif

/*! @brief Bit flags. */
#define HEAD_FLAG_VALID 0x00000001 //!< Set if the header is valid.
#define HEAD_FLAG_MULTIPART 0x00000002 //!< Set if the header is for a multipart message
#define HEAD_TYPE_IN_DATA 0x00000004 //!< Set if the type is stored with the data during serialization
#define HEAD_AS_ARRAY 0x00000008 //!< Set if messages will be serialized arrays

/*! @brief C-friendly definition of MetaschemaType. */
typedef struct dtype_t {
  char type[COMMBUFFSIZ]; //!< Type name
  bool use_generic; //!< Flag for empty dtypes to specify generic in/out
  void *obj; //!< MetaschemaType Pointer
} dtype_t;

/*! @brief C-friendly definition of YggGeneric. */
typedef struct generic_t {
  char prefix; //!< Prefix character for limited verification.
  void *obj; //!< Pointer to YggGeneric class.
} generic_t;

/*! @brief C-friendly definition of vector object. */
typedef generic_t json_array_t;

/*! @brief C-friendly definition of map object. */
typedef generic_t json_object_t;

/*! @brief C-friendly definition of schema object. */
typedef generic_t schema_t;

/*! @brief C-friendly definition of Python class object. */
typedef python_t python_class_t;

/*! @brief C-friendly definition of Python function object. */
typedef python_t python_function_t;

/*! @brief C-friendly definition of Python instance object. */
typedef generic_t python_instance_t;

/*! @brief Macro wrapping call to PyObject_CallFunction. */
#define call_python(x, format, ...) PyObject_CallFunction(x.obj, format, __VA_ARGS__)

/*! @brief Aliases to allow differentiation in parsing model definition. */
typedef char* unicode_t;
typedef char* string_t;
typedef char* bytes_t;

/*! @brief Header information passed by comms for multipart messages. */
typedef struct comm_head_t {
  size_t bodysiz; //!< Size of body.
  size_t bodybeg; //!< Start of body in header.
  int flags; //!< Bit flags encoding the status of the header.
  int nargs_populated; //!< Number of arguments populated during deserialization.
  // size_t size; //!< Size of incoming message.
  char address[COMMBUFFSIZ]; //!< Address that message will comm in on.
  char id[COMMBUFFSIZ]; //!< Unique ID associated with this message.
  char response_address[COMMBUFFSIZ]; //!< Response address.
  char request_id[COMMBUFFSIZ]; //!< Request id.
  char zmq_reply[COMMBUFFSIZ]; //!< Reply address for ZMQ sockets.
  char zmq_reply_worker[COMMBUFFSIZ]; //!< Reply address for worker socket.
  char model[COMMBUFFSIZ]; //!< Name of model that sent the header.
  // These should be removed once JSON fully implemented
  int serializer_type; //!< Code indicating the type of serializer.
  char format_str[COMMBUFFSIZ]; //!< Format string for serializer.
  char field_names[COMMBUFFSIZ]; //!< String containing field names.
  char field_units[COMMBUFFSIZ]; //!< String containing field units.
  // dtype_t* dtype; //!< Type structure.
} comm_head_t;


/*!
  @brief C wrapper for the C++ delete_dtype_class function.
  @param x void* Pointer to MetaschemaType subclass that should be deleted.
 */
void delete_dtype_class_c(void* x);


/*!
  @brief C wrapper for the C++ type_from_doc function.
  @param type_doc void* Pointer to const rapidjson::Value type doc.
  @param[in] use_generic bool If true, serialized/deserialized
  objects will be expected to be YggGeneric classes.
  @returns void* Pointer to MetaschemaType class.
 */
void* type_from_doc_c(const void* type_doc, const bool use_generic);


/*!
  @brief C wrapper for the C++ type_from_pyobj function.
  @param pyobj PyObject* Python object defining the type.
  @param[in] use_generic bool If true, serialized/deserialized
  objects will be expected to be YggGeneric classes.
  @returns void* Pointer to MetaschemaType class.
 */
void* type_from_pyobj_c(PyObject* pyobj, const bool use_generic);


/*!
  @brief Determine if a datatype was created from a format.
  @param[in] type_struct dtype_t* Datatype structure.
  @returns int 1 if the datatype was created from a format, 0 if it
  was not, -1 if there is an error.
 */
int is_dtype_format_array(dtype_t* type_struct);


/*!
  @brief Initialize an empty generic object.
  @returns generic_t New generic object structure.
 */
generic_t init_generic();

/*!
  @brief Initialize an empty array of mixed types with generic wrappers.
  @returns generic_t New generic object structure containing an empty array.
 */
generic_t init_generic_array();

/*!
  @brief Initialize an empty map (JSON object) of mixed types with generic wrappers.
  @returns generic_t New generic object structure containing an empty map (JSON object).
 */
generic_t init_generic_map();

/*!
  @brief Determine if the provided character matches the required generic prefix char.
  @param[in] x char Character to check.
  @returns int 1 if the character is the correct prefix, 0 otherwise.
 */
int is_generic_flag(char x);

/*!
  @brief Determine if a generic structure is initialized.
  @param[in] x generic_t Generic structure to test.
  @returns int 1 if the structure is initialized, 0 otherwise.
 */
int is_generic_init(generic_t x);

/*!
  @brief Create a generic object from the provided information.
  @param[in] type_class dtype_t* Type structure/class.
  @param[in] data void* Pointer to data.
  @param[in] nbytes size_t Size of data.
  @returns generic_t Pointer to new generic object structure.
 */
generic_t create_generic(dtype_t* type_class, void* data, size_t nbytes);


/*!
  @brief Destroy a generic object.
  @param[in] x generic_t* Pointer to generic object structure to destroy.
  @returns int -1 if unsuccessful, 0 otherwise.
 */
int destroy_generic(generic_t* x);


/*!
  @brief Copy data from one generic object to the other.
  @param[in] src generic_t Generic structure that data should be copied from.
  @returns generic_t Copied structure.
 */
generic_t copy_generic(generic_t src);


/*!
  @brief Display information about the generic type.
  @param[in] x generic_t* Wrapper for generic object.
 */
void display_generic(generic_t x);


/*!
  @brief Return the recovered generic structure if one is present in
  the variable argument list.
  @param[in] nargs size_t Number of argument present in ap.
  @param[in] ap va_list_t Variable argument list.
  @returns generic_t Generic structure if one is present.
 */
generic_t get_generic_va(size_t nargs, va_list_t ap);


/*!
  @brief Return the recovered generic structure if one is present in
  the variable argument list.
  @param[in] nargs size_t Number of argument present in ap.
  @param[in] ap va_list_t Variable argument list.
  @returns generic_t* Generic structure if one is present, NULL otherwise.
 */
generic_t* get_generic_va_ptr(size_t nargs, va_list_t ap);


/*!
  @brief Return the recovered generic structure if one is present in
  the variable argument list by removing it.
  @param[in] nargs size_t* Pointer to number of arguments present in ap
  that will be decremented by 1.
  @param[in] ap va_list_t* Pointer to variable argument list.
  @returns generic_t Generic structure if one is present.
 */
generic_t pop_generic_va(size_t* nargs, va_list_t* ap);


/*!
  @brief Return the recovered generic structure if one is present in
  the variable argument list by removing it.
  @param[in] nargs size_t* Pointer to number of arguments present in ap
  that will be decremented by 1.
  @param[in] ap va_list_t* Pointer to variable argument list.
  @returns generic_t* Generic structure if one is present, NULL otherwise.
 */
generic_t* pop_generic_va_ptr(size_t* nargs, va_list_t* ap);


/*!
  @brief Add an element to the end of an array of generic elements.
  @param[in] arr generic_t Array to add element to.
  @param[in] x generic_t Element to add.
  @returns int Flag that is 1 if there is an error and 0 otherwise.
 */
int add_generic_array(generic_t arr, generic_t x);


/*!
  @brief Set an element in the array at a given index to a new value.
  @param[in] arr generic_t Array to add element to.
  @param[in] i size_t Index where element should be added.
  @param[in] x generic_t Element to add.
  @returns int Flag that is 1 if there is an error and 0 otherwise.
 */
int set_generic_array(generic_t arr, size_t i, generic_t x);


/*!
  @brief Get an element from an array.
  @param[in] arr generic_t Array to get element from.
  @param[in] i size_t Index of element to get.
  @param[out] x generic_t* Pointer to address where element should be
  stored.
  @returns int Flag that is 1 if there is an error and 0 otherwise.
 */
int get_generic_array(generic_t arr, size_t i, generic_t *x);


/*!
  @brief Set an element in the object at for a given key to a new value.
  @param[in] arr generic_t Object to add element to.
  @param[in] k const char* Key where element should be added.
  @param[in] x generic_t Element to add.
  @returns int Flag that is 1 if there is an error and 0 otherwise.
 */
int set_generic_object(generic_t arr, const char* k, generic_t x);


/*!
  @brief Get an element from an object.
  @param[in] arr generic_t Object to get element from.
  @param[in] k const char* Key of element to return.
  @param[out] x generic_t* Pointer to address where element should be
  stored.
  @returns int Flag that is 1 if there is an error and 0 otherwise.
 */
int get_generic_object(generic_t arr, const char* k, generic_t *x);


/*!
  @brief Get the number of elements in an array object.
  @param[in] x generic_t Generic object that is presumed to contain an array.
  @returns size_t Number of elements in array.
 */
size_t generic_array_get_size(generic_t x);

/*!
  @brief Get an item from an array for types that don't require additional parameters.
  @param[in] x generic_t Generic object that is presumed to contain an array.
  @param[in] index size_t Index for value that should be returned.
  @param[in] type const char* Type of value expected.
  @returns void* Pointer to data for array item.
 */
void* generic_array_get_item(generic_t x, const size_t index,
			     const char *type);
/*!
  @brief Get the size of an item from an array in bytes.
  @param[in] x Generic object that is presumed to contain an array.
  @param[in] index Index for value that the size should be returned for.
  @returns Size of the item in bytes.
 */
int generic_array_get_item_nbytes(generic_t x, const size_t index);
/* Typed item getters: untyped-parameter variants of generic_array_get_item. */
bool generic_array_get_bool(generic_t x, const size_t index);
int generic_array_get_integer(generic_t x, const size_t index);
void* generic_array_get_null(generic_t x, const size_t index);
double generic_array_get_number(generic_t x, const size_t index);
char* generic_array_get_string(generic_t x, const size_t index);
generic_t generic_array_get_object(generic_t x, const size_t index);
generic_t generic_array_get_array(generic_t x, const size_t index);
char* generic_array_get_direct(generic_t x, const size_t index);
ply_t generic_array_get_ply(generic_t x, const size_t index);
obj_t generic_array_get_obj(generic_t x, const size_t index);
python_t generic_array_get_python_class(generic_t x, const size_t index);
python_t generic_array_get_python_function(generic_t x, const size_t index);
schema_t generic_array_get_schema(generic_t x, const size_t index);
generic_t generic_array_get_any(generic_t x, const size_t index);

/*!
  @brief Get a scalar value from an array.
  @param[in] x generic_t Generic object that is presumed to contain an array.
  @param[in] index size_t Index for value that should be returned.
  @param[in] subtype const char* Subtype of scalar expected.
  @param[in] precision const int Precision of scalar that is expected.
  @returns void* Pointer to scalar data.
 */
void* generic_array_get_scalar(generic_t x, const size_t index,
			       const char *subtype,
			       const size_t precision);
/* Typed scalar getters: fixed subtype/precision variants of
   generic_array_get_scalar. */
int8_t generic_array_get_int8(generic_t x, const size_t index);
int16_t generic_array_get_int16(generic_t x, const size_t index);
int32_t generic_array_get_int32(generic_t x, const size_t index);
int64_t generic_array_get_int64(generic_t x, const size_t index);
uint8_t generic_array_get_uint8(generic_t x, const size_t index);
uint16_t generic_array_get_uint16(generic_t x, const size_t index);
uint32_t generic_array_get_uint32(generic_t x, const size_t index);
uint64_t generic_array_get_uint64(generic_t x, const size_t index);
float generic_array_get_float(generic_t x, const size_t index);
double generic_array_get_double(generic_t x, const size_t index);
long double generic_array_get_long_double(generic_t x, const size_t index);
complex_float_t generic_array_get_complex_float(generic_t x, const size_t index);
complex_double_t generic_array_get_complex_double(generic_t x, const size_t index);
complex_long_double_t generic_array_get_complex_long_double(generic_t x, const size_t index);
char* generic_array_get_bytes(generic_t x, const size_t index);
char* generic_array_get_unicode(generic_t x, const size_t index);

/*!
  @brief Get a 1d array value from an array.
  @param[in] x generic_t Generic object that is presumed to contain an array.
  @param[in] index size_t Index for value that should be returned.
  @param[in] subtype const char* Subtype of array expected.
  @param[in] precision const size_t Precision of array that is expected.
  @param[out] data void** Pointer to pointer that should be reallocated to
  store the data.
  @returns size_t Number of elements in the data.
 */
size_t generic_array_get_1darray(generic_t x, const size_t index,
				 const char *subtype, const size_t precision,
				 void** data);
/* Typed 1d-array getters: fixed subtype/precision variants of
   generic_array_get_1darray. */
size_t generic_array_get_1darray_int8(generic_t x, const size_t index, int8_t** data);
size_t generic_array_get_1darray_int16(generic_t x, const size_t index, int16_t** data);
size_t generic_array_get_1darray_int32(generic_t x, const size_t index, int32_t** data);
size_t generic_array_get_1darray_int64(generic_t x, const size_t index, int64_t** data);
size_t generic_array_get_1darray_uint8(generic_t x, const size_t index, uint8_t** data);
size_t generic_array_get_1darray_uint16(generic_t x, const size_t index, uint16_t** data);
size_t generic_array_get_1darray_uint32(generic_t x, const size_t index, uint32_t** data);
size_t generic_array_get_1darray_uint64(generic_t x, const size_t index, uint64_t** data);
size_t generic_array_get_1darray_float(generic_t x, const size_t index, float** data);
size_t generic_array_get_1darray_double(generic_t x, const size_t index, double** data);
size_t generic_array_get_1darray_long_double(generic_t x, const size_t index, long double** data);
size_t generic_array_get_1darray_complex_float(generic_t x, const size_t index, complex_float_t** data);
size_t generic_array_get_1darray_complex_double(generic_t x, const size_t index, complex_double_t** data);
size_t generic_array_get_1darray_complex_long_double(generic_t x, const size_t index, complex_long_double_t** data);
size_t generic_array_get_1darray_bytes(generic_t x, const size_t index, char** data);
size_t generic_array_get_1darray_unicode(generic_t x, const size_t index, char** data);

/*!
  @brief Get a nd array value from an array.
  @param[in] x generic_t Generic object that is presumed to contain an array.
  @param[in] index size_t Index for value that should be returned.
  @param[in] subtype const char* Subtype of array expected.
  @param[in] precision const size_t Precision of array that is expected.
  @param[out] data void** Pointer to array that should be reallocated to
  store the data.
  @param[out] shape size_t** Pointer to array that should be reallocated to
  store the array shape in each dimension.
  @returns size_t Number of dimensions in the array.
 */
size_t generic_array_get_ndarray(generic_t x, const size_t index,
				 const char *subtype, const size_t precision,
				 void** data, size_t** shape);
/* Typed nd-array getters: fixed subtype/precision variants of
   generic_array_get_ndarray. */
size_t generic_array_get_ndarray_int8(generic_t x, const size_t index, int8_t** data, size_t** shape);
size_t generic_array_get_ndarray_int16(generic_t x, const size_t index, int16_t** data, size_t** shape);
size_t generic_array_get_ndarray_int32(generic_t x, const size_t index, int32_t** data, size_t** shape);
size_t generic_array_get_ndarray_int64(generic_t x, const size_t index, int64_t** data, size_t** shape);
size_t generic_array_get_ndarray_uint8(generic_t x, const size_t index, uint8_t** data, size_t** shape);
size_t generic_array_get_ndarray_uint16(generic_t x, const size_t index, uint16_t** data, size_t** shape);
size_t generic_array_get_ndarray_uint32(generic_t x, const size_t index, uint32_t** data, size_t** shape);
size_t generic_array_get_ndarray_uint64(generic_t x, const size_t index, uint64_t** data, size_t** shape);
size_t generic_array_get_ndarray_float(generic_t x, const size_t index, float** data, size_t** shape);
size_t generic_array_get_ndarray_double(generic_t x, const size_t index, double** data, size_t** shape);
size_t generic_array_get_ndarray_long_double(generic_t x, const size_t index, long double** data, size_t** shape);
size_t generic_array_get_ndarray_complex_float(generic_t x, const size_t index, complex_float_t** data, size_t** shape);
size_t generic_array_get_ndarray_complex_double(generic_t x, const size_t index, complex_double_t** data, size_t** shape);
size_t generic_array_get_ndarray_complex_long_double(generic_t x, const size_t index, complex_long_double_t** data, size_t** shape);
size_t generic_array_get_ndarray_bytes(generic_t x, const
size_t index, char** data, size_t** shape); size_t generic_array_get_ndarray_unicode(generic_t x, const size_t index, char** data, size_t** shape); /*! @brief Get the number of elements in an map object. @param[in] x generic_t Generic object that is presumed to contain a map. @returns size_t Number of elements in map. */ size_t generic_map_get_size(generic_t x); /*! @brief Determine if a map object has a certain key. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key char* Key to check for. @returns int 1 if the key is present, 0 otherwise. */ int generic_map_has_key(generic_t x, char* key); /*! @brief Get the keys in a map object. @param[in] x generic_t Generic object that is presumed to contain a map. @param[out] keys char*** Pointer to memory where array of keys should be stored. @returns size_t Number of keys in map. */ size_t generic_map_get_keys(generic_t x, char*** keys); /*! @brief Get an item from a map for types that don't require additional parameters. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key const char* Key string for value that should be returned. @param[in] type const char* Type of value expected. @returns void* Pointer to data for map item. 
*/ void* generic_map_get_item(generic_t x, const char* key, const char *type); int generic_map_get_item_nbytes(generic_t x, const char* key); bool generic_map_get_bool(generic_t x, const char* key); int generic_map_get_integer(generic_t x, const char* key); void* generic_map_get_null(generic_t x, const char* key); double generic_map_get_number(generic_t x, const char* key); char* generic_map_get_string(generic_t x, const char* key); generic_t generic_map_get_object(generic_t x, const char* key); generic_t generic_map_get_array(generic_t x, const char* key); char* generic_map_get_direct(generic_t x, const char* key); ply_t generic_map_get_ply(generic_t x, const char* key); obj_t generic_map_get_obj(generic_t x, const char* key); python_t generic_map_get_python_class(generic_t x, const char* key); python_t generic_map_get_python_function(generic_t x, const char* key); schema_t generic_map_get_schema(generic_t x, const char* key); generic_t generic_map_get_any(generic_t x, const char* key); /*! @brief Get a scalar value from a map. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key const char* Key string for value that should be returned. @param[in] subtype const char* Subtype of scalar expected. @param[in] precision const int Precision of scalar that is expected. @returns void* Pointer to scalar data. 
*/ void* generic_map_get_scalar(generic_t x, const char* key, const char *subtype, const size_t precision); int8_t generic_map_get_int8(generic_t x, const char* key); int16_t generic_map_get_int16(generic_t x, const char* key); int32_t generic_map_get_int32(generic_t x, const char* key); int64_t generic_map_get_int64(generic_t x, const char* key); uint8_t generic_map_get_uint8(generic_t x, const char* key); uint16_t generic_map_get_uint16(generic_t x, const char* key); uint32_t generic_map_get_uint32(generic_t x, const char* key); uint64_t generic_map_get_uint64(generic_t x, const char* key); float generic_map_get_float(generic_t x, const char* key); double generic_map_get_double(generic_t x, const char* key); long double generic_map_get_long_double(generic_t x, const char* key); complex_float_t generic_map_get_complex_float(generic_t x, const char* key); complex_double_t generic_map_get_complex_double(generic_t x, const char* key); complex_long_double_t generic_map_get_complex_long_double(generic_t x, const char* key); char* generic_map_get_bytes(generic_t x, const char* key); char* generic_map_get_unicode(generic_t x, const char* key); /*! @brief Get a 1d array value from a map. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key const char* Key string for value that should be returned. @param[in] subtype const char* Subtype of array expected. @param[in] precision const size_t Precision of array that is expected. @param[out] data void** Pointer to pointer that should be reallocated to store the data. @returns size_t Number of elements in the data. 
*/ size_t generic_map_get_1darray(generic_t x, const char* key, const char *subtype, const size_t precision, void** data); size_t generic_map_get_1darray_int8(generic_t x, const char* key, int8_t** data); size_t generic_map_get_1darray_int16(generic_t x, const char* key, int16_t** data); size_t generic_map_get_1darray_int32(generic_t x, const char* key, int32_t** data); size_t generic_map_get_1darray_int64(generic_t x, const char* key, int64_t** data); size_t generic_map_get_1darray_uint8(generic_t x, const char* key, uint8_t** data); size_t generic_map_get_1darray_uint16(generic_t x, const char* key, uint16_t** data); size_t generic_map_get_1darray_uint32(generic_t x, const char* key, uint32_t** data); size_t generic_map_get_1darray_uint64(generic_t x, const char* key, uint64_t** data); size_t generic_map_get_1darray_float(generic_t x, const char* key, float** data); size_t generic_map_get_1darray_double(generic_t x, const char* key, double** data); size_t generic_map_get_1darray_long_double(generic_t x, const char* key, long double** data); size_t generic_map_get_1darray_complex_float(generic_t x, const char* key, complex_float_t** data); size_t generic_map_get_1darray_complex_double(generic_t x, const char* key, complex_double_t** data); size_t generic_map_get_1darray_complex_long_double(generic_t x, const char* key, complex_long_double_t** data); size_t generic_map_get_1darray_bytes(generic_t x, const char* key, char** data); size_t generic_map_get_1darray_unicode(generic_t x, const char* key, char** data); /*! @brief Get a nd array value from a map. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key const char* Key string for value that should be returned. @param[in] subtype const char* Subtype of array expected. @param[in] precision const size_t Precision of array that is expected. @param[out] data void** Pointer to array that should be reallocated to store the data. 
@param[out] shape size_t** Pointer to array that should be reallocated to store the array shape in each dimension. @returns size_t Number of dimensions in the array. */ size_t generic_map_get_ndarray(generic_t x, const char* key, const char *subtype, const size_t precision, void** data, size_t** shape); size_t generic_map_get_ndarray_int8(generic_t x, const char* key, int8_t** data, size_t** shape); size_t generic_map_get_ndarray_int16(generic_t x, const char* key, int16_t** data, size_t** shape); size_t generic_map_get_ndarray_int32(generic_t x, const char* key, int32_t** data, size_t** shape); size_t generic_map_get_ndarray_int64(generic_t x, const char* key, int64_t** data, size_t** shape); size_t generic_map_get_ndarray_uint8(generic_t x, const char* key, uint8_t** data, size_t** shape); size_t generic_map_get_ndarray_uint16(generic_t x, const char* key, uint16_t** data, size_t** shape); size_t generic_map_get_ndarray_uint32(generic_t x, const char* key, uint32_t** data, size_t** shape); size_t generic_map_get_ndarray_uint64(generic_t x, const char* key, uint64_t** data, size_t** shape); size_t generic_map_get_ndarray_float(generic_t x, const char* key, float** data, size_t** shape); size_t generic_map_get_ndarray_double(generic_t x, const char* key, double** data, size_t** shape); size_t generic_map_get_ndarray_long_double(generic_t x, const char* key, long double** data, size_t** shape); size_t generic_map_get_ndarray_complex_float(generic_t x, const char* key, complex_float_t** data, size_t** shape); size_t generic_map_get_ndarray_complex_double(generic_t x, const char* key, complex_double_t** data, size_t** shape); size_t generic_map_get_ndarray_complex_long_double(generic_t x, const char* key, complex_long_double_t** data, size_t** shape); size_t generic_map_get_ndarray_bytes(generic_t x, const char* key, char** data, size_t** shape); size_t generic_map_get_ndarray_unicode(generic_t x, const char* key, char** data, size_t** shape); /*! 
@brief Set an item in an array for types that don't require additional parameters. @param[in] x generic_t Generic object that is presumed to contain an array. @param[in] index size_t Index for value that should be set. @param[in] type const char* Type of value being set. @param[in] value void* Pointer to data that item should be set to. @returns int -1 if there is an error, 0 otherwise. */ int generic_array_set_item(generic_t x, const size_t index, const char *type, void* value); int generic_array_set_bool(generic_t x, const size_t index, bool value); int generic_array_set_integer(generic_t x, const size_t index, int value); int generic_array_set_null(generic_t x, const size_t index, void* value); int generic_array_set_number(generic_t x, const size_t index, double value); int generic_array_set_string(generic_t x, const size_t index, char* value); int generic_array_set_object(generic_t x, const size_t index, generic_t value); int generic_array_set_map(generic_t x, const size_t index, generic_t value); int generic_array_set_array(generic_t x, const size_t index, generic_t value); int generic_array_set_direct(generic_t x, const size_t index, char* value); int generic_array_set_ply(generic_t x, const size_t index, ply_t value); int generic_array_set_obj(generic_t x, const size_t index, obj_t value); int generic_array_set_python_class(generic_t x, const size_t index, python_t value); int generic_array_set_python_function(generic_t x, const size_t index, python_t value); int generic_array_set_schema(generic_t x, const size_t index, schema_t value); int generic_array_set_any(generic_t x, const size_t index, generic_t value); /*! @brief Set a scalar value in an array. @param[in] x generic_t Generic object that is presumed to contain an array. @param[in] index size_t Index for value that should be set. @param[in] value void* Pointer to scalar data. @param[in] subtype const char* Subtype of scalar in value. @param[in] precision const int Precision of scalar in value. 
@param[in] units const char* Units of value. @returns int -1 if there is an error, 0 otherwise. */ int generic_array_set_scalar(generic_t x, const size_t index, void* value, const char *subtype, const size_t precision, const char* units); int generic_array_set_int8(generic_t x, const size_t index, int8_t value, const char* units); int generic_array_set_int16(generic_t x, const size_t index, int16_t value, const char* units); int generic_array_set_int32(generic_t x, const size_t index, int32_t value, const char* units); int generic_array_set_int64(generic_t x, const size_t index, int64_t value, const char* units); int generic_array_set_uint8(generic_t x, const size_t index, uint8_t value, const char* units); int generic_array_set_uint16(generic_t x, const size_t index, uint16_t value, const char* units); int generic_array_set_uint32(generic_t x, const size_t index, uint32_t value, const char* units); int generic_array_set_uint64(generic_t x, const size_t index, uint64_t value, const char* units); int generic_array_set_float(generic_t x, const size_t index, float value, const char* units); int generic_array_set_double(generic_t x, const size_t index, double value, const char* units); int generic_array_set_long_double(generic_t x, const size_t index, long double value, const char* units); int generic_array_set_complex_float(generic_t x, const size_t index, complex_float_t value, const char* units); int generic_array_set_complex_double(generic_t x, const size_t index, complex_double_t value, const char* units); int generic_array_set_complex_long_double(generic_t x, const size_t index, complex_long_double_t value, const char* units); int generic_array_set_bytes(generic_t x, const size_t index, char* value, const char* units); int generic_array_set_unicode(generic_t x, const size_t index, char* value, const char* units); /*! @brief Set a 1d array value in an array. @param[in] x generic_t Generic object that is presumed to contain an array. 
@param[in] index size_t Index for value that should be set. @param[in] value void* Pointer to array data. @param[in] subtype const char* Subtype of array expected. @param[in] precision const size_t Precision of array that is expected. @param[in] length const size_t Number of elements in value. @param[in] units const char* Units of value. @returns int -1 if there is an error, 0 otherwise. */ int generic_array_set_1darray(generic_t x, const size_t index, void* value, const char *subtype, const size_t precision, const size_t length, const char* units); int generic_array_set_1darray_int8(generic_t x, const size_t index, int8_t* value, const size_t length, const char* units); int generic_array_set_1darray_int16(generic_t x, const size_t index, int16_t* value, const size_t length, const char* units); int generic_array_set_1darray_int32(generic_t x, const size_t index, int32_t* value, const size_t length, const char* units); int generic_array_set_1darray_int64(generic_t x, const size_t index, int64_t* value, const size_t length, const char* units); int generic_array_set_1darray_uint8(generic_t x, const size_t index, uint8_t* value, const size_t length, const char* units); int generic_array_set_1darray_uint16(generic_t x, const size_t index, uint16_t* value, const size_t length, const char* units); int generic_array_set_1darray_uint32(generic_t x, const size_t index, uint32_t* value, const size_t length, const char* units); int generic_array_set_1darray_uint64(generic_t x, const size_t index, uint64_t* value, const size_t length, const char* units); int generic_array_set_1darray_float(generic_t x, const size_t index, float* value, const size_t length, const char* units); int generic_array_set_1darray_double(generic_t x, const size_t index, double* value, const size_t length, const char* units); int generic_array_set_1darray_long_double(generic_t x, const size_t index, long double* value, const size_t length, const char* units); int 
generic_array_set_1darray_complex_float(generic_t x, const size_t index, complex_float_t* value, const size_t length, const char* units); int generic_array_set_1darray_complex_double(generic_t x, const size_t index, complex_double_t* value, const size_t length, const char* units); int generic_array_set_1darray_complex_long_double(generic_t x, const size_t index, complex_long_double_t* value, const size_t length, const char* units); int generic_array_set_1darray_bytes(generic_t x, const size_t index, char** value, const size_t length, const char* units); int generic_array_set_1darray_unicode(generic_t x, const size_t index, char** value, const size_t length, const char* units); /*! @brief Set a nd array value from an array. @param[in] x generic_t Generic object that is presumed to contain an array. @param[in] index size_t Index for value that should be set. @param[in] data void* Pointer to array data. @param[in] subtype const char* Subtype of array in value. @param[in] precision const size_t Precision of array that is in value. @param[in] ndim size_t Number of dimensions in the array. @param[in] shape size_t* Pointer to array containing the size of the array in each dimension. @param[in] units const char* Units that should be added to the array. @returns int -1 if there is an error, 0 otherwise. 
*/ int generic_array_set_ndarray(generic_t x, const size_t index, void* data, const char *subtype, const size_t precision, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_int8(generic_t x, const size_t index, int8_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_int16(generic_t x, const size_t index, int16_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_int32(generic_t x, const size_t index, int32_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_int64(generic_t x, const size_t index, int64_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_uint8(generic_t x, const size_t index, uint8_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_uint16(generic_t x, const size_t index, uint16_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_uint32(generic_t x, const size_t index, uint32_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_uint64(generic_t x, const size_t index, uint64_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_float(generic_t x, const size_t index, float* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_double(generic_t x, const size_t index, double* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_long_double(generic_t x, const size_t index, long double* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_complex_float(generic_t x, const size_t index, complex_float_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_complex_double(generic_t x, 
const size_t index, complex_double_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_complex_long_double(generic_t x, const size_t index, complex_long_double_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_bytes(generic_t x, const size_t index, char** data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_unicode(generic_t x, const size_t index, char** data, const size_t ndim, const size_t* shape, const char* units); /*! @brief Set an item from a map for types that don't require additional parameters. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key const char* Key string for value that should be set. @param[in] type const char* Type of value being set. @param[in] value void* Pointer to data that item should be set to. @returns int -1 if there is an error, 0 otherwise. */ int generic_map_set_item(generic_t x, const char* key, const char* type, void* value); int generic_map_set_bool(generic_t x, const char* key, bool value); int generic_map_set_integer(generic_t x, const char* key, int value); int generic_map_set_null(generic_t x, const char* key, void* value); int generic_map_set_number(generic_t x, const char* key, double value); int generic_map_set_string(generic_t x, const char* key, char* value); int generic_map_set_object(generic_t x, const char* key, generic_t value); int generic_map_set_map(generic_t x, const char* key, generic_t value); int generic_map_set_array(generic_t x, const char* key, generic_t value); int generic_map_set_direct(generic_t x, const char* key, char* value); int generic_map_set_ply(generic_t x, const char* key, ply_t value); int generic_map_set_obj(generic_t x, const char* key, obj_t value); int generic_map_set_python_class(generic_t x, const char* key, python_t value); int generic_map_set_python_function(generic_t x, const char* key, python_t value); int 
generic_map_set_schema(generic_t x, const char* key, schema_t value); int generic_map_set_any(generic_t x, const char* key, generic_t value); /*! @brief Set a scalar value in a map. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key const char* Key string for value that should be set. @param[in] value void* Pointer to scalar data. @param[in] subtype const char* Subtype of scalar in value. @param[in] precision const int Precision of scalar in value. @param[in] units const char* Units of value. @returns int -1 if there is an error, 0 otherwise. */ int generic_map_set_scalar(generic_t x, const char* key, void* value, const char *subtype, const size_t precision, const char* units); int generic_map_set_int8(generic_t x, const char* key, int8_t value, const char* units); int generic_map_set_int16(generic_t x, const char* key, int16_t value, const char* units); int generic_map_set_int32(generic_t x, const char* key, int32_t value, const char* units); int generic_map_set_int64(generic_t x, const char* key, int64_t value, const char* units); int generic_map_set_uint8(generic_t x, const char* key, uint8_t value, const char* units); int generic_map_set_uint16(generic_t x, const char* key, uint16_t value, const char* units); int generic_map_set_uint32(generic_t x, const char* key, uint32_t value, const char* units); int generic_map_set_uint64(generic_t x, const char* key, uint64_t value, const char* units); int generic_map_set_float(generic_t x, const char* key, float value, const char* units); int generic_map_set_double(generic_t x, const char* key, double value, const char* units); int generic_map_set_long_double(generic_t x, const char* key, long double value, const char* units); int generic_map_set_complex_float(generic_t x, const char* key, complex_float_t value, const char* units); int generic_map_set_complex_double(generic_t x, const char* key, complex_double_t value, const char* units); int 
generic_map_set_complex_long_double(generic_t x, const char* key, complex_long_double_t value, const char* units); int generic_map_set_bytes(generic_t x, const char* key, char* value, const char* units); int generic_map_set_unicode(generic_t x, const char* key, char* value, const char* units); /*! @brief Set a 1d array value in a map. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key const char* Key string for value that should be set. @param[in] value void* Pointer to array data. @param[in] subtype const char* Subtype of array expected. @param[in] precision const size_t Precision of array that is expected. @param[in] length const size_t Number of elements in value. @param[in] units const char* Units of value. @returns int -1 if there is an error, 0 otherwise. */ int generic_map_set_1darray(generic_t x, const char* key, void* value, const char *subtype, const size_t precision, const size_t length, const char* units); int generic_map_set_1darray_int8(generic_t x, const char* key, int8_t* value, const size_t length, const char* units); int generic_map_set_1darray_int16(generic_t x, const char* key, int16_t* value, const size_t length, const char* units); int generic_map_set_1darray_int32(generic_t x, const char* key, int32_t* value, const size_t length, const char* units); int generic_map_set_1darray_int64(generic_t x, const char* key, int64_t* value, const size_t length, const char* units); int generic_map_set_1darray_uint8(generic_t x, const char* key, uint8_t* value, const size_t length, const char* units); int generic_map_set_1darray_uint16(generic_t x, const char* key, uint16_t* value, const size_t length, const char* units); int generic_map_set_1darray_uint32(generic_t x, const char* key, uint32_t* value, const size_t length, const char* units); int generic_map_set_1darray_uint64(generic_t x, const char* key, uint64_t* value, const size_t length, const char* units); int generic_map_set_1darray_float(generic_t x, const char* 
key, float* value, const size_t length, const char* units); int generic_map_set_1darray_double(generic_t x, const char* key, double* value, const size_t length, const char* units); int generic_map_set_1darray_long_double(generic_t x, const char* key, long double* value, const size_t length, const char* units); int generic_map_set_1darray_complex_float(generic_t x, const char* key, complex_float_t* value, const size_t length, const char* units); int generic_map_set_1darray_complex_double(generic_t x, const char* key, complex_double_t* value, const size_t length, const char* units); int generic_map_set_1darray_complex_long_double(generic_t x, const char* key, complex_long_double_t* value, const size_t length, const char* units); int generic_map_set_1darray_bytes(generic_t x, const char* key, char** value, const size_t length, const char* units); int generic_map_set_1darray_unicode(generic_t x, const char* key, char** value, const size_t length, const char* units); /*! @brief Set a nd array value in a map. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key const char* Key string for value that should be set. @param[in] data void* Pointer to array data. @param[in] subtype const char* Subtype of array in value. @param[in] precision const size_t Precision of array that is in value. @param[in] ndim size_t Number of dimensions in the array. @param[in] shape size_t* Pointer to array containing the size of the array in each dimension. @param[in] units const char* Units that should be added to the array. @returns int -1 if there is an error, 0 otherwise. 
*/ int generic_map_set_ndarray(generic_t x, const char* key, void* data, const char *subtype, const size_t precision, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_int8(generic_t x, const char* key, int8_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_int16(generic_t x, const char* key, int16_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_int32(generic_t x, const char* key, int32_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_int64(generic_t x, const char* key, int64_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_uint8(generic_t x, const char* key, uint8_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_uint16(generic_t x, const char* key, uint16_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_uint32(generic_t x, const char* key, uint32_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_uint64(generic_t x, const char* key, uint64_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_float(generic_t x, const char* key, float* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_double(generic_t x, const char* key, double* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_long_double(generic_t x, const char* key, long double* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_complex_float(generic_t x, const char* key, complex_float_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_complex_double(generic_t x, const char* key, complex_double_t* data, const size_t ndim, const 
size_t* shape, const char* units); int generic_map_set_ndarray_complex_long_double(generic_t x, const char* key, complex_long_double_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_bytes(generic_t x, const char* key, char** data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_unicode(generic_t x, const char* key, char** data, const size_t ndim, const size_t* shape, const char* units); /*! @brief Destroy a structure containing a Python object. @param[in] x python_t* Pointer to Python object structure that should be freed. */ void destroy_python(python_t *x); /*! @brief Copy a Python object structure (NOTE: this doesn't copy the underlying Python object but does increment the reference count). @param[in] x python_t Structure containing Python object to copy. @returns python_t Copy of x. */ python_t copy_python(python_t x); /*! @brief Display a Python object structure. @param[in] x python_t Structure containing Python object to display. */ void display_python(python_t x); /*! @brief Destroy a structure containing a Python function object. @param[in] x python_function_t* Pointer to Python function structure that should be freed. */ void destroy_python_function(python_function_t *x); /*! @brief Skip datatype arguments. @param[in] dtype dtype_t* Type structure to skip arguments for. @param[in, out] nargs Pointer to number of arguments in ap. @param[in, out] ap va_list_t Variable argument list. @returns int 0 if there are no errors, 1 otherwise. */ int skip_va_elements(const dtype_t* dtype, size_t *nargs, va_list_t *ap); /*! @brief Determine if a datatype is empty. @param[in] dtype dtype_t* Type structure to test. @returns int 1 if dtype is empty, 0 otherwise. */ int is_empty_dtype(const dtype_t* dtype); /*! @brief Get the name of the type from the class. @param[in] type_class dtype_t* Type structure/class. @returns const char* Type name. 
*/ const char* dtype_name(const dtype_t* type_class); /*! @brief Get the subtype of the type. @param[in] type_class dtype_t* Type structure/class. @returns const char* The subtype of the class, "" if there is an error. */ const char* dtype_subtype(const dtype_t* type_class); /*! @brief Get the precision of the type. @param[in] type_class dtype_t* Type structure/class. @returns const size_t The precision of the class, 0 if there is an error. */ const size_t dtype_precision(const dtype_t* type_class); /*! @brief Initialize a datatype structure including setting the type string. @param[in] dtype dtype_t* Type structure/class. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Initialized type structure/class. */ dtype_t* complete_dtype(dtype_t *dtype, const bool use_generic); /*! @brief Construct and empty type object. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_empty(const bool use_generic); /*! @brief Create a datatype based on a JSON document. @param type_doc void* Pointer to const rapidjson::Value type doc. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_doc(void* type_doc, const bool use_generic); /*! @brief Create a datatype based on a Python dictionary. @param[in] pyobj PyObject* Python dictionary. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_python(PyObject* pyobj, const bool use_generic); /*! @brief Construct a Direct type object. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. 
*/ dtype_t* create_dtype_direct(const bool use_generic); /*! @brief Construct a type object for one of the default JSON types. @param[in] type char* Name of the type. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_default(const char* type, const bool use_generic); /*! @brief Construct a Scalar type object. @param[in] subtype char* Name of the scalar subtype (e.g. int, uint, float, bytes). @param[in] precision size_t Precision of the scalar in bits. @param[in] units char* Units for scalar. (e.g. "cm", "g", "" for unitless) @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_scalar(const char* subtype, const size_t precision, const char* units, const bool use_generic); /*! @brief Construct a 1D array type object. @param[in] subtype char* Name of the array subtype (e.g. int, uint, float, bytes). @param[in] precision size_t Precision of the array in bits. @param[in] length size_t Number of elements in the array. @param[in] units char* Units for array elements. (e.g. "cm", "g", "" for unitless) @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_1darray(const char* subtype, const size_t precision, const size_t length, const char* units, const bool use_generic); /*! @brief Construct a ND array type object. @param[in] subtype char* Name of the array subtype (e.g. int, uint, float, bytes). @param[in] precision size_t Precision of the array in bits. @param[in] ndim size_t Number of dimensions in the array (and therefore also the number of elements in shape). @param[in] shape size_t* Pointer to array where each element is the size of the array in that dimension. 
@param[in] units char* Units for array elements. (e.g. "cm", "g", "" for unitless) @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_ndarray(const char* subtype, const size_t precision, const size_t ndim, const size_t* shape, const char* units, const bool use_generic); /*! @brief Construct a ND array type object. @param[in] subtype char* Name of the array subtype (e.g. int, uint, float, bytes). @param[in] precision size_t Precision of the array in bits. @param[in] ndim size_t Number of dimensions in the array (and therefore also the number of elements in shape). @param[in] shape[] size_t Array where each element is the size of the array in that dimension. @param[in] units char* Units for array elements. (e.g. "cm", "g", "" for unitless) @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_ndarray_arr(const char* subtype, const size_t precision, const size_t ndim, const int64_t shape[], const char* units, const bool use_generic); /*! @brief Construct a JSON array type object. @param[in] nitems size_t Number of types in items. @param[in] items dtype_t** Pointer to array of types describing the array elements. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_json_array(const size_t nitems, dtype_t** items, const bool use_generic); /*! @brief Construct a JSON object type object. @param[in] nitems size_t Number of keys/types in keys and values. @param[in] keys char** Pointer to array of keys for each type. @param[in] values dtype_t** Pointer to array of types describing the values for each key. 
@param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_json_object(const size_t nitems, char** keys, dtype_t** values, const bool use_generic); /*! @brief Construct a Ply type object. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_ply(const bool use_generic); /*! @brief Construct a Obj type object. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_obj(const bool use_generic); /*! @brief Construct an AsciiTable type object. @param[in] format_str const char* C-style format string that will be used to determine the type of elements in arrays that will be serialized/deserialized using the resulting type. @param[in] as_array int If 1, the types will be arrays. Otherwise they will be scalars. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_ascii_table(const char *format_str, const int as_array, const bool use_generic); /*! @brief Construct a type object based on the provided format string. @param[in] format_str const char* C-style format string that will be used to determine the type of elements in arrays that will be serialized/deserialized using the resulting type. @param[in] as_array int If 1, the types will be arrays. Otherwise they will be scalars. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_format(const char *format_str, const int as_array, const bool use_generic); /*! @brief Construct a type object for Python objects. 
@param[in] type char* Type string. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_pyobj(const char* type, const bool use_generic); /*! @brief Construct a type object for Python object instances. @param[in] class_name char* Python class name. @param[in] args_dtype dtype_t* Datatype describing the arguments creating the instance. @param[in] kwargs_dtype dtype_t* Datatype describing the keyword arguments creating the instance. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_pyinst(const char* class_name, const dtype_t* args_dtype, const dtype_t* kwargs_dtype, const bool use_generic); /*! @brief Construct a type object for a schema. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_schema(const bool use_generic); /*! @brief Construct a type object for receiving any type. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_any(const bool use_generic); /*! @brief Wrapper for freeing MetaschemaType class wrapper struct. @param[in] dtype dtype_t** Wrapper struct for C++ Metaschema type class. @returns: int 0 if free was successfull, -1 if there was an error. */ int destroy_dtype(dtype_t** dtype); /*! @brief Initialize a header struct. @param[in] size size_t Size of message to be sent. @param[in] address char* Address that should be used for remainder of message following this header if it is a multipart message. @param[in] id char* Message ID. @returns comm_head_t Structure with provided information, char arrays correctly initialized to empty strings if NULLs provided. 
*/ static inline comm_head_t init_header(const size_t size, const char *address, const char *id) { comm_head_t out; // Parameters set during read out.bodysiz = 0; out.bodybeg = 0; out.flags = HEAD_FLAG_VALID; out.nargs_populated = 0; // Parameters sent in header out.size = size; if (address == NULL) out.address[0] = '\0'; else strncpy(out.address, address, COMMBUFFSIZ); if (id == NULL) out.id[0] = '\0'; else strncpy(out.id, id, COMMBUFFSIZ); out.response_address[0] = '\0'; out.request_id[0] = '\0'; out.zmq_reply[0] = '\0'; out.zmq_reply_worker[0] = '\0'; out.model[0] = '\0'; // Parameters that will be removed out.serializer_type = -1; out.format_str[0] = '\0'; // Parameters used for type out.dtype = NULL; return out; }; /*! @brief Destroy a header object. @param[in] x comm_head_t* Pointer to the header that should be destroyed. @returns int 0 if successful, -1 otherwise. */ static inline int destroy_header(comm_head_t* x) { int ret = 0; if (x->dtype != NULL) { ret = destroy_dtype(&(x->dtype)); } return ret; }; /*! @brief Split header and body of message. @param[in] buf const char* Message that should be split. @param[in] buf_siz size_t Size of buf. @param[out] head const char** pointer to buffer where the extracted header should be stored. @param[out] headsiz size_t reference to memory where size of extracted header should be stored. @returns: int 0 if split is successful, -1 if there was an error. 
 */
// Locate the header section of a serialized message.  A header is text
// delimited by a pair of MSG_HEAD_SEP markers at the start of the
// message.  On success *head is (re)allocated to hold a NUL-terminated
// copy of the header text (markers stripped) and *headsiz is set to its
// length; a message with no header yields an empty *head and *headsiz 0.
static inline
int split_head_body(const char *buf, const size_t buf_siz,
		    char **head, size_t *headsiz) {
  // Split buffer into head and body
  int ret;
  size_t sind, eind, sind_head, eind_head;
  sind = 0;
  eind = 0;
#ifdef _WIN32
  // Windows regex of newline is buggy
  UNUSED(buf_siz);
  // Match the two separator markers individually rather than with a
  // single "SEP(.*)SEP" pattern (used on the non-Windows branch below).
  size_t sind1, eind1, sind2, eind2;
  char re_head_tag[COMMBUFFSIZ];
  sprintf(re_head_tag, "(%s)", MSG_HEAD_SEP);
  ret = find_match(re_head_tag, buf, &sind1, &eind1);
  if (ret > 0) {
    sind = sind1;
    // Search for the closing marker starting after the opening one;
    // offsets from the second search are relative to buf + eind1.
    ret = find_match(re_head_tag, buf + eind1, &sind2, &eind2);
    if (ret > 0)
      eind = eind1 + eind2;
  }
#else
  // Extract just header
  char re_head[COMMBUFFSIZ] = MSG_HEAD_SEP;
  strcat(re_head, "(.*)");
  strcat(re_head, MSG_HEAD_SEP);
  // strcat(re_head, ".*");
  ret = find_match(re_head, buf, &sind, &eind);
#endif
  if (ret < 0) {
    ygglog_error("split_head_body: Could not find header in '%.1000s'", buf);
    return -1;
  } else if (ret == 0) {
    // No separator pair found: treat the entire buffer as body.
    ygglog_debug("split_head_body: No header in '%.1000s...'", buf);
    sind_head = 0;
    eind_head = 0;
  } else {
    // Strip the separator markers from both ends of the matched span.
    sind_head = sind + strlen(MSG_HEAD_SEP);
    eind_head = eind - strlen(MSG_HEAD_SEP);
  }
  headsiz[0] = (eind_head - sind_head);
  // Grow (or allocate) the caller's buffer via a temporary pointer so
  // the original allocation is not leaked if realloc fails.
  char* temp = (char*)realloc(*head, *headsiz + 1);
  if (temp == NULL) {
    ygglog_error("split_head_body: Failed to reallocate header.");
    return -1;
  }
  *head = temp;
  memcpy(*head, buf + sind_head, *headsiz);
  (*head)[*headsiz] = '\0';
  return 0;
};

/*!
  @brief Format header to a string.
  @param[in] head comm_head_t* Pointer to header to be formatted.
  @param[out] buf char ** Pointer to buffer where header should be written.
  @param[in] buf_siz size_t Size of buf.
  @param[in] max_header_size size_t Maximum size that header can occupy
  before the type should be moved to the data portion of the message.
  @param[in] no_type int If 1, type information will not be added to the
  header. If 0, it will be.
  @returns: int Size of header written.
*/
int format_comm_header(comm_head_t *head, char **buf, size_t buf_siz,
		       const size_t max_header_size, const int no_type);

/*!
@brief Extract type from data and updated header. @param[in] buf char** Pointer to data containing type. @param[in] buf_siz size_t Size of buf. @param[in,out] head comm_head_t* Pointer to header structure that should be updated. @returns: int -1 if there is an error, size of adjusted data that dosn't include type otherwise. */ int parse_type_in_data(char **buf, const size_t buf_siz, comm_head_t* head); /*! @brief Extract header information from a string. @param[in] buf const char* Message that header should be extracted from. @param[in] buf_siz size_t Size of buf. @returns: comm_head_t Header information structure. */ comm_head_t parse_comm_header(const char *buf, const size_t buf_siz); /*! @brief Get the ascii table data structure. @param[in] dtype dtype_t* Wrapper struct for C++ Metaschema type class. @returns: void* Cast pointer to ascii table. */ void* dtype_ascii_table(const dtype_t* dtype); /*! @brief Get a copy of a type structure. @param[in] dtype dtype_t* Wrapper struct for C++ Metaschema type class. @returns: dtype_t* Type class. */ dtype_t* copy_dtype(const dtype_t* dtype); /*! @brief Wrapper for updating a type object with information from another. @param[in] dtype1 dtype_t* Wrapper struct for C++ Metaschema type class that should be updated. @param[in] dtype2 dtype_t* Wrapper struct for C++ Metaschema type class that should be updated from. @returns: int 0 if successfull, -1 if there was an error. */ int update_dtype(dtype_t* dtype1, dtype_t* dtype2); /*! @brief Wrapper for updatining a type object with information from the provided variable arguments if a generic structure is present. @param[in] dtype1 dtype_t* Wrapper struct for C++ Metaschema type class that should be updated. @param[in] nargs size_t Number of arguments in ap. @param[in] ap va_list_t Variable argument list. @returns: int 0 if successfull, -1 if there was an error. */ int update_dtype_from_generic_ap(dtype_t* dtype1, size_t nargs, va_list_t ap); /*! 
@brief Wrapper for updating the precision of a bytes or unicode scalar type. @param[in] dtype dtype_t* Wrapper struct for C++ Metaschema type class. @param[in] new_precision size_t New precision. @returns: int 0 if free was successfull, -1 if there was an error. */ int update_precision_dtype(const dtype_t* dtype, const size_t new_precision); /*! @brief Wrapper for deserializing from a data type. @param[in] dtype dtype_t* Wrapper struct for C++ Metaschema type class. @param[in] buf character pointer to serialized message. @param[in] buf_siz size_t Size of buf. @param[in] allow_realloc int If 1, variables being filled are assumed to be pointers to pointers for heap memory. If 0, variables are assumed to be pointers to stack memory. If allow_realloc is set to 1, but stack variables are passed, a segfault can occur. @param[in, out] nargs int Number of arguments remaining in argument list. @param[in] ap va_list Arguments to be parsed from message. returns: int The number of populated arguments. -1 indicates an error. */ int deserialize_dtype(const dtype_t *dtype, const char *buf, const size_t buf_siz, const int allow_realloc, size_t *nargs, va_list_t ap); /*! @brief Wrapper for serializing from a data type. @param[in] dtype dtype_t* Wrapper struct for C++ Metaschema type class. @param[in] buf character pointer to pointer to memory where serialized message should be stored. @param[in] buf_siz size_t Size of memory allocated to buf. @param[in] allow_realloc int If 1, buf will be realloced if it is not big enough to hold the serialized emssage. If 0, an error will be returned. @param[in, out] nargs int Number of arguments remaining in argument list. @param[in] ap va_list Arguments to be formatted. returns: int The length of the serialized message or -1 if there is an error. */ int serialize_dtype(const dtype_t *dtype, char **buf, size_t *buf_siz, const int allow_realloc, size_t *nargs, va_list_t ap); /*! @brief Wrapper for displaying a data type. 
  @param[in] dtype dtype_t* Wrapper struct for C++ Metaschema type class.
  @param[in] indent char* Indentation to add to display output.
 */
void display_dtype(const dtype_t *dtype, const char* indent);

/*!
  @brief Wrapper for determining how many arguments a data type expects.
  @param[in] dtype dtype_t* Wrapper struct for C++ Metaschema type class.
  @returns size_t Expected number of arguments.
 */
size_t nargs_exp_dtype(const dtype_t *dtype);

/* Convenience aliases: JSON objects/arrays and schemas are all backed by
   the generic container, so their lifecycle calls map directly onto the
   generic_t API (init_generic_map/init_generic_array/init_generic, and
   the shared destroy/copy/display functions). */
#define free_generic destroy_generic
#define init_json_object init_generic_map
#define init_json_array init_generic_array
#define init_schema init_generic
#define free_json_object free_generic
#define free_json_array free_generic
#define free_schema free_generic
#define copy_json_object copy_generic
#define copy_json_array copy_generic
#define copy_schema copy_generic
#define display_json_object display_generic
#define display_json_array display_generic
#define display_schema display_generic

#ifdef __cplusplus /* If this is a C++ compiler, end C linkage */
}
#endif

#endif /*DATATYPES_H_*/
interpolate_v2_op.h
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <algorithm> #include <string> #include <vector> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/hostdevice.h" namespace paddle { namespace operators { template <typename T, size_t D, int MajorType = Eigen::RowMajor, typename IndexType = Eigen::DenseIndex> using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>; using Tensor = framework::Tensor; using DataLayout = framework::DataLayout; inline std::vector<int> get_new_shape( const std::vector<const Tensor*>& list_new_shape_tensor) { // get tensor from std::vector<int> vec_new_shape; for (size_t i = 0; i < list_new_shape_tensor.size(); ++i) { auto tensor = list_new_shape_tensor[i]; PADDLE_ENFORCE_EQ(tensor->dims(), framework::make_ddim({1}), platform::errors::InvalidArgument( "The shape of dimension tensor should be [1]," "but received d%.", tensor->dims())); if (platform::is_gpu_place(tensor->place())) { framework::Tensor temp; TensorCopySync(*tensor, platform::CPUPlace(), &temp); vec_new_shape.push_back(static_cast<int32_t>(*temp.data<int32_t>())); } else { vec_new_shape.push_back(static_cast<int32_t>(*tensor->data<int32_t>())); } } return vec_new_shape; } template <typename T> inline std::vector<T> get_new_data_from_tensor(const Tensor* new_data_tensor) { std::vector<T> vec_new_data; auto* new_data = 
      new_data_tensor->data<T>();
  framework::Tensor cpu_starts_tensor;
  // Device memory cannot be dereferenced on the host; stage a CPU copy.
  if (platform::is_gpu_place(new_data_tensor->place())) {
    TensorCopySync(*new_data_tensor, platform::CPUPlace(), &cpu_starts_tensor);
    new_data = cpu_starts_tensor.data<T>();
  }
#ifdef PADDLE_WITH_ASCEND_CL
  // Same CPU staging for Ascend NPU tensors.
  if (platform::is_npu_place(new_data_tensor->place())) {
    TensorCopySync(*new_data_tensor, platform::CPUPlace(), &cpu_starts_tensor);
    new_data = cpu_starts_tensor.data<T>();
  }
#endif
  vec_new_data = std::vector<T>(new_data, new_data + new_data_tensor->numel());
  return vec_new_data;
}

// Decompose a 3-D/4-D/5-D tensor shape into batch (N), channels (C) and
// spatial extents (D, H, W) according to the data layout.  Spatial
// dimensions absent from the rank are reported as 1 (a 3-D input has
// D = H = 1).
inline void ExtractNCDWH(const framework::DDim& dims,
                         const DataLayout& data_layout, int* N, int* C, int* D,
                         int* H, int* W) {
  *N = dims[0];
  if (dims.size() == 3) {
    // Rank 3: NCW vs NWC
    *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[2];
    *D = 1;
    *H = 1;
    *W = data_layout == DataLayout::kNCHW ? dims[2] : dims[1];
  } else if (dims.size() == 4) {
    // Rank 4: NCHW vs NHWC
    *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[3];
    *D = 1;
    *H = data_layout == DataLayout::kNCHW ? dims[2] : dims[1];
    *W = data_layout == DataLayout::kNCHW ? dims[3] : dims[2];
  } else {
    // Rank 5: NCDHW vs NDHWC
    *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[4];
    *D = data_layout == DataLayout::kNCHW ? dims[2] : dims[1];
    *H = data_layout == DataLayout::kNCHW ? dims[3] : dims[2];
    *W = data_layout == DataLayout::kNCHW ? dims[4] : dims[3];
  }
}

// Nearest-neighbor resize of a 4-D tensor.  With align_corners the
// source index is rounded to nearest (+0.5 then truncate); otherwise it
// is truncated toward zero.
template <typename T>
static void NearestNeighborInterpolate(const Tensor& input, Tensor* output,
                                       const float ratio_h, const float ratio_w,
                                       const int n, const int c,
                                       const int out_h, const int out_w,
                                       const bool align_corners,
                                       const DataLayout& data_layout) {
  auto input_t = EigenTensor<T, 4>::From(input);
  auto output_t = EigenTensor<T, 4>::From(*output);
  for (int k = 0; k < out_h; k++) {  // loop for images
    int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5)
                               : static_cast<int>(ratio_h * k);
    for (int l = 0; l < out_w; l++) {
      int in_l = (align_corners) ?
static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { output_t(i, j, k, l) = input_t(i, j, in_k, in_l); } else { output_t(i, k, l, j) = input_t(i, in_k, in_l, j); } } } } } } template <typename T> static void NearestNeighbor3DInterpolate( const Tensor& input, Tensor* output, const float ratio_d, const float ratio_h, const float ratio_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const DataLayout& data_layout) { auto input_t = EigenTensor<T, 5>::From(input); auto output_t = EigenTensor<T, 5>::From(*output); for (int d = 0; d < out_d; d++) { // loop for images int in_d = (align_corners) ? static_cast<int>(ratio_d * d + 0.5) : static_cast<int>(ratio_d * d); for (int k = 0; k < out_h; k++) { int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? 
static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { output_t(i, j, d, k, l) = input_t(i, j, in_d, in_k, in_l); } else { // NDHWC output_t(i, d, k, l, j) = input_t(i, in_d, in_k, in_l, j); } } } } } } } template <typename T> static void LinearInterpolation(const Tensor& input, Tensor* output, const float ratio_w, const int in_w, const int n, const int c, const int out_w, const bool align_corners, const bool align_mode, const DataLayout data_layout) { auto input_t = EigenTensor<T, 3>::From(input); auto output_t = EigenTensor<T, 3>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; // w int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w; // w_id float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; // w1lambda float d_e = 1.f - d_w; // w2lambda { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(3) #endif for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels for (int l = 0; l < out_w; l++) { // linear interpolation T out_t; if (data_layout == DataLayout::kNCHW) { out_t = input_t(i, j, vx_w[l]) * vd_e[l] + input_t(i, j, vx_e[l]) * vd_w[l]; output_t(i, j, l) = out_t; } else { out_t = input_t(i, vx_w[l], j) * vd_e[l] + input_t(i, vx_e[l], j) * vd_w[l]; output_t(i, l, j) = out_t; } } } } } template <typename T> static void LinearInterpolationGrad(const Tensor& output_grad, Tensor* input_grad, const float ratio_w, const int in_w, const int n, const int c, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 3>::From(*input_grad); auto output_grad_t = EigenTensor<T, 3>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; // w int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w; // w_id float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; // w1lambda float d_e = 1.f - d_w; // w2lambda for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // linear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(i, j, l); input_grad_t(i, j, x_w) += static_cast<T>(grad * d_e); input_grad_t(i, j, x_e) += static_cast<T>(grad * d_w); } else { const T grad = output_grad_t(i, l, j); input_grad_t(i, x_w, j) += static_cast<T>(grad * d_e); input_grad_t(i, x_e, j) += static_cast<T>(grad * d_w); } } } } } template <typename T> static void BilinearInterpolation(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const bool align_mode, const DataLayout data_layout) { auto input_t = EigenTensor<T, 4>::From(input); auto output_t = EigenTensor<T, 4>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vy_n, vy_s; std::vector<float> vd_n, vd_s; vy_n.reserve(out_h); vy_s.reserve(out_h); vd_n.reserve(out_h); vd_s.reserve(out_h); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int k = 0; k < out_h; k++) { int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; { vy_n[k] = y_n; vy_s[k] = y_s; vd_n[k] = d_n; vd_s[k] = d_s; } } std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = (align_mode == 0 && !align_corners) ? 
static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(4) #endif for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels for (int k = 0; k < out_h; k++) { // loop for images for (int l = 0; l < out_w; l++) { // bilinear interpolation T out_t; if (data_layout == DataLayout::kNCHW) { out_t = input_t(i, j, vy_n[k], vx_w[l]) * vd_s[k] * vd_e[l] + input_t(i, j, vy_s[k], vx_w[l]) * vd_n[k] * vd_e[l] + input_t(i, j, vy_n[k], vx_e[l]) * vd_s[k] * vd_w[l] + input_t(i, j, vy_s[k], vx_e[l]) * vd_n[k] * vd_w[l]; output_t(i, j, k, l) = out_t; } else { out_t = input_t(i, vy_n[k], vx_w[l], j) * vd_s[k] * vd_e[l] + input_t(i, vy_s[k], vx_w[l], j) * vd_n[k] * vd_e[l] + input_t(i, vy_n[k], vx_e[l], j) * vd_s[k] * vd_w[l] + input_t(i, vy_s[k], vx_e[l], j) * vd_n[k] * vd_w[l]; output_t(i, k, l, j) = out_t; } } } } } } template <typename T> static void TrilinearInterpolation( const Tensor& input, Tensor* output, const float ratio_d, const float ratio_h, const float ratio_w, const int in_d, const int in_h, const int in_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const bool align_mode, const DataLayout& data_layout) { auto input_t = EigenTensor<T, 5>::From(input); auto output_t = EigenTensor<T, 5>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vt_f, vt_b; std::vector<float> vd_f, vd_b; vt_f.reserve(out_d); vt_b.reserve(out_d); vd_f.reserve(out_d); vd_b.reserve(out_d); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int j = 0; j 
< out_d; j++) { int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5) : static_cast<int>(ratio_d * j); t_f = (t_f > 0) ? t_f : 0; int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1); float idx_src_t = ratio_d * (j + 0.5) - 0.5; idx_src_t = (idx_src_t > 0) ? idx_src_t : 0; float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f; float d_b = 1.f - d_f; { vt_f[j] = t_f; vt_b[j] = t_b; vd_f[j] = d_f; vd_b[j] = d_b; } } std::vector<int> vy_n, vy_s; std::vector<float> vd_n, vd_s; vy_n.reserve(out_h); vy_s.reserve(out_h); vd_n.reserve(out_h); vd_s.reserve(out_h); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int k = 0; k < out_h; k++) { int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; { vy_n[k] = y_n; vy_s[k] = y_s; vd_n[k] = d_n; vd_s[k] = d_s; } } std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = (align_mode == 0 && !align_corners) ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(5) #endif for (int b = 0; b < n; b++) { // loop for batches for (int i = 0; i < c; i++) { // loop for channels for (int j = 0; j < out_d; j++) { // loop for D, H, W for (int k = 0; k < out_h; k++) { for (int l = 0; l < out_w; l++) { // trilinear interpolation if (data_layout == DataLayout::kNCHW) { T out_t = input_t(b, i, vt_f[j], vy_n[k], vx_w[l]) * vd_b[j] * vd_s[k] * vd_e[l] + input_t(b, i, vt_f[j], vy_n[k], vx_e[l]) * vd_b[j] * vd_s[k] * vd_w[l] + input_t(b, i, vt_f[j], vy_s[k], vx_w[l]) * vd_b[j] * vd_n[k] * vd_e[l] + input_t(b, i, vt_f[j], vy_s[k], vx_e[l]) * vd_b[j] * vd_n[k] * vd_w[l] + input_t(b, i, vt_b[j], vy_n[k], vx_w[l]) * vd_f[j] * vd_s[k] * vd_e[l] + input_t(b, i, vt_b[j], vy_n[k], vx_e[l]) * vd_f[j] * vd_s[k] * vd_w[l] + input_t(b, i, vt_b[j], vy_s[k], vx_w[l]) * vd_f[j] * vd_n[k] * vd_e[l] + input_t(b, i, vt_b[j], vy_s[k], vx_e[l]) * vd_f[j] * vd_n[k] * vd_w[l]; output_t(b, i, j, k, l) = out_t; } else { T out_t = input_t(b, vt_f[j], vy_n[k], vx_w[l], i) * vd_b[j] * vd_s[k] * vd_e[l] + input_t(b, vt_f[j], vy_n[k], vx_e[l], i) * vd_b[j] * vd_s[k] * vd_w[l] + input_t(b, vt_f[j], vy_s[k], vx_w[l], i) * vd_b[j] * vd_n[k] * vd_e[l] + input_t(b, vt_f[j], vy_s[k], vx_e[l], i) * vd_b[j] * vd_n[k] * vd_w[l] + input_t(b, vt_b[j], vy_n[k], vx_w[l], i) * vd_f[j] * vd_s[k] * vd_e[l] + input_t(b, vt_b[j], vy_n[k], vx_e[l], i) * vd_f[j] * vd_s[k] * vd_w[l] + input_t(b, vt_b[j], vy_s[k], vx_w[l], i) * vd_f[j] * vd_n[k] * vd_e[l] + input_t(b, vt_b[j], vy_s[k], vx_e[l], i) * vd_f[j] * vd_n[k] * vd_w[l]; output_t(b, j, k, l, i) = out_t; } } } } } } } template <typename T> HOSTDEVICE inline T cubic_convolution1(T x, T A) { return ((A + 2) * x - (A + 3)) * x * x + 1; } template <typename T> HOSTDEVICE inline T cubic_convolution2(T x, T A) { return ((A * x - 5 * A) * x + 8 * A) * x - 4 * 
             A;
}

// Fill coeffs[0..3] with the cubic convolution kernel evaluated at
// distances (1 + t), t, (1 - t) and (2 - t) from the query point, where
// t is in [0, 1).  A = -0.75 is the kernel's free parameter.
template <typename T>
HOSTDEVICE inline void get_cubic_upsample_coefficients(T coeffs[4], T t) {
  T A = -0.75;

  T x1 = t;
  coeffs[0] = cubic_convolution2<T>(x1 + 1.0, A);
  coeffs[1] = cubic_convolution1<T>(x1, A);

  // opposite coefficients
  T x2 = 1.0 - t;
  coeffs[2] = cubic_convolution1<T>(x2, A);
  coeffs[3] = cubic_convolution2<T>(x2 + 1.0, A);
}

// Weighted sum of four consecutive samples using the cubic kernel; t is
// the fractional offset of the query point past x1.
template <typename T>
static inline T cubic_interp(T x0, T x1, T x2, T x3, T t) {
  T coeffs[4];
  get_cubic_upsample_coefficients<T>(coeffs, t);

  return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3];
}

// Bicubic resize of a 4-D tensor: for each output pixel, interpolate
// four rows in the x direction, then interpolate those four results in
// the y direction.  Source indices are clamped to the input bounds.
template <typename T>
static void BicubicInterpolation(const Tensor& input, Tensor* output,
                                 const float ratio_h, const float ratio_w,
                                 const int in_h, const int in_w, const int n,
                                 const int c, const int out_h, const int out_w,
                                 const bool align_corners,
                                 const DataLayout data_layout) {
  auto input_t = EigenTensor<T, 4>::From(input);
  auto output_t = EigenTensor<T, 4>::From(*output);

  for (int k = 0; k < out_h; k++) {  // loop for images
    // Map the output row to a (possibly fractional) source row.
    T y_n = align_corners ? static_cast<T>(ratio_h * k)
                          : static_cast<T>(ratio_h * (k + 0.5) - 0.5);
    int input_y = floorf(y_n);
    const T y_t = y_n - input_y;

    for (int l = 0; l < out_w; l++) {
      T x_n = align_corners ?
static_cast<T>(ratio_w * l) : static_cast<T>(ratio_w * (l + 0.5) - 0.5); int input_x = floorf(x_n); const T x_t = x_n - input_x; for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels T coefficients[4]; // interp 4 times in x direction for (int ii = 0; ii < 4; ii++) { int access_y = std::max(std::min(input_y - 1 + ii, in_h - 1), static_cast<int>(0)); int access_x_0 = std::max(std::min(input_x - 1, in_w - 1), static_cast<int>(0)); int access_x_1 = std::max(std::min(input_x + 0, in_w - 1), static_cast<int>(0)); int access_x_2 = std::max(std::min(input_x + 1, in_w - 1), static_cast<int>(0)); int access_x_3 = std::max(std::min(input_x + 2, in_w - 1), static_cast<int>(0)); if (data_layout == DataLayout::kNCHW) { coefficients[ii] = cubic_interp<T>(input_t(i, j, access_y, access_x_0), input_t(i, j, access_y, access_x_1), input_t(i, j, access_y, access_x_2), input_t(i, j, access_y, access_x_3), x_t); } else { coefficients[ii] = cubic_interp<T>(input_t(i, access_y, access_x_0, j), input_t(i, access_y, access_x_1, j), input_t(i, access_y, access_x_2, j), input_t(i, access_y, access_x_3, j), x_t); } } // interp y direction if (data_layout == DataLayout::kNCHW) { output_t(i, j, k, l) = cubic_interp<T>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t); } else { output_t(i, k, l, j) = cubic_interp<T>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t); } } } } } } template <typename T> static void NearestNeighborInterpolateGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); for (int k = 0; k < out_h; k++) { // loop for images int in_k = (align_corners) ? 
static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { input_grad_t(i, j, in_k, in_l) += output_grad_t(i, j, k, l); } else { input_grad_t(i, in_k, in_l, j) += output_grad_t(i, k, l, j); } } } } } } template <typename T> static void NearestNeighbor3DInterpolateGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_d, const float ratio_h, const float ratio_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 5>::From(*input_grad); auto output_grad_t = EigenTensor<T, 5>::From(output_grad); for (int d = 0; d < out_d; d++) { int in_d = (align_corners) ? static_cast<int>(ratio_d * d + 0.5) : static_cast<int>(ratio_d * d); for (int k = 0; k < out_h; k++) { // loop for images int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? 
static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { input_grad_t(i, j, in_d, in_k, in_l) += output_grad_t(i, j, d, k, l); } else { input_grad_t(i, in_d, in_k, in_l, j) += output_grad_t(i, d, k, l, j); } } } } } } } template <typename T> static void BilinearInterpolationGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int k = 0; k < out_h; k++) { // loop for images int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // bilinear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(i, j, k, l); input_grad_t(i, j, y_n, x_w) += static_cast<T>(grad * d_s * d_e); input_grad_t(i, j, y_s, x_w) += static_cast<T>(grad * d_n * d_e); input_grad_t(i, j, y_n, x_e) += static_cast<T>(grad * d_s * d_w); input_grad_t(i, j, y_s, x_e) += static_cast<T>(grad * d_n * d_w); } else { const T grad = output_grad_t(i, k, l, j); input_grad_t(i, y_n, x_w, j) += static_cast<T>(grad * d_s * d_e); input_grad_t(i, y_s, x_w, j) += static_cast<T>(grad * d_n * d_e); input_grad_t(i, y_n, x_e, j) += static_cast<T>(grad * d_s * d_w); input_grad_t(i, y_s, x_e, j) += static_cast<T>(grad * d_n * d_w); } } } } } } template <typename T> static void TrilinearInterpolationGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_d, const float ratio_h, const float ratio_w, const int in_d, const int in_h, const int in_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 5>::From(*input_grad); auto output_grad_t = EigenTensor<T, 5>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int j = 0; j < out_d; j++) { // loop for D int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5) : static_cast<int>(ratio_d * j); t_f = (t_f > 0) ? t_f : 0; int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1); float idx_src_t = ratio_d * (j + 0.5) - 0.5; idx_src_t = (idx_src_t > 0) ? idx_src_t : 0; float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f; float d_b = 1.f - d_f; for (int k = 0; k < out_h; k++) { // loop for H int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? 
y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; for (int l = 0; l < out_w; l++) { // loop for W int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; for (int b = 0; b < n; b++) { // loop for batches for (int i = 0; i < c; i++) { // loop for channels // trilinear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(b, i, j, k, l); input_grad_t(b, i, t_f, y_n, x_w) += static_cast<T>(grad * d_b * d_s * d_e); input_grad_t(b, i, t_f, y_n, x_e) += static_cast<T>(grad * d_b * d_s * d_w); input_grad_t(b, i, t_f, y_s, x_w) += static_cast<T>(grad * d_b * d_n * d_e); input_grad_t(b, i, t_f, y_s, x_e) += static_cast<T>(grad * d_b * d_n * d_w); input_grad_t(b, i, t_b, y_n, x_w) += static_cast<T>(grad * d_f * d_s * d_e); input_grad_t(b, i, t_b, y_n, x_e) += static_cast<T>(grad * d_f * d_s * d_w); input_grad_t(b, i, t_b, y_s, x_w) += static_cast<T>(grad * d_f * d_n * d_e); input_grad_t(b, i, t_b, y_s, x_e) += static_cast<T>(grad * d_f * d_n * d_w); } else { const T grad = output_grad_t(b, j, k, l, i); input_grad_t(b, t_f, y_n, x_w, i) += static_cast<T>(grad * d_b * d_s * d_e); input_grad_t(b, t_f, y_n, x_e, i) += static_cast<T>(grad * d_b * d_s * d_w); input_grad_t(b, t_f, y_s, x_w, i) += static_cast<T>(grad * d_b * d_n * d_e); input_grad_t(b, t_f, y_s, x_e, i) += static_cast<T>(grad * d_b * d_n * d_w); input_grad_t(b, t_b, y_n, x_w, i) += static_cast<T>(grad * d_f * d_s * d_e); input_grad_t(b, t_b, y_n, x_e, i) += static_cast<T>(grad * d_f * d_s * 
d_w); input_grad_t(b, t_b, y_s, x_w, i) += static_cast<T>(grad * d_f * d_n * d_e); input_grad_t(b, t_b, y_s, x_e, i) += static_cast<T>(grad * d_f * d_n * d_w); } } } } } } } template <typename T> static void BicubicInterpolationGrad(const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); for (int k = 0; k < out_h; k++) { // loop for images T y_n = align_corners ? static_cast<T>(ratio_h * k) : static_cast<T>(ratio_h * (k + 0.5) - 0.5); int input_y = floorf(y_n); T y_t = y_n - input_y; for (int l = 0; l < out_w; l++) { T x_n = align_corners ? static_cast<T>(ratio_w * l) : static_cast<T>(ratio_w * (l + 0.5) - 0.5); int input_x = floorf(x_n); T x_t = x_n - input_x; T x_coeffs[4]; T y_coeffs[4]; get_cubic_upsample_coefficients<T>(x_coeffs, x_t); get_cubic_upsample_coefficients<T>(y_coeffs, y_t); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // bicubic interpolation grad for (int ii = 0; ii < 4; ii++) { for (int jj = 0; jj < 4; jj++) { int access_x = std::max(std::min(input_x - 1 + ii, in_w - 1), static_cast<int>(0)); int access_y = std::max(std::min(input_y - 1 + jj, in_h - 1), static_cast<int>(0)); if (data_layout == DataLayout::kNCHW) { T grad = output_grad_t(i, j, k, l); input_grad_t(i, j, access_y, access_x) += grad * y_coeffs[jj] * x_coeffs[ii]; } else { T grad = output_grad_t(i, k, l, j); input_grad_t(i, access_y, access_x, j) += grad * y_coeffs[jj] * x_coeffs[ii]; } } } } } } } } template <typename T> static void Interpolate1DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = 
framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); float scale_w = -1.; if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } else { // float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale_w = scale_data[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } else { if (scale.size() > 0) { scale_w = scale[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } } if (scale_w > 0.) 
{ out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_w = out_size_data[0]; } } PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_w}; } else { dim_out = {n, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_w = 0.f; if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("linear" == interp_method) { LinearInterpolation<T>(input, output, ratio_w, in_w, n, c, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_h = -1; float scale_w = -1; auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } else { auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = 
ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_h = scale_data[0]; scale_w = scale_data[1]; } else { scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } else { if (scale.size() > 1) { scale_h = scale[0]; scale_w = scale[1]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } } if (scale_h > 0. && scale_w > 0.) 
{ out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_h = out_size_data[0]; out_w = out_size_data[1]; } } PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_h, out_w}; } else { dim_out = {n, out_h, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("bilinear" == interp_method) { BilinearInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighborInterpolate<T>(input, output, ratio_h, ratio_w, n, c, out_h, out_w, align_corners, data_layout); } else if ("bicubic" == interp_method) { BicubicInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_d = -1; float scale_h = -1; float scale_w = -1; auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } else { auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_d = scale_data[0]; scale_h = scale_data[1]; scale_w = scale_data[2]; } else { scale_d = scale_data[0]; scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The 
scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } else { if (scale.size() > 1) { scale_d = scale[0]; scale_h = scale[1]; scale_w = scale[2]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } } if (scale_w > 0. && scale_h > 0. && scale_d > 0.) 
{ out_d = static_cast<int>(in_d * scale_d); out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_d = out_size_data[0]; out_h = out_size_data[1]; out_w = out_size_data[2]; } } PADDLE_ENFORCE_GT(out_d, 0, platform::errors::InvalidArgument( "out_d in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_d, out_h, out_w}; } else { dim_out = {n, out_d, out_h, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { float new_scale_d = 0.f; new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d) : static_cast<float>(in_d) / out_d; ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(new_scale_d); } if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("trilinear" == interp_method) { TrilinearInterpolation<T>(input, output, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n, c, out_d, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighbor3DInterpolate<T>(input, output, ratio_d, ratio_h, ratio_w, n, c, out_d, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate1DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); float scale_w = -1.0; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale_w = scale_data[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } else { if (scale.size() > 0) { scale_w = scale[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } } if (scale_w > 0.) 
{ out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_w = out_size_data[0]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_w}; } else { dim_grad = {n, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); math::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_w = 0.f; if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("linear" == interp_method) { LinearInterpolationGrad<T>(output_grad, input_grad, ratio_w, in_w, n, c, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_h = -1; float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_h = scale_data[0]; scale_w = scale_data[1]; } else { scale_w = scale_data[0]; scale_h = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } else { if (scale.size() > 1) { scale_h = scale[0]; scale_w = scale[1]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, 
platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } } if (scale_h > 0. && scale_w > 0.) { out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_h = out_size_data[0]; out_w = out_size_data[1]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_h, in_w}; } else { dim_grad = {n, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); math::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("bilinear" == interp_method) { BilinearInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighborInterpolateGrad<T>(output_grad, input_grad, ratio_h, ratio_w, n, c, out_h, out_w, align_corners, data_layout); } else if ("bicubic" == interp_method) { BicubicInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_d = -1; float scale_h = -1; float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_d = scale_data[0]; scale_h = scale_data[1]; scale_w = scale_data[2]; } else { scale_d = scale_data[0]; scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, 
platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } else { if (scale.size() > 1) { scale_d = scale[0]; scale_h = scale[1]; scale_w = scale[2]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } } if (scale_d > 0. && scale_h > 0. && scale_w > 0.) 
{ out_d = static_cast<int>(in_d * scale_d); out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_d = out_size_data[0]; out_h = out_size_data[1]; out_w = out_size_data[2]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_d, in_h, in_w}; } else { dim_grad = {n, in_d, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); math::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { float new_scale_d = 0.f; new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d) : static_cast<float>(in_d) / out_d; ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(new_scale_d); } if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("trilinear" == interp_method) { TrilinearInterpolationGrad<T>( output_grad, input_grad, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n, c, out_d, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighbor3DInterpolateGrad<T>(output_grad, input_grad, ratio_d, ratio_h, ratio_w, n, c, out_d, out_h, out_w, align_corners, data_layout); } } template <typename T> class InterpolateV2Kernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<Tensor>("X"); auto* output = ctx.Output<Tensor>("Out"); auto input_dims = input->dims(); if (input_dims.size() == 3) { // 1D interpolation Interpolate1DCPUFwd<T>(ctx, *input, output); } else if (input_dims.size() == 4) { // 2D interpolation Interpolate2DCPUFwd<T>(ctx, *input, output); } else if (input_dims.size() == 5) { // 3D interpolation Interpolate3DCPUFwd<T>(ctx, *input, output); } } }; template <typename T> class InterpolateV2GradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto output_grad_dims = output_grad->dims(); if (output_grad_dims.size() == 3) { // 1D interpolation grad Interpolate1DCPUBwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 4) { // 2D interpolation grad Interpolate2DCPUBwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 5) { // 3D interpolation grad Interpolate3DCPUBwd<T>(ctx, input_grad, *output_grad); } } }; } // namespace operators } // namespace paddle
/* ==================== data.c ==================== */
#include "../mesh.h"
#include "../params.h"
#include "../shared.h"
#include "../umesh.h"
#include <math.h>
#include <stdint.h> // for uint64_t (previously pulled in transitively)
#include <stdio.h>  // for sprintf (previously pulled in transitively)
#include <stdlib.h>

// Checks if two strings match (device-callable replacement for strcmp()==0).
// Returns 1 when str1 and str2 are identical, 0 otherwise.
#pragma acc routine seq
int device_strmatch(const char* str1, const char* str2) {
  int ii = 0;
  for (ii = 0; str1[ii] != '\0'; ++ii) {
    if (str1[ii] != str2[ii]) {
      return 0;
    }
  }
  // Both strings must terminate at the same position to match
  return str1[ii] == str2[ii];
}

// Allocates some double precision data, mirrored and zeroed on the device.
// Returns the number of bytes allocated.
size_t allocate_data(double** buf, size_t len) {
  allocate_host_data(buf, len);

  double* local_buf = *buf;
#pragma acc enter data copyin(local_buf[:len])

#pragma acc parallel
#pragma acc loop independent
  for (size_t ii = 0; ii < len; ++ii) {
    local_buf[ii] = 0.0;
  }

  return sizeof(double) * len;
}

// Allocates some integer data, mirrored and zeroed on the device.
// Returns the number of bytes allocated.
size_t allocate_int_data(int** buf, size_t len) {
  allocate_host_int_data(buf, len);

  int* local_buf = *buf;
#pragma acc enter data copyin(local_buf[:len])

#pragma acc parallel
#pragma acc loop independent
  for (size_t ii = 0; ii < len; ++ii) {
    local_buf[ii] = 0;
  }

  return sizeof(int) * len;
}

// Allocates some uint64 data, mirrored and zeroed on the device.
// Returns the number of bytes allocated.
size_t allocate_uint64_data(uint64_t** buf, size_t len) {
  allocate_host_uint64_data(buf, len);

  uint64_t* local_buf = *buf;
#pragma acc enter data copyin(local_buf[:len])

#pragma acc parallel
#pragma acc loop independent
  for (size_t ii = 0; ii < len; ++ii) {
    local_buf[ii] = 0;
  }

  return sizeof(uint64_t) * len;
}

// Allocates a host copy of some double buffer (aligned when built with INTEL)
void allocate_host_data(double** buf, size_t len) {
#ifdef INTEL
  *buf = (double*)_mm_malloc(sizeof(double) * len, VEC_ALIGN);
#else
  *buf = (double*)malloc(sizeof(double) * len);
#endif

  if (*buf == NULL) {
    TERMINATE("Failed to allocate a data array.\n");
  }
}

// Allocates a host copy of some integer buffer (aligned when built with INTEL)
void allocate_host_int_data(int** buf, size_t len) {
#ifdef INTEL
  *buf = (int*)_mm_malloc(sizeof(int) * len, VEC_ALIGN);
#else
  *buf = (int*)malloc(sizeof(int) * len);
#endif

  if (*buf == NULL) {
    TERMINATE("Failed to allocate a data array.\n");
  }
}

// Allocates a host copy of some uint64 buffer (aligned when built with INTEL)
void allocate_host_uint64_data(uint64_t** buf, const size_t len) {
#ifdef INTEL
  *buf = (uint64_t*)_mm_malloc(sizeof(uint64_t) * len, VEC_ALIGN);
#else
  *buf = (uint64_t*)malloc(sizeof(uint64_t) * len);
#endif

  if (*buf == NULL) {
    TERMINATE("Failed to allocate a data array.\n");
  }
}

// Releases the device mirror of a double buffer
void deallocate_data(double* buf) {
#pragma acc exit data delete(buf)
}

// Releases the device mirror of an integer buffer
void deallocate_int_data(int* buf) {
#pragma acc exit data delete(buf)
}

// Releases the host copy of a buffer
void deallocate_host_data(double* buf) {
#ifdef INTEL
  _mm_free(buf);
#else
  free(buf);
#endif
}

// Synchronise data between host and device.
// send == SEND pushes the host copy to the device, otherwise pulls it back.
void copy_buffer(const size_t len, double** src, double** dst, int send) {
  double* local_src = *src;
  if (send == SEND) {
#pragma acc update device(local_src[:len])
  } else {
#pragma acc update self(local_src[:len])
  }
  *dst = *src;
}

// Move a host buffer onto the device; dst receives the (host) pointer whose
// device mirror has been populated
void move_host_buffer_to_device(const size_t len, double** src, double** dst) {
  double* local_src = *src;
#pragma acc enter data copyin(local_src[:len])
  *dst = local_src;
}

// Initialises 2d mesh data (uniform rectilinear grid) on the device
void mesh_data_init_2d(const int local_nx, const int local_ny,
                       const int global_nx, const int global_ny, const int pad,
                       const int x_off, const int y_off, const double width,
                       const double height, double* edgex, double* edgey,
                       double* edgedx, double* edgedy, double* celldx,
                       double* celldy) {
// Simple uniform rectilinear initialisation
#pragma acc parallel
#pragma acc loop independent
  for (int ii = 0; ii < local_nx + 1; ++ii) {
    edgedx[ii] = width / (global_nx);

    // Note: correcting for padding
    edgex[ii] = edgedx[ii] * (x_off + ii - pad);
  }
#pragma acc parallel
#pragma acc loop independent
  for (int ii = 0; ii < local_nx; ++ii) {
    celldx[ii] = width / (global_nx);
  }
#pragma acc parallel
#pragma acc loop independent
  for (int ii = 0; ii < local_ny + 1; ++ii) {
    edgedy[ii] = height / (global_ny);

    // Note: correcting for padding
    edgey[ii] = edgedy[ii] * (y_off + ii - pad);
  }
#pragma acc parallel
#pragma acc loop independent
  for (int ii = 0; ii < local_ny; ++ii) {
    celldy[ii] = height / (global_ny);
  }
}

// Initialises 3d mesh data by extending the 2d case with a z dimension
void mesh_data_init_3d(const int local_nx, const int local_ny,
                       const int local_nz, const int global_nx,
                       const int global_ny, const int global_nz, const int pad,
                       const int x_off, const int y_off, const int z_off,
                       const double width, const double height,
                       const double depth, double* edgex, double* edgey,
                       double* edgez, double* edgedx, double* edgedy,
                       double* edgedz, double* celldx, double* celldy,
                       double* celldz) {
  // Initialise as in the 2d case
  mesh_data_init_2d(local_nx, local_ny, global_nx, global_ny, pad, x_off, y_off,
                    width, height, edgex, edgey, edgedx, edgedy, celldx, celldy);

// Simple uniform rectilinear initialisation
#pragma acc parallel
#pragma acc loop independent
  for (int ii = 0; ii < local_nz + 1; ++ii) {
    edgedz[ii] = depth / (global_nz);
    edgez[ii] = edgedz[ii] * (z_off + ii - pad);
  }
#pragma acc parallel
#pragma acc loop independent
  for (int ii = 0; ii < local_nz; ++ii) {
    celldz[ii] = depth / (global_nz);
  }
}

// Initialise 2d state data from the problem description file.
// Reads "problem_N" entries and fills density/energy/temperature inside each
// entry's bounding box, then pushes the fields to the device.
void set_problem_2d(const int local_nx, const int local_ny, const int pad,
                    const double mesh_width, const double mesh_height,
                    const double* edgex, const double* edgey, const int ndims,
                    const char* problem_def_filename, double* density,
                    double* energy, double* temperature) {
  char* keys = (char*)malloc(sizeof(char) * MAX_KEYS * MAX_STR_LEN);
  double* values;
  allocate_data(&values, MAX_KEYS);

// Edge coordinates are read on the host below
#pragma acc update host(edgex[:local_nx+1], edgey[:local_ny+1])

  int nentries = 0;
  while (1) {
    char specifier[MAX_STR_LEN];
    sprintf(specifier, "problem_%d", nentries++);

    int nkeys = 0;
    if (!get_key_value_parameter(specifier, problem_def_filename, keys, values,
                                 &nkeys)) {
      break;
    }

    // Push the freshly parsed values to the device mirror
    copy_buffer(MAX_KEYS, &values, &values, SEND);

    // The last four keys are the bound specification
    double xpos = values[nkeys - 4] * mesh_width;
    double ypos = values[nkeys - 3] * mesh_height;
    double width = values[nkeys - 2] * mesh_width;
    double height = values[nkeys - 1] * mesh_height;

// Loop through the mesh and set the problem
#pragma omp parallel for
    for (int ii = pad; ii < local_ny - pad; ++ii) {
      for (int jj = pad; jj < local_nx - pad; ++jj) {
        double global_xpos = edgex[jj];
        double global_ypos = edgey[ii];

        // Check we are in bounds of the problem entry
        if (global_xpos >= xpos && global_ypos >= ypos &&
            global_xpos < xpos + width && global_ypos < ypos + height) {
          // The upper bound excludes the bounding box for the entry
          for (int kk = 0; kk < nkeys - (2 * ndims); ++kk) {
            const char* key = &keys[kk * MAX_STR_LEN];
            if (strmatch(key, "density")) {
              density[ii * local_nx + jj] = values[kk];
            } else if (strmatch(key, "energy")) {
              energy[ii * local_nx + jj] = values[kk];
            } else if (strmatch(key, "temperature")) {
              temperature[ii * local_nx + jj] = values[kk];
            } else {
              TERMINATE("Found unrecognised key in %s.\n",
                        problem_def_filename);
            }
          }
        }
      }
    }
  }

#pragma acc update device(density[:local_nx*local_ny], energy[:local_nx*local_ny], \
                          temperature[:local_nx*local_ny])

  free(keys);
  deallocate_data(values);
}

// Initialise 3d state data from the problem description file (see the 2d
// variant above for the overall structure)
void set_problem_3d(const int local_nx, const int local_ny, const int local_nz,
                    const int pad, const double mesh_width,
                    const double mesh_height, const double mesh_depth,
                    const double* edgex, const double* edgey,
                    const double* edgez, const int ndims,
                    const char* problem_def_filename, double* density,
                    double* energy, double* temperature) {
  char* keys = (char*)malloc(sizeof(char) * MAX_KEYS * MAX_STR_LEN);
  double* values;
  allocate_data(&values, MAX_KEYS);

// Edge coordinates are read on the host below
#pragma acc update host(edgex[:local_nx+1], edgey[:local_ny+1], edgez[:local_nz+1])

  int nentries = 0;
  while (1) {
    char specifier[MAX_STR_LEN];
    sprintf(specifier, "problem_%d", nentries++);

    int nkeys = 0;
    if (!get_key_value_parameter(specifier, problem_def_filename, keys, values,
                                 &nkeys)) {
      break;
    }

    // The last six keys are the bound specification
    double xpos = values[nkeys - 6] * mesh_width;
    double ypos = values[nkeys - 5] * mesh_height;
    double zpos = values[nkeys - 4] * mesh_depth;
    double width = values[nkeys - 3] * mesh_width;
    double height = values[nkeys - 2] * mesh_height;
    double depth = values[nkeys - 1] * mesh_depth;

    int failed = 0;

// Loop through the mesh and set the problem.
// reduction(+:failed) is required: the unrecognised-key counter is
// accumulated from multiple threads (previously an unsynchronised race).
#pragma omp parallel for reduction(+ : failed)
    for (int ii = pad; ii < local_nz - pad; ++ii) {
      for (int jj = pad; jj < local_ny - pad; ++jj) {
        for (int kk = pad; kk < local_nx - pad; ++kk) {
          double global_xpos = edgex[kk];
          double global_ypos = edgey[jj];
          double global_zpos = edgez[ii];

          // Check we are in bounds of the problem entry
          if (global_xpos >= xpos && global_ypos >= ypos &&
              global_zpos >= zpos && global_xpos < xpos + width &&
              global_ypos < ypos + height && global_zpos < zpos + depth) {
            // The upper bound excludes the bounding box for the entry
            for (int ee = 0; ee < nkeys - (2 * ndims); ++ee) {
              const int index =
                  ii * local_nx * local_ny + jj * local_nx + kk;
              const char* key = &keys[ee * MAX_STR_LEN];
              if (device_strmatch(key, "density")) {
                density[index] = values[ee];
              } else if (device_strmatch(key, "energy")) {
                energy[index] = values[ee];
              } else if (device_strmatch(key, "temperature")) {
                temperature[index] = values[ee];
              } else {
                failed++;
              }
            }
          }
        }
      }
    }

    if (failed) {
      TERMINATE("Found unrecognised key in %s.\n", problem_def_filename);
    }
  }

#pragma acc update device(density[:local_nx*local_ny*local_nz])
#pragma acc update device(energy[:local_nx*local_ny*local_nz])
#pragma acc update device(temperature[:local_nx*local_ny*local_nz])

  free(keys);
  deallocate_data(values);
}

// Finds the (2d) outward normals for all boundary nodes of the unstructured
// mesh, and classifies each boundary node as a corner or regular boundary
void find_boundary_normals(UnstructuredMesh* umesh, int* boundary_edge_list) {
  const int nnodes = umesh->nnodes;
  const int nboundary_nodes = umesh->nboundary_nodes;
  const int* boundary_index = umesh->boundary_index;
  const double* nodes_x0 = umesh->nodes_x0;
  const double* nodes_y0 = umesh->nodes_y0;
  int* boundary_type = umesh->boundary_type;
  double* boundary_normal_x = umesh->boundary_normal_x;
  double* boundary_normal_y = umesh->boundary_normal_y;

  // Loop through all of the boundary cells and find their normals
  for (int nn = 0; nn < nnodes; ++nn) {
    const int bi = boundary_index[(nn)];
    if (bi == IS_INTERIOR) {
      continue;
    }

    // Sum the edge normals of every boundary edge touching this node
    double normal_x = 0.0;
    double normal_y = 0.0;
    for (int bb1 = 0; bb1 < nboundary_nodes; ++bb1) {
      const int node0 = boundary_edge_list[bb1 * 2];
      const int node1 = boundary_edge_list[bb1 * 2 + 1];

      if (node0 == nn || node1 == nn) {
        const double node0_x = nodes_x0[(node0)];
        const double node0_y = nodes_y0[(node0)];
        const double node1_x = nodes_x0[(node1)];
        const double node1_y = nodes_y0[(node1)];

        // Perpendicular of the edge vector
        normal_x += node0_y - node1_y;
        normal_y += -(node0_x - node1_x);
      }
    }

    // We are fixed if we are one of the four corners
    if ((nodes_x0[(nn)] == 0.0 || nodes_x0[(nn)] == 1.0) &&
        (nodes_y0[(nn)] == 0.0 || nodes_y0[(nn)] == 1.0)) {
      boundary_type[(bi)] = IS_CORNER;
    } else {
      boundary_type[(bi)] = IS_BOUNDARY;
    }

    const double normal_mag =
        sqrt(normal_x * normal_x + normal_y * normal_y);
    boundary_normal_x[(bi)] = normal_x / normal_mag;
    boundary_normal_y[(bi)] = normal_y / normal_mag;
  }
}
DepthwiseDenseAffine.h
// --------------------------------------------------------------------------
//  Binary Brain  -- binary neural net framework
//
//                                Copyright (C) 2018 by Ryuji Fuchikami
//                                https://github.com/ryuz
//                                ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------

#pragma once

#include <random>

#include "bb/DataType.h"
#include "bb/Model.h"

#ifdef BB_WITH_CUDA
#include "cuda_runtime.h"
#include "cublas_v2.h"
#include "bbcu/bbcu.h"
#endif

namespace bb {

// Depthwise affine layer: the node space is split into m_depth_size
// independent groups ("depths"), each with its own dense W/b affine
// transform (i.e. a block-diagonal dense affine).
template <typename T = float>
class DepthwiseDenseAffine : public Model
{
    using _super = Model;

public:
    static inline std::string ModelName(void) { return "DepthwiseDenseAffine"; }
    static inline std::string ObjectName(void){ return ModelName() + "_" + DataType<T>::Name(); }

    std::string GetModelName(void) const override { return ModelName(); }
    std::string GetObjectName(void) const override { return ObjectName(); }

protected:
    bool                        m_host_only = false;        // force the CPU path even when CUDA is available
    bool                        m_binary_mode = false;      // toggled via the "binary" command

    T                           m_initialize_std = (T)0.01; // std-dev / scale used for parameter init
    std::string                 m_initializer = "";         // "he", "xavier", "normal", "uniform" or "" (default)
    std::mt19937_64             m_mt;                       // RNG for parameter init

    indices_t                   m_input_shape;
    index_t                     m_input_point_size = 0;     // input nodes per depth group
    index_t                     m_input_node_size = 0;      // total input nodes
    indices_t                   m_output_shape;
    index_t                     m_output_point_size = 0;    // output nodes per depth group
    index_t                     m_output_node_size = 0;     // total output nodes
    index_t                     m_depth_size = 0;           // number of independent affine groups

    std::shared_ptr<Tensor>     m_W;   // weights, shape {depth, out_point, in_point}
    std::shared_ptr<Tensor>     m_b;   // biases,  shape {depth, out_point}
    std::shared_ptr<Tensor>     m_dW;  // weight gradients
    std::shared_ptr<Tensor>     m_db;  // bias gradients

#ifdef BB_WITH_CUDA
    bool                        m_cublasEnable = false;
    cublasHandle_t              m_cublasHandle;
#endif

public:
    // construction parameters
    struct create_t
    {
        indices_t       output_shape;           // output shape
        index_t         input_point_size = 0;   // input nodes per depth (0 = derive at SetInputShape)
        index_t         depth_size = 0;         // number of depth groups (0 = derive at SetInputShape)
        T               initialize_std = (T)0.01;
        std::string     initializer = "";
        std::uint64_t   seed = 1;               // RNG seed
    };

protected:
    DepthwiseDenseAffine(create_t const &create)
    {
        m_W = std::make_shared<Tensor>();
        m_b = std::make_shared<Tensor>();
        m_dW = std::make_shared<Tensor>();
        m_db = std::make_shared<Tensor>();

#ifdef BB_WITH_CUDA
        if ( cublasCreate(&m_cublasHandle) == CUBLAS_STATUS_SUCCESS ) {
            m_cublasEnable = true;
        }
#endif

        // BB_ASSERT(!create.output_shape.empty());
        m_initialize_std = create.initialize_std;
        m_initializer = create.initializer;
        m_mt.seed(create.seed);
        m_output_shape = create.output_shape;
        m_output_node_size = CalcShapeSize(m_output_shape);
        m_depth_size = create.depth_size;
        m_input_point_size = create.input_point_size;
    }

    void CommandProc(std::vector<std::string> args) override
    {
        _super::CommandProc(args);

        // binary-mode setting
        if ( args.size() == 2 && args[0] == "binary" )
        {
            m_binary_mode = EvalBool(args[1]);
        }

        // host-only mode setting
        if (args.size() == 2 && args[0] == "host_only")
        {
            m_host_only = EvalBool(args[1]);
        }
    }

    void PrintInfoText(std::ostream& os, std::string indent, int columns, int nest, int depth) const override
    {
        _super::PrintInfoText(os, indent, columns, nest, depth);
        // os << indent << " input shape : " << GetInputShape();
        // os << indent << " output shape : " << GetOutputShape();
        os << indent << " input(" << m_input_point_size << ", " << m_depth_size << ")"
           << " output(" << m_output_point_size << ", " << m_depth_size << ")" << std::endl;
    }

public:
    ~DepthwiseDenseAffine() {
#ifdef BB_WITH_CUDA
        if ( m_cublasEnable ) {
            BB_CUBLAS_SAFE_CALL(cublasDestroy(m_cublasHandle));
            m_cublasEnable = false;
        }
#endif
    }

    static std::shared_ptr<DepthwiseDenseAffine> Create(create_t const &create)
    {
        return std::shared_ptr<DepthwiseDenseAffine>(new DepthwiseDenseAffine(create));
    }

    static std::shared_ptr<DepthwiseDenseAffine> Create(indices_t const &output_shape, index_t input_point_size=0, index_t depth_size=0)
    {
        create_t create;
        create.output_shape = output_shape;
        create.input_point_size = input_point_size;
        create.depth_size = depth_size;
        return Create(create);
    }

    static std::shared_ptr<DepthwiseDenseAffine> Create(index_t output_node_size, index_t input_point_size=0, index_t depth_size=0)
    {
        // NOTE(review): this local create_t is dead — the forwarding call
        // below ignores it and rebuilds the shape from output_node_size.
        create_t create;
        create.output_shape.resize(1);
        create.output_shape[0] = output_node_size;
        return Create(indices_t({output_node_size}), input_point_size, depth_size);
    }

    static std::shared_ptr<DepthwiseDenseAffine> Create(void)
    {
        return Create(create_t());
    }

#ifdef BB_PYBIND11
    static std::shared_ptr<DepthwiseDenseAffine> CreatePy(
            indices_t       output_shape,
            index_t         input_point_size = 0,
            index_t         depth_size = 0,
            T               initialize_std = (T)0.01,
            std::string     initializer = "he",
            std::uint64_t   seed = 1
        )
    {
        create_t create;
        create.output_shape = output_shape;
        create.input_point_size = input_point_size;
        create.depth_size = depth_size;
        create.initialize_std = initialize_std;
        create.initializer = initializer;
        create.seed = seed;
        return Create(create);
    }
#endif

    // direct accessors for the parameters and their gradients
    Tensor       &W(void)        { return *m_W; }
    Tensor const &W(void) const  { return *m_W; }
    Tensor       &b(void)        { return *m_b; }
    Tensor const &b(void) const  { return *m_b; }
    Tensor       &dW(void)       { return *m_dW; }
    Tensor const &dW(void) const { return *m_dW; }
    Tensor       &db(void)       { return *m_db; }
    Tensor const &db(void) const { return *m_db; }

    auto lock_W(void)              { return m_W->Lock<T>(); }
    auto lock_W_const(void) const  { return m_W->LockConst<T>(); }
    auto lock_b(void)              { return m_b->Lock<T>(); }
    auto lock_b_const(void) const  { return m_b->LockConst<T>(); }
    auto lock_dW(void)             { return m_dW->Lock<T>(); }
    auto lock_dW_const(void) const { return m_dW->LockConst<T>(); }
    auto lock_db(void)             { return m_db->Lock<T>(); }
    auto lock_db_const(void) const { return m_db->LockConst<T>(); }

    /**
     * @brief  set the input shape
     * @detail Sets the input shape, derives the depth/point split from the
     *         construction parameters, and (re)initializes W/b/dW/db.
     * @param  shape new input shape
     * @return the resulting output shape
     */
    indices_t SetInputShape(indices_t shape)
    {
        BB_ASSERT(!shape.empty());

        // do nothing if this shape is already configured
        if ( shape == this->GetInputShape() ) {
            return this->GetOutputShape();
        }

        // store the shape
        m_input_shape = shape;
        m_input_node_size = CalcShapeSize(shape);

        // derive depth count when not given explicitly: from the per-depth
        // input size if available, otherwise from the leading output dim
        if ( m_depth_size <= 0 ) {
            if ( m_input_point_size > 0 ) {
                m_depth_size = m_input_node_size / m_input_point_size;
            }
            else {
                m_depth_size = m_output_shape[0];
            }
        }
        BB_ASSERT(m_output_node_size > 0);
        BB_ASSERT(m_depth_size > 0);
        BB_ASSERT(m_output_node_size % m_depth_size == 0);
        BB_ASSERT(m_input_node_size % m_depth_size == 0);
        m_input_point_size = m_input_node_size / m_depth_size;
        m_output_point_size = m_output_node_size / m_depth_size;

        // parameter initialization
        m_W->Resize ({m_depth_size, m_output_point_size, m_input_point_size}, DataType<T>::type);
        m_b->Resize ({m_depth_size, m_output_point_size}, DataType<T>::type);
        m_dW->Resize({m_depth_size, m_output_point_size, m_input_point_size}, DataType<T>::type);
        m_db->Resize({m_depth_size, m_output_point_size}, DataType<T>::type);

        if (m_initializer == "he" || m_initializer == "He") {
            m_initialize_std = (T)std::sqrt(2.0 / (double)m_input_node_size);
            m_W->InitNormalDistribution(0.0, m_initialize_std, m_mt());
            m_b->InitNormalDistribution(0.0, m_initialize_std, m_mt());
        }
        else if (m_initializer == "xavier" || m_initializer == "Xavier" ) {
            m_initialize_std = (T)std::sqrt(1.0 / (double)m_input_node_size);
            m_W->InitNormalDistribution(0.0, m_initialize_std, m_mt());
            m_b->InitNormalDistribution(0.0, m_initialize_std, m_mt());
        }
        else if (m_initializer == "normal" || m_initializer == "Normal" ) {
            m_W->InitNormalDistribution(0.0, m_initialize_std, m_mt());
            m_b->InitNormalDistribution(0.0, m_initialize_std, m_mt());
        }
        else if (m_initializer == "uniform" || m_initializer == "Uniform" ) {
            double k = m_initialize_std * std::sqrt(3);
            m_W->InitUniformDistribution(-k, +k, m_mt());
            m_b->InitUniformDistribution(-k, +k, m_mt());
        }
        else {
            double k = std::sqrt(1.0 / (double)m_input_node_size);
            m_W->InitUniformDistribution(-k, +k, m_mt());
            m_b->InitUniformDistribution(-k, +k, m_mt());
        }

        m_dW->FillZero();
        m_db->FillZero();

        return m_output_shape;
    }

    /**
     * @brief  set the output shape
     * @detail The shape may be anything as long as the total output node
     *         count does not change.
     * @param  shape new shape
     * @return none
     */
    void SetOutputShape(indices_t const &shape)
    {
        BB_ASSERT(CalcShapeSize(shape) == CalcShapeSize(m_output_shape));
        m_output_shape = shape;
    }

    /**
     * @brief  get the input shape
     * @return the input shape
     */
    indices_t GetInputShape(void) const
    {
        return m_input_shape;
    }

    /**
     * @brief  get the output shape
     * @return the output shape
     */
    indices_t GetOutputShape(void) const
    {
        return m_output_shape;
    }

    // trainable parameters (empty when parameters are locked)
    Variables GetParameters(void)
    {
        Variables parameters;
        if ( !this->m_parameter_lock ) {
            parameters.PushBack(m_W);
            parameters.PushBack(m_b);
        }
        return parameters;
    }

    // matching gradients (same order as GetParameters)
    Variables GetGradients(void)
    {
        Variables gradients;
        if ( !this->m_parameter_lock ) {
            gradients.PushBack(m_dW);
            gradients.PushBack(m_db);
        }
        return gradients;
    }

    // Forward pass: per depth group, y = W x + b
    FrameBuffer Forward(FrameBuffer x_buf, bool train = true)
    {
        // save the input for the backward pass
        if ( train ) {
            this->PushFrameBuffer(x_buf);
        }

        // type conversion
        if ( x_buf.GetType() != DataType<T>::type ) {
            x_buf = x_buf.ConvertTo(DataType<T>::type);
        }

        BB_ASSERT(x_buf.GetType() == DataType<T>::type);
        BB_ASSERT(x_buf.GetNodeSize() == m_input_node_size);

        // set the shape on the first call if SetInputShape was not called yet
        // NOTE(review): the assert above already requires the node sizes to
        // match, so with asserts enabled this branch is unreachable and a
        // first call without SetInputShape trips the assert instead — the
        // lazy setup probably belongs before the assert; confirm.
        if (x_buf.GetNodeSize() != m_input_node_size) {
            SetInputShape(x_buf.GetShape());
        }

        // allocate the output
        FrameBuffer y_buf(x_buf.GetFrameSize(), m_output_shape, DataType<T>::type);

#ifdef BB_WITH_CUDA
        if (DataType<T>::type == BB_TYPE_FP32 && m_cublasEnable && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable()) {
            // GPU path: broadcast the bias into y, then one SGEMM per depth
            auto x_ptr = x_buf.LockDeviceMemoryConst();
            auto y_ptr = y_buf.LockDeviceMemory(true);
            auto W_ptr = m_W->LockDeviceMemoryConst();
            auto b_ptr = m_b->LockDeviceMemoryConst();

            bbcu_fp32_MatrixRowwiseSetVector
                (
                    (float const *)b_ptr.GetAddr(),
                    (float       *)y_ptr.GetAddr(),
                    (int          )y_buf.GetNodeSize(),
                    (int          )y_buf.GetFrameSize(),
                    (int          )(y_buf.GetFrameStride() / sizeof(float))
                );

            int x_frame_stride = (int)(x_buf.GetFrameStride() / sizeof(float));
            int y_frame_stride = (int)(y_buf.GetFrameStride() / sizeof(float));

            float alpha = 1.0f;
            float beta = 1.0f;  // accumulate on top of the bias already in y
            for (index_t depth = 0; depth < m_depth_size; ++depth) {
                BB_CUBLAS_SAFE_CALL(cublasSgemm
                    (
                        m_cublasHandle,
                        CUBLAS_OP_N,
                        CUBLAS_OP_N,
                        (int)y_buf.GetFrameSize(),
                        (int)m_output_point_size,   // y_buf.GetNodeSize(),
                        (int)m_input_point_size,    // x_buf.GetNodeSize(),
                        &alpha,
                        (const float *)x_ptr.GetAddr() + (depth * m_input_point_size * x_frame_stride),
                        (int)x_frame_stride,
                        (const float *)W_ptr.GetAddr() + (depth * m_input_point_size * m_output_point_size),
                        (int)m_input_point_size,    // x_buf.GetNodeSize(),
                        &beta,
                        (float *)y_ptr.GetAddr() + (depth * m_output_point_size * y_frame_stride),
                        (int)y_frame_stride
                    ));
            }

            return y_buf;
        }
#endif

        {
            // CPU fallback: naive triple loop, parallel over frames
            auto frame_size = x_buf.GetFrameSize();
            auto x_ptr = x_buf.LockConst<T>();
            auto y_ptr = y_buf.Lock<T>();
            auto W_ptr = lock_W_const();
            auto b_ptr = lock_b_const();

            #pragma omp parallel for
            for (index_t frame = 0; frame < frame_size; ++frame) {
                for (index_t depth = 0; depth < m_depth_size; ++depth) {
                    for (index_t output_point = 0; output_point < m_output_point_size; ++output_point) {
                        index_t output_node = m_output_point_size * depth + output_point;
                        y_ptr.Set(frame, output_node, b_ptr(depth, output_point));
                        for (index_t input_point = 0; input_point < m_input_point_size; ++input_point) {
                            y_ptr.Add(frame, output_node, x_ptr.Get(frame, depth * m_input_point_size + input_point) * W_ptr(depth, output_point, input_point));
                        }
                    }
                }
            }

            return y_buf;
        }
    }

    // Backward pass: computes dx and accumulates dW/db
    FrameBuffer Backward(FrameBuffer dy_buf)
    {
        if (dy_buf.Empty()) {
            // NOTE(review): this assigns nullptr to the shared_ptrs rather
            // than zeroing the gradient tensors; any later dereference of
            // m_dW/m_db would be a null access — presumably FillZero() was
            // intended; confirm.
            m_dW = 0;
            m_db = 0;
            return dy_buf;
        }

        BB_ASSERT(dy_buf.GetType() == DataType<T>::type);

        // frame count
        auto frame_size = dy_buf.GetFrameSize();

        FrameBuffer x_buf = this->PopFrameBuffer();

        // type conversion
        if ( x_buf.GetType() != DataType<T>::type ) {
            x_buf = x_buf.ConvertTo(DataType<T>::type);
        }

        FrameBuffer dx_buf(dy_buf.GetFrameSize(), x_buf.GetShape(), DataType<T>::type);

#ifdef BB_WITH_CUDA
        if (DataType<T>::type == BB_TYPE_FP32 && m_cublasEnable && dy_buf.IsDeviceAvailable() && x_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable()) {
            // GPU path: db = column-wise sum of dy; then per depth,
            // dx = dy W (first SGEMM) and dW += x^T dy (second SGEMM)
            auto dy_ptr = dy_buf.LockDeviceMemoryConst();
            auto x_ptr = x_buf.LockDeviceMemoryConst();
            auto dx_ptr = dx_buf.LockDeviceMemory(true);
            auto W_ptr = m_W->LockDeviceMemoryConst();
            auto b_ptr = m_b->LockDeviceMemoryConst();
            auto dW_ptr = m_dW->LockDeviceMemory();
            auto db_ptr = m_db->LockDeviceMemory();

            bbcu_fp32_MatrixColwiseSum
                (
                    (float const *)dy_ptr.GetAddr(),
                    (float       *)db_ptr.GetAddr(),
                    (int          )dy_buf.GetNodeSize(),
                    (int          )dy_buf.GetFrameSize(),
                    (int          )(dy_buf.GetFrameStride() / sizeof(float))
                );

            int dx_frame_stride = (int)(dx_buf.GetFrameStride() / sizeof(float));
            int dy_frame_stride = (int)(dy_buf.GetFrameStride() / sizeof(float));

            for (index_t depth = 0; depth < m_depth_size; ++depth) {
                float alpha = 1.0f;
                float beta = 0.0f;  // overwrite dx
                BB_CUBLAS_SAFE_CALL(cublasSgemm
                    (
                        m_cublasHandle,
                        CUBLAS_OP_N,
                        CUBLAS_OP_T,
                        (int)dx_buf.GetFrameSize(),
                        (int)m_input_point_size,    // dx_buf.GetNodeSize(),
                        (int)m_output_point_size,   // dy_buf.GetNodeSize(),
                        &alpha,
                        (const float *)dy_ptr.GetAddr() + (depth * m_output_point_size * dy_frame_stride),
                        (int)dy_frame_stride,
                        (const float *)W_ptr.GetAddr() + (depth * m_output_point_size * m_input_point_size),
                        (int)m_input_point_size,    // dx_buf.GetNodeSize(),
                        &beta,
                        (float *)dx_ptr.GetAddr() + (depth * m_input_point_size * dx_frame_stride),
                        (int)dx_frame_stride
                    ));

                beta = 1.0f;  // accumulate into dW
                BB_CUBLAS_SAFE_CALL(cublasSgemm
                    (
                        m_cublasHandle,
                        CUBLAS_OP_T,
                        CUBLAS_OP_N,
                        (int)m_input_point_size,    // dx_buf.GetNodeSize(),
                        (int)m_output_point_size,   // dy_buf.GetNodeSize(),
                        (int)dx_buf.GetFrameSize(),
                        &alpha,
                        (const float *)x_ptr.GetAddr() + (depth * m_input_point_size * dx_frame_stride),
                        (int)dx_frame_stride,
                        (const float *)dy_ptr.GetAddr() + (depth * m_output_point_size * dy_frame_stride),
                        (int)dy_frame_stride,
                        &beta,
                        (float *)dW_ptr.GetAddr() + (depth * m_output_point_size * m_input_point_size),
                        (int)m_input_point_size // dx_buf.GetNodeSize()
                    ));
            }

            return dx_buf;
        }
#endif

        {
            // CPU fallback
            dx_buf.FillZero();

            auto x_ptr = x_buf.LockConst<T>();
            auto dy_ptr = dy_buf.LockConst<T>();
            auto dx_ptr = dx_buf.Lock<T>();
            auto W_ptr = lock_W_const();
            auto b_ptr = lock_b_const();
            auto dW_ptr = lock_dW();
            auto db_ptr = lock_db();

            // NOTE(review): the frame loop is parallelized, but db_ptr/dW_ptr
            // accumulate across frames without synchronization (dx writes are
            // per-frame and safe) — this looks like a data race; confirm.
            #pragma omp parallel for
            for (index_t frame = 0; frame < frame_size; ++frame) {
                for (index_t depth = 0; depth < m_depth_size; ++depth) {
                    for (index_t output_point = 0; output_point < m_output_point_size; ++output_point) {
                        auto output_node = depth * m_output_point_size + output_point;
                        auto grad = dy_ptr.Get(frame, output_node);
                        db_ptr(depth, output_point) += grad;
                        for (index_t input_point = 0; input_point < m_input_point_size; ++input_point) {
                            dx_ptr.Add(frame, depth * m_input_point_size + input_point, grad * W_ptr(depth, output_point, input_point));
                            dW_ptr(depth, output_point, input_point) += grad * x_ptr.Get(frame, depth * m_input_point_size + input_point);
                        }
                    }
                }
            }

            return dx_buf;
        }
    }

    // serialization
protected:
    void DumpObjectData(std::ostream &os) const override
    {
        // version
        std::int64_t ver = 1;
        bb::SaveValue(os, ver);

        // parent class
        _super::DumpObjectData(os);

        // members
        bb::SaveValue(os, m_host_only);
        bb::SaveValue(os, m_binary_mode);
        bb::SaveValue(os, m_initialize_std);
        bb::SaveValue(os, m_initializer);
        bb::SaveValue(os, m_input_shape);
        bb::SaveValue(os, m_output_shape);
        bb::SaveValue(os, m_input_point_size);
        bb::SaveValue(os, m_depth_size);
        m_W->DumpObject(os);
        m_b->DumpObject(os);
    }

    void LoadObjectData(std::istream &is) override
    {
        // version
        std::int64_t ver;
        bb::LoadValue(is, ver);

        BB_ASSERT(ver == 1);

        // parent class
        _super::LoadObjectData(is);

        // members
        bb::LoadValue(is, m_host_only);
        bb::LoadValue(is, m_binary_mode);
        bb::LoadValue(is, m_initialize_std);
        bb::LoadValue(is, m_initializer);
        bb::LoadValue(is, m_input_shape);
        bb::LoadValue(is, m_output_shape);
        bb::LoadValue(is, m_input_point_size);
        bb::LoadValue(is, m_depth_size);
        m_W->LoadObject(is);
        m_b->LoadObject(is);

        // rebuild the derived sizes and gradient tensors
        m_input_node_size = CalcShapeSize(m_input_shape);
        m_output_node_size = CalcShapeSize(m_output_shape);
        if ( !m_input_shape.empty() ) {
            if ( m_depth_size <= 0 ) {
                if ( m_input_point_size > 0 ) {
                    m_depth_size = m_input_node_size / m_input_point_size;
                }
                else {
                    m_depth_size = m_output_shape[0];
                }
            }
            m_input_point_size = m_input_node_size / m_depth_size;
            m_output_point_size = m_output_node_size / m_depth_size;
            m_dW->Resize({m_depth_size, m_output_point_size, m_input_point_size}, DataType<T>::type);
            m_dW->FillZero();
            m_db->Resize({m_depth_size, m_output_point_size}, DataType<T>::type);
            m_db->FillZero();
        }
    }

public:
    // legacy serialization
    void Save(std::ostream &os) const
    {
        SaveValue(os, m_binary_mode);
        SaveIndices(os, m_input_shape);
        SaveIndices(os, m_output_shape);
        m_W->Save(os);
        m_b->Save(os);
    }

    void Load(std::istream &is)
    {
        bb::LoadValue(is, m_binary_mode);
        m_input_shape = bb::LoadIndices(is);
        m_output_shape = bb::LoadIndices(is);
        m_W->Load(is);
        m_b->Load(is);
    }

#ifdef BB_WITH_CEREAL
    template <class Archive>
    void save(Archive& archive, std::uint32_t const version) const
    {
        _super::save(archive, version);
        archive(cereal::make_nvp("binary_mode", m_binary_mode));
        archive(cereal::make_nvp("input_shape", m_input_shape));
        archive(cereal::make_nvp("output_shape", m_output_shape));
        archive(cereal::make_nvp("W", *m_W));
        archive(cereal::make_nvp("b", *m_b));
    }

    template <class Archive>
    void load(Archive& archive, std::uint32_t const version)
    {
        _super::load(archive, version);
        archive(cereal::make_nvp("binary_mode", m_binary_mode));
        archive(cereal::make_nvp("input_shape", m_input_shape));
        archive(cereal::make_nvp("output_shape", m_output_shape));
        m_input_node_size = CalcShapeSize(m_input_shape);
        m_output_node_size = CalcShapeSize(m_output_shape);
        archive(cereal::make_nvp("W", *m_W));
        archive(cereal::make_nvp("b", *m_b));
    }

    void Save(cereal::JSONOutputArchive& archive) const
    {
        archive(cereal::make_nvp("DepthwiseDenseAffine", *this));
    }

    void Load(cereal::JSONInputArchive& archive)
    {
        archive(cereal::make_nvp("DepthwiseDenseAffine", *this));
    }
#endif
};

}
l2norm.c
//-------------------------------------------------------------------------// // // // This benchmark is an OpenMP C version of the NPB LU code. This OpenMP // // C version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the OpenMP Fortran versions in // // "NPB3.3-OMP" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this OpenMP C version to // // cmp@aces.snu.ac.kr // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: cmp@aces.snu.ac.kr // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------// #include <math.h> #include "applu.incl" //--------------------------------------------------------------------- // to compute the l2-norm of vector v. //--------------------------------------------------------------------- //--------------------------------------------------------------------- // To improve cache performance, second two dimensions padded by 1 // for even number sizes only. Only needed in v. 
//--------------------------------------------------------------------- void l2norm (int ldx, int ldy, int ldz, int nx0, int ny0, int nz0, int ist, int iend, int jst, int jend, double v[][ldy/2*2+1][ldx/2*2+1][5], double sum[5]) { //--------------------------------------------------------------------- // local variables //--------------------------------------------------------------------- double sum_local[5]; int i, j, k, m; for (m = 0; m < 5; m++) { sum[m] = 0.0; } #pragma omp parallel default(shared) private(i,j,k,m,sum_local) { for (m = 0; m < 5; m++) { sum_local[m] = 0.0; } #pragma omp for nowait for (k = 1; k < nz0-1; k++) { for (j = jst; j < jend; j++) { for (i = ist; i < iend; i++) { for (m = 0; m < 5; m++) { sum_local[m] = sum_local[m] + v[k][j][i][m] * v[k][j][i][m]; } } } } for (m = 0; m < 5; m++) { #pragma omp atomic sum[m] += sum_local[m]; } } //end parallel for (m = 0; m < 5; m++) { sum[m] = sqrt ( sum[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) ); } }
Example_master.1.c
/* * @@name: master.1c * @@type: C * @@compilable: yes * @@linkable: no * @@expect: success */ #include <stdio.h> extern float average(float,float,float); void master_example( float* x, float* xold, int n, float tol ) { int c, i, toobig; float error, y; c = 0; #pragma omp parallel { do{ #pragma omp for private(i) for( i = 1; i < n-1; ++i ){ xold[i] = x[i]; } #pragma omp single { toobig = 0; } #pragma omp for private(i,y,error) reduction(+:toobig) for( i = 1; i < n-1; ++i ){ y = x[i]; x[i] = average( xold[i-1], x[i], xold[i+1] ); error = y - x[i]; if( error > tol || error < -tol ) ++toobig; } #pragma omp master { ++c; printf( "iteration %d, toobig=%d\n", c, toobig ); } }while( toobig > 0 ); } }
evaluation.c
#include "common.h"

#ifdef _OPENMP
/* One breadth-first-search expansion step (OpenMP version).  Scans the
 * current `frontier` of size `num_frontier`, marks each unvisited neighbour,
 * records its BFS `distance` (= level), and appends it to `next`.  Returns
 * the size of the next frontier.
 * NOTE(review): each thread keeps a VLA of `nodes` ints on its stack
 * (local_frontier); for large graphs this may exhaust thread stacks —
 * confirm expected problem sizes.
 * NOTE(review): bitmap[]/distance[] updates are not synchronized between
 * threads; concurrent discovery of the same node appears tolerated by the
 * algorithm (same level is written) — confirm. */
static int top_down_step(const int level, const int nodes, const int num_frontier,
                         const int degree, const int* restrict adj, int* restrict frontier,
                         int* restrict next, int* restrict distance, char* restrict bitmap)
{
  int count = 0;
  int local_frontier[nodes];
#pragma omp parallel private(local_frontier)
  {
    int local_count = 0;
#pragma omp for nowait
    for(int i=0;i<num_frontier;i++){
      int v = frontier[i];
      for(int j=0;j<degree;j++){
        int n = *(adj + v * degree + j); // adj[v][j];
        if(bitmap[n] == NOT_VISITED){
          bitmap[n] = VISITED;
          distance[n] = level;
          local_frontier[local_count++] = n;
        }
      }
    } // end for i
    /* Serialize the merge of each thread's partial frontier into `next`. */
#pragma omp critical
    {
      memcpy(&next[count], local_frontier, local_count*sizeof(int));
      count += local_count;
    }
  }
  return count;
}
#else
/* Serial fallback of top_down_step: identical semantics, neighbours are
 * appended directly to `next`. */
static int top_down_step(const int level, const int nodes, const int num_frontier,
                         const int degree, const int* restrict adj, int* restrict frontier,
                         int* restrict next, int* restrict distance, char* restrict bitmap)
{
  int count = 0;
  for(int i=0;i<num_frontier;i++){
    int v = frontier[i];
    for(int j=0;j<degree;j++){
      int n = *(adj + v * degree + j); // int n = adj[v][j];
      if(bitmap[n] == NOT_VISITED){
        bitmap[n] = VISITED;
        distance[n] = level;
        next[count++] = n;
      }
    }
  }
  return count;
}
#endif

/* All-pairs shortest paths via repeated BFS, distributed over MPI ranks
 * (each rank handles sources s = rank, rank+procs, ...).  On success fills
 * *diam (graph diameter) and *ASPL (average shortest path length) and
 * returns true; if any node pair is unreachable, returns false and sets
 * *diam = INT_MAX, *ASPL = DBL_MAX.
 * NOTE(review): `sum` weights each distance by a group multiplicity,
 * exploiting the graph's symmetry so only sources in the base block (plus
 * the added centers) need to be searched — confirm against the symmetry
 * model used elsewhere in this project.
 * NOTE(review): on the unreachable path timer_stop(TIMER_APSP) is called
 * here AND again by evaluation() after return — confirm the timer tolerates
 * a double stop. */
static bool bfs(const int nodes, int based_nodes, const int groups, const int lines,
                const int degree, const int* restrict adj, int* restrict diam,
                double* restrict ASPL, const int added_centers)
{
  char *bitmap = malloc(sizeof(char) * nodes);
  int *frontier = malloc(sizeof(int) * nodes);
  int *distance = malloc(sizeof(int) * nodes);
  int *next = malloc(sizeof(int) * nodes);
  bool reached = true;
  double sum = 0.0;
  *diam = 0;

  /* BFS from each source in the base block owned by this rank. */
  for(int s=rank;s<based_nodes;s+=procs){
    int num_frontier = 1, level = 0;
    for(int i=0;i<nodes;i++) bitmap[i] = NOT_VISITED;
    frontier[0] = s;
    distance[s] = level;
    bitmap[s] = VISITED;

    while(1){
      num_frontier = top_down_step(level++, nodes, num_frontier, degree,
                                   adj, frontier, next, distance, bitmap);
      if(num_frontier == 0) break;
      /* ping-pong the frontier buffers */
      int *tmp = frontier;
      frontier = next;
      next = tmp;
    }

    *diam = MAX(*diam, level-1);

    /* Accumulate distances for pairs (s, i) with i > s, weighted by how
       many symmetric copies of the pair exist. */
    for(int i=s+1;i<nodes;i++){
      if(bitmap[i] == NOT_VISITED) reached = false;
      if(i < groups*based_nodes) sum += (distance[i] + 1) * (groups - i/based_nodes);
      else                       sum += (distance[i] + 1) * groups; // for added_centers
    }
  }

  /* Extra BFS sources among the added center nodes, round-robined over the
     ranks starting after the base-block assignment. */
  if(added_centers){
    int start_rank = based_nodes % procs;
    int start_node = based_nodes*groups+rank-start_rank;
    if(start_node < based_nodes*groups) start_node += procs;
    for(int s=start_node;s<nodes;s+=procs){
      int num_frontier = 1, level = 0;
      for(int i=0;i<nodes;i++) bitmap[i] = NOT_VISITED;
      frontier[0] = s;
      distance[s] = level;
      bitmap[s] = VISITED;

      while(1){
        num_frontier = top_down_step(level++, nodes, num_frontier, degree,
                                     adj, frontier, next, distance, bitmap);
        if(num_frontier == 0) break;
        int *tmp = frontier;
        frontier = next;
        next = tmp;
      }

      *diam = MAX(*diam, level-1);

      for(int i=s+1;i<nodes;i++){
        if(bitmap[i] == NOT_VISITED) reached = false;
        sum += distance[i] + 1;
      }
    }
  }

  free(bitmap);
  free(frontier);
  free(distance);
  free(next);

  /* Combine reachability, diameter and distance sums across ranks. */
  MPI_Allreduce(MPI_IN_PLACE, &reached, 1, MPI_C_BOOL, MPI_LAND, MPI_COMM_WORLD);
  if(reached){
    MPI_Allreduce(MPI_IN_PLACE, diam, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
    MPI_Allreduce(MPI_IN_PLACE, &sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    *ASPL = sum / ((((double)nodes-1)*nodes)/2);
  }
  else{
    *diam = INT_MAX;
    *ASPL = DBL_MAX;
    timer_stop(TIMER_APSP);
  }
  return reached;
}

/* All-pairs shortest paths via repeated boolean adjacency-matrix products
 * over 64-bit bitmask columns (one bit per BFS source), distributed over
 * MPI ranks.  Fills *diam and *ASPL; returns false (diam=INT_MAX,
 * ASPL=DBL_MAX) when the graph is disconnected (*diam reaches nodes).
 * `chunk` is the per-rank slice of uint64 words per node row. */
static bool matrix_op(const int nodes, const int based_nodes, const int degree,
                      const int* restrict adj, const int groups, int* restrict diam,
                      double* restrict ASPL, const int added_centers)
{
  unsigned int elements = (based_nodes+(UINT64_BITS-1))/UINT64_BITS;
  unsigned int chunk = (elements+(procs-1))/procs;
  size_t s = nodes*chunk*sizeof(uint64_t);
  uint64_t* A = malloc(s); // uint64_t A[nodes][chunk];
  uint64_t* B = malloc(s); // uint64_t B[nodes][chunk];
  int parsize = (elements+(chunk-1))/chunk;
  double sum = 0.0;
  *diam = 1;

  for(int t=rank;t<parsize;t+=procs){
    uint64_t kk, l;
    clear_buffers(A, B, nodes*chunk);
    /* Seed one bit per source node handled by this tile t. */
    for(l=0; l<UINT64_BITS*chunk && UINT64_BITS*t*chunk+l<based_nodes; l++){
      unsigned int offset = (UINT64_BITS*t*chunk+l)*chunk+l/UINT64_BITS;
      A[offset] = B[offset] = (0x1ULL<<(l%UINT64_BITS));
    }
    /* Iterate B |= A * Adj until all l sources reach every node (popcount
       saturates); kk counts the hop distance. */
    for(kk=0;kk<nodes;kk++){
#pragma omp parallel for
      for(int i=0;i<nodes;i++)
        for(int j=0;j<degree;j++){
          int n = *(adj + i * degree + j); // int n = adj[i][j];
          for(int k=0;k<chunk;k++)
            B[i*chunk+k] |= A[n*chunk+k];
        }
      uint64_t num1 = 0, num2 = 0;
#pragma omp parallel for reduction(+:num1)
      for(int i=0;i<based_nodes*groups*chunk;i++)
        num1 += POPCNT(B[i]);
#pragma omp parallel for reduction(+:num2)
      for(int i=based_nodes*groups*chunk;i<nodes*chunk;i++)
        num2 += POPCNT(B[i]);
      if(num1+num2 == (uint64_t)nodes*l) break;
      // swap A <-> B
      uint64_t* tmp = A;
      A = B;
      B = tmp;
      /* Nodes not yet reached at this hop contribute one more hop each to
         the distance sum, weighted by the group multiplicity. */
      sum += ((double)based_nodes*groups * l - num1) * groups;
      sum += ((double)added_centers * l - num2) * groups * 2;
    }
    *diam = MAX(*diam, kk+1);
  }

  /* Same power iteration, but seeding from the added center nodes. */
  if(added_centers){
    elements = (added_centers+(UINT64_BITS-1))/UINT64_BITS;
    chunk = (elements+(procs-1))/procs;
    parsize = (elements+(chunk-1))/chunk;
    int s = based_nodes % procs;
    int new_rank = (rank - s >= 0)? rank-s : rank-s+procs;
    for(int t=new_rank;t<parsize;t+=procs){
      uint64_t kk, l;
      clear_buffers(A, B, nodes*chunk);
      for(l=0; l<UINT64_BITS*chunk && UINT64_BITS*t*chunk+l<added_centers; l++){
        unsigned int offset = (UINT64_BITS*t*chunk+l+(nodes-added_centers))*chunk+l/UINT64_BITS;
        A[offset] = B[offset] = (0x1ULL<<(l%UINT64_BITS));
      }
      for(kk=0;kk<nodes;kk++){
#pragma omp parallel for
        for(int i=0;i<nodes;i++)
          for(int j=0;j<degree;j++){
            int n = *(adj + i * degree + j); // int n = adj[i][j];
            for(int k=0;k<chunk;k++)
              B[i*chunk+k] |= A[n*chunk+k];
          }
        uint64_t num = 0;
#pragma omp parallel for reduction(+:num)
        for(int i=based_nodes*groups*chunk;i<nodes*chunk;i++)
          num += POPCNT(B[i]);
        if(num == (uint64_t)added_centers*l) break;
        // swap A <-> B
        uint64_t* tmp = A;
        A = B;
        B = tmp;
        sum += ((double)added_centers * l - num);
      }
      *diam = MAX(*diam, kk+1);
    }
  }

  MPI_Allreduce(MPI_IN_PLACE, diam, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
  MPI_Allreduce(MPI_IN_PLACE, &sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  sum += (double)nodes * (nodes - 1); /* every ordered pair contributes at least 1 hop */
  free(A);
  free(B);

  if(*diam < nodes){
    *ASPL = sum / (((double)nodes-1)*nodes);
    return true;
  }
  else{
    *diam = INT_MAX;
    *ASPL = DBL_MAX;
    return false;
  }
}

/* Memory-saving variant of matrix_op: identical algorithm but processes the
 * source bits in fixed-size tiles of CHUNK uint64 words instead of sizing
 * the buffers to cover all sources at once. */
static bool matrix_op_mem_saving(const int nodes, const int based_nodes, const int degree,
                                 const int* restrict adj, const int groups, int* restrict diam,
                                 double* restrict ASPL, const int added_centers)
{
  unsigned int elements = (based_nodes+(UINT64_BITS-1))/UINT64_BITS;
  size_t s = nodes*CHUNK*sizeof(uint64_t);
  uint64_t* A = malloc(s); // uint64_t A[nodes][CHUNK];
  uint64_t* B = malloc(s); // uint64_t B[nodes][CHUNK];
  int parsize = (elements+(CHUNK-1))/CHUNK;
  double sum = 0.0;
  *diam = 1;

  for(int t=rank;t<parsize;t+=procs){
    unsigned int kk, l;
    clear_buffers(A, B, nodes*CHUNK);
    for(l=0; l<UINT64_BITS*CHUNK && UINT64_BITS*t*CHUNK+l<based_nodes; l++){
      unsigned int offset = (UINT64_BITS*t*CHUNK+l)*CHUNK+l/UINT64_BITS;
      A[offset] = B[offset] = (0x1ULL<<(l%UINT64_BITS));
    }
    for(kk=0;kk<nodes;kk++){
#pragma omp parallel for
      for(int i=0;i<nodes;i++)
        for(int j=0;j<degree;j++){
          int n = *(adj + i * degree + j); // int n = adj[i][j];
          for(int k=0;k<CHUNK;k++)
            B[i*CHUNK+k] |= A[n*CHUNK+k];
        }
      uint64_t num1 = 0, num2 = 0;
#pragma omp parallel for reduction(+:num1)
      for(int i=0;i<based_nodes*groups*CHUNK;i++)
        num1 += POPCNT(B[i]);
#pragma omp parallel for reduction(+:num2)
      for(int i=based_nodes*groups*CHUNK;i<nodes*CHUNK;i++)
        num2 += POPCNT(B[i]);
      if(num1+num2 == (uint64_t)nodes*l) break;
      // swap A <-> B
      uint64_t* tmp = A;
      A = B;
      B = tmp;
      sum += ((double)based_nodes*groups * l - num1) * groups;
      sum += ((double)added_centers * l - num2) * groups * 2;
    }
    *diam = MAX(*diam, kk+1);
  }

  if(added_centers){
    elements = (added_centers+(UINT64_BITS-1))/UINT64_BITS;
    parsize = (elements+(CHUNK-1))/CHUNK;
    int s = based_nodes % procs;
    int new_rank = (rank - s >= 0)? rank-s : rank-s+procs;
    for(int t=new_rank;t<parsize;t+=procs){
      unsigned int kk, l;
      clear_buffers(A, B, nodes*CHUNK);
      for(l=0; l<UINT64_BITS*CHUNK && UINT64_BITS*t*CHUNK+l<added_centers; l++){
        unsigned int offset = (UINT64_BITS*t*CHUNK+l+(nodes-added_centers))*CHUNK+l/UINT64_BITS;
        A[offset] = B[offset] = (0x1ULL<<(l%UINT64_BITS));
      }
      for(kk=0;kk<nodes;kk++){
#pragma omp parallel for
        for(int i=0;i<nodes;i++)
          for(int j=0;j<degree;j++){
            int n = *(adj + i * degree + j); // int n = adj[i][j];
            for(int k=0;k<CHUNK;k++)
              B[i*CHUNK+k] |= A[n*CHUNK+k];
          }
        uint64_t num = 0;
#pragma omp parallel for reduction(+:num)
        for(int i=based_nodes*groups*CHUNK;i<nodes*CHUNK;i++)
          num += POPCNT(B[i]);
        if(num == (uint64_t)added_centers*l) break;
        // swap A <-> B
        uint64_t* tmp = A;
        A = B;
        B = tmp;
        sum += ((double)added_centers * l - num);
      }
      *diam = MAX(*diam, kk+1);
    }
  }

  MPI_Allreduce(MPI_IN_PLACE, diam, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
  MPI_Allreduce(MPI_IN_PLACE, &sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  sum += (double)nodes * (nodes - 1);
  free(A);
  free(B);

  if(*diam < nodes){
    *ASPL = sum / (((double)nodes-1)*nodes);
    return true;
  }
  else{
    *diam = INT_MAX;
    *ASPL = DBL_MAX;
    return false;
  }
}

/* Public entry point: times and dispatches the diameter/ASPL evaluation to
 * the BFS, matrix, or memory-saving matrix implementation selected by
 * `algo`.  Returns true iff the graph is connected. */
bool evaluation(const int nodes, int based_nodes, const int groups, const int lines,
                const int degree, int* restrict adj, int* restrict diam,
                double* restrict ASPL, const int added_centers, const int algo)
{
  timer_start(TIMER_APSP);
  bool ret;
  if(algo == BFS)
    ret = bfs(nodes, based_nodes, groups, lines, degree, adj, diam, ASPL, added_centers);
  else if(algo == MATRIX_OP)
    ret = matrix_op(nodes, based_nodes, degree, adj, groups, diam, ASPL, added_centers);
  else // (algo == MATRIX_OP_MEM_SAVING)
    ret = matrix_op_mem_saving(nodes, based_nodes, degree, adj, groups, diam, ASPL, added_centers);
  timer_stop(TIMER_APSP);
  return ret;
}
openmp-test.c
/* Copyright (c) 1997-2019 OpenMP Architecture Review Board. All rights reserved. Permission to redistribute and use without fee all or part of the source codes and the associated document (the Software), with or without modification, is granted, provided that the following conditions are met: * Redistributions of the software must retain the above copyright notice, this list of conditions and the following disclaimer. * Neither the name of the OpenMP Architecture Review Board nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE OPENMP ARCHITECTURE REVIEW BOARD "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OPENMP ARCHITECTURE REVIEW BOARD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * @@name: fpriv_sections.1c * @@type: C * @@compilable: yes * @@linkable: yes * @@expect: success */ #include <omp.h> #include <stdio.h> #define NT 4 int main( ) { int section_count = 0; omp_set_dynamic(0); omp_set_num_threads(NT); #pragma omp parallel #pragma omp sections firstprivate( section_count ) { #pragma omp section { section_count++; /* may print the number one or two */ printf( "section_count %d\n", section_count ); } #pragma omp section { section_count++; /* may print the number one or two */ printf( "section_count %d\n", section_count ); } } return 0; }
draw.c
#include "image.h"

#include <assert.h>
#include <stdlib.h>

// Stamp the constant `val` into every band of the texel under each point in
// `pts` (point coordinates are normalized to [0,1)).  Points that map
// outside the target image are skipped.
void heman_draw_points(heman_image* target, heman_points* pts, HEMAN_FLOAT val)
{
    HEMAN_FLOAT* src = pts->data;
    for (int k = 0; k < pts->width; k++) {
        HEMAN_FLOAT x = src[0];
        HEMAN_FLOAT y = src[1];
        src += pts->nbands;
        int i = x * target->width;
        int j = y * target->height;
        if (i < 0 || i >= target->width || j < 0 || j >= target->height) {
            continue;
        }
        HEMAN_FLOAT* texel = heman_image_texel(target, i, j);
        for (int c = 0; c < target->nbands; c++) {
            *texel++ = val;
        }
    }
}

// Plot one 0xAARRGGBB color per point into a 3- or 4-band image,
// converting each 8-bit channel to a [0,1] float.  Out-of-image points
// are skipped.
void heman_draw_colored_points(
    heman_image* target, heman_points* pts, const heman_color* colors)
{
    assert(target->nbands == 3 || target->nbands == 4);
    HEMAN_FLOAT* src = pts->data;
    HEMAN_FLOAT inv = 1.0f / 255.0f;
    for (int k = 0; k < pts->width; k++) {
        HEMAN_FLOAT x = src[0];
        HEMAN_FLOAT y = src[1];
        src += pts->nbands;
        int i = x * target->width;
        int j = y * target->height;
        if (i < 0 || i >= target->width || j < 0 || j >= target->height) {
            continue;
        }
        HEMAN_FLOAT* texel = heman_image_texel(target, i, j);
        heman_color rgb = colors[k];
        *texel++ = (HEMAN_FLOAT)((rgb >> 16) & 0xff) * inv;
        *texel++ = (HEMAN_FLOAT)((rgb >> 8) & 0xff) * inv;
        *texel++ = (HEMAN_FLOAT)(rgb & 0xff) * inv;
        if (target->nbands == 4) {
            *texel = (HEMAN_FLOAT)(rgb >> 24) * inv;
        }
    }
}

// Rasterize a filled circle of the given pixel radius for each point, using
// one color per point.  Only the RGB bands are written; any alpha band is
// left untouched (NOTE(review): intentional? colored_points writes alpha).
void heman_draw_colored_circles(heman_image* target, heman_points* pts,
    int radius, const heman_color* colors)
{
    int fwidth = radius * 2 + 1;
    int radius2 = radius * radius;
    HEMAN_FLOAT* src = pts->data;
    HEMAN_FLOAT inv = 1.0f / 255.0f;
    int w = target->width;
    int h = target->height;
    for (int k = 0; k < pts->width; k++) {
        HEMAN_FLOAT x = src[0];
        HEMAN_FLOAT y = src[1];
        src += pts->nbands;
        int ii = x * w - radius;
        int jj = y * h - radius;
        heman_color rgb = colors[k];
        for (int kj = 0; kj < fwidth; kj++) {
            for (int ki = 0; ki < fwidth; ki++) {
                int i = ii + ki;
                int j = jj + kj;
                // FIX: clip to the image bounds like heman_draw_splats
                // does; previously circles near an edge wrote out of
                // bounds through heman_image_texel.
                if (i < 0 || i >= w || j < 0 || j >= h) {
                    continue;
                }
                int r2 = SQR(i - x * w) + SQR(j - y * h);
                if (r2 > radius2) {
                    continue;
                }
                HEMAN_FLOAT* texel = heman_image_texel(target, i, j);
                // FIX: mask the red byte (was `rgb >> 16` with no & 0xff,
                // which leaked the alpha byte into the red channel);
                // now consistent with heman_draw_colored_points.
                *texel++ = (HEMAN_FLOAT)((rgb >> 16) & 0xff) * inv;
                *texel++ = (HEMAN_FLOAT)((rgb >> 8) & 0xff) * inv;
                *texel = (HEMAN_FLOAT)(rgb & 0xff) * inv;
            }
        }
    }
}

// Additively splat a (2*radius+1)^2 gaussian kernel into every band of the
// image at each point.  `blend_mode` is currently unused.
void heman_draw_splats(
    heman_image* target, heman_points* pts, int radius, int blend_mode)
{
    int fwidth = radius * 2 + 1;
    HEMAN_FLOAT* gaussian_splat = malloc(fwidth * fwidth * sizeof(HEMAN_FLOAT));
    generate_gaussian_splat(gaussian_splat, fwidth);
    HEMAN_FLOAT* src = pts->data;
    int w = target->width;
    int h = target->height;
    for (int i = 0; i < pts->width; i++) {
        HEMAN_FLOAT x = *src++;
        HEMAN_FLOAT y = *src++;
        int ii = x * w - radius;
        int jj = y * h - radius;
        for (int kj = 0; kj < fwidth; kj++) {
            for (int ki = 0; ki < fwidth; ki++) {
                int i = ii + ki;
                int j = jj + kj;
                if (i < 0 || i >= w || j < 0 || j >= h) {
                    continue;
                }
                HEMAN_FLOAT* texel = heman_image_texel(target, i, j);
                for (int c = 0; c < target->nbands; c++) {
                    *texel++ += gaussian_splat[kj * fwidth + ki];
                }
            }
        }
    }
    free(gaussian_splat);
}

void heman_internal_draw_seeds(heman_image* target, heman_points* pts,
    int filterd);

// Overwrite with `rgb` every pixel whose distance-from-seed value lies
// strictly inside (mind, maxd).  The seed distances are rendered into a
// temporary 1-band image by heman_internal_draw_seeds.
void heman_draw_contour_from_points(heman_image* target, heman_points* coords,
    heman_color rgb, float mind, float maxd, int filterd)
{
    assert(target->nbands == 3 || target->nbands == 4);
    int width = target->width;
    int height = target->height;
    heman_image* seed = heman_image_create(width, height, 1);
    heman_image_clear(seed, 0);
    heman_internal_draw_seeds(seed, coords, filterd);
    HEMAN_FLOAT inv = 1.0f / 255.0f;
    HEMAN_FLOAT r = (HEMAN_FLOAT)((rgb >> 16) & 0xff) * inv;
    HEMAN_FLOAT g = (HEMAN_FLOAT)((rgb >> 8) & 0xff) * inv;
    HEMAN_FLOAT b = (HEMAN_FLOAT)(rgb & 0xff) * inv;
    HEMAN_FLOAT a = 1;
    if (target->nbands == 4) {
        a = (HEMAN_FLOAT)(rgb >> 24) * inv;
    }
#pragma omp parallel for
    for (int y = 0; y < height; y++) {
        HEMAN_FLOAT* dst = target->data + y * width * target->nbands;
        for (int x = 0; x < width; x++) {
            HEMAN_FLOAT dist = *heman_image_texel(seed, x, y);
            if (dist > mind && dist < maxd) {
                dst[0] = r;
                dst[1] = g;
                dst[2] = b;
                if (target->nbands == 4) {
                    dst[3] = a;
                }
            }
            dst += target->nbands;
        }
    }
    heman_points_destroy(seed);
}
openmp.c
#include<omp.h>

/* Real dot product of a and b (length N), parallelized with an OpenMP
 * sum reduction. */
double dot(double* a, double* b, long N){
    double acc = 0.0;
#pragma omp parallel for reduction(+: acc)
    for(long k = 0; k < N; k++)
        acc += a[k] * b[k];
    return acc;
}

/* Complex inner product c = conj(a) . b for interleaved (re,im) arrays of
 * N complex elements; writes {re, im} into c[0..1]. */
void cdot(double* c, double* a, double* b, long N){
    double re = 0.0, im = 0.0;
#pragma omp parallel for reduction(+: re, im)
    for(long k = 0; k < N; k++){
        const double ar = a[2*k], ai = a[2*k+1];
        const double br = b[2*k], bi = b[2*k+1];
        re += ar * br + ai * bi;
        im += ar * bi - ai * br;
    }
    c[0] = re;
    c[1] = im;
}

/* Triple product c = conj(x)^T A y for interleaved complex data; x has M
 * elements, y has N, and column n of A starts at A[2*n*N].
 * NOTE(review): the column stride uses N rather than M — confirm the
 * intended memory layout of A when M != N. */
void cdot3(double* c, double* x, double* A, double* y, long M, long N){
    double sr = 0.0, si = 0.0;
#pragma omp parallel for reduction(+: sr, si)
    for (long n = 0; n < N; n++){
        /* t = conj(x) . A[:,n] */
        double tr = 0.0, ti = 0.0;
        const long base = 2 * n * N;
        for(long m = 0; m < M; m++){
            const double xr = x[2*m], xi = x[2*m+1];
            const double Ar = A[base + 2*m], Ai = A[base + 2*m + 1];
            tr += xr * Ar + xi * Ai;
            ti += xr * Ai - xi * Ar;
        }
        /* accumulate t * y[n] (complex multiply) */
        const double yr = y[2*n], yi = y[2*n+1];
        sr += tr * yr - ti * yi;
        si += tr * yi + ti * yr;
    }
    c[0] = sr;
    c[1] = si;
}

/* Valid (no-padding) 2D correlation of the M-by-N image A with a fixed
 * 5x5 kernel K; the (M-4)-by-(N-4) result is written to B. */
void conv(double* B, double* A, double* K, long M, long N){
    const long offset = 2; /* kernel half-width: 5x5 window */
#pragma omp parallel for collapse(2)
    for (long i = offset; i < N-offset; i++){
        for (long j = offset; j < M-offset; j++){
            double acc = 0.0;
            for (long dk = -offset; dk <= offset; dk++){
                for (long dl = -offset; dl <= offset; dl++){
                    acc += A[(j+dl) + (i+dk)*M]
                         * K[(dl+offset) + (dk+offset)*(2*offset+1)];
                }
            }
            B[(j-offset) + (i-offset) * (M-2*offset)] = acc;
        }
    }
}
Wind_dir.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <math.h>
#include "grb2.h"
#include "wgrib2.h"
#include "fnlist.h"

/*
 * Wind dir - U then V in input file
 *
 * v 0.1 experimental based on Wind_speed.c
 *
 * 3/2009: Public Domain: Wesley Ebisuzaki (wind_speed.c)
 * 1/2013: Public Domain: Wesley Ebisuzaki (wind_dir.c)
 *
 */

extern int decode, file_append, save_translation;
extern unsigned int nx_, ny_;
extern int flush_mode;
extern int use_scale, dec_scale, bin_scale, wanted_bits, max_bits;
extern enum output_grib_type grib_type;

/*
 * HEADER:100:wind_dir:output:1:calculate wind direction, X = output gribfile (direction in degrees, 0=wind from north, 90=wind from east)
 */

/*
 * wgrib2 callback for the -wind_dir option.  Invoked once per GRIB record
 * with `mode` = -1 (init), -2 (cleanup), or >= 0 (process record).  It
 * buffers each UGRD record, then when the matching VGRD record arrives,
 * computes the meteorological wind direction atan2(U,V)*180/pi + 180 and
 * writes the derived field to the output file given as arg1.
 * State is carried across calls in a heap-allocated local_struct stored
 * via the *local pointer provided by the ARG1 macro.
 */
int f_wind_dir(ARG1) {

    /* per-option persistent state */
    struct local_struct {
        float *val;                    /* saved U-component data */
        int has_u;                     /* nonzero when val/clone_sec hold a U record */
        unsigned char *clone_sec[9];   /* copy of the U record's GRIB sections */
        struct seq_file out;           /* output grib file */
    };
    struct local_struct *save;

    unsigned int i;
    int is_u;
    float *d1, *data_tmp;
    int discipline, mastertab, parmcat, parmnum;

    if (mode == -1) {                  // initialization
        save_translation = decode = 1; /* request decoded data in output order */

        // allocate static variables

        *local = save = (struct local_struct *) malloc( sizeof(struct local_struct));
        if (save == NULL) fatal_error("memory allocation -wind_dir","");

        if (fopen_file(&(save->out), arg1, file_append ? "ab" : "wb") != 0) {
            free(save);
            fatal_error("Could not open %s", arg1);
        }
        save->has_u = 0;
        init_sec(save->clone_sec);
        return 0;
    }

    save = *local;

    if (mode == -2) {                  // cleanup
        if (save->has_u == 1) {
            free(save->val);
            free_sec(save->clone_sec);
        }
        fclose_file(&(save->out));
        free(save);
        return 0;
    }

    if (mode >= 0) {                   // processing

        // get variable name parameters
        discipline = GB2_Discipline(sec);
        mastertab = GB2_MasterTable(sec);
        parmcat = GB2_ParmCat(sec);
        parmnum = GB2_ParmNum(sec);

        /* NOTE(review): debug strings below say "-wind_speed"; left over
           from Wind_speed.c, which this file was derived from. */
        if (mode == 99) fprintf(stderr,"-wind_speed %d %d %d %d\n",mastertab,discipline,parmcat,parmnum);

        /* discipline 0 / category 2 / number 2 identifies UGRD */
        is_u = (mastertab != 255) && (discipline == 0) && (parmcat == 2) && (parmnum == 2);
        if (mode == 99 && is_u) fprintf(stderr,"\n-wind_speed: is u\n");

        if (is_u) {                    // save data
            if (save->has_u) {         /* drop any previously buffered U */
                free(save->val);
                free_sec(save->clone_sec);
            }
            copy_sec(sec, save->clone_sec);
            copy_data(data,ndata,&(save->val));
            GB2_ParmNum(save->clone_sec) = 3;   // set id to V
            save->has_u = 1;
            return 0;
        }

        if (save->has_u == 0) return 0;        /* nothing buffered, skip */

        // check for V: clone_sec was re-labelled as V above, so a VGRD
        // record on the same grid/time matches all compared sections
        if (same_sec0(sec,save->clone_sec) == 1 &&
            same_sec1(sec,save->clone_sec) == 1 &&
            same_sec3(sec,save->clone_sec) == 1 &&
            same_sec4(sec,save->clone_sec) == 1) {

            // check to see if winds are earth relative
            if ( (flag_table_3_3(sec) & 8) != 0 || (flag_table_3_3(save->clone_sec) & 8) != 0) {
                fprintf(stderr,"wind_dir will not work with grid-relative winds, skipping\n");
                free(save->val);
                free_sec(save->clone_sec);
                save->has_u = 0;
                return 0;
            }

            // calculate wind direction
            if (mode == 99) fprintf(stderr,"\n-wind_dir: calc wind direction\n");

            /* d1 holds U, data holds V; result overwrites d1 in place */
            d1 = save->val;
#pragma omp parallel for private(i)
            for (i = 0; i < ndata; i++) {
                if (!UNDEFINED_VAL(data[i]) && !UNDEFINED_VAL(d1[i])) {
                    d1[i] = (atan2(d1[i],data[i]) * 180.0 / 3.14159265359 + 180.0);
                }
                else d1[i] = UNDEFINED;
            }

            GB2_ParmNum(save->clone_sec) = 0;  // set id to direction degrees

            // copy data to temp space
            if ((data_tmp = (float *) malloc(sizeof(float) * (size_t) ndata)) == NULL)
                fatal_error("memory allocation - data_tmp","");
            /* grib_wrt needs raw (file-order) data, so undo the translation */
            undo_output_order(save->val, data_tmp, ndata);

            grib_wrt(save->clone_sec, data_tmp, ndata, nx_, ny_, use_scale,
                     dec_scale, bin_scale, wanted_bits, max_bits, grib_type,
                     &(save->out));

            if (flush_mode) fflush_file(&(save->out));

            free(data_tmp);

            // cleanup
            free(save->val);
            free_sec(save->clone_sec);
            save->has_u = 0;
        }
    }
    return 0;
}
2965.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m)) { int i, j, j1, j2; #pragma scop /* Determine mean of column vectors of input data matrix */ #pragma omp parallel private(i, j, j2) num_threads(1) { #pragma omp for schedule(dynamic, 1) for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ #pragma omp for schedule(dynamic, 1) for (i = 0; i < _PB_N; i++) for (j = 0; j < _PB_M; j++) data[i][j] -= mean[j]; /* Calculate the m * m covariance matrix. 
*/ #pragma omp for schedule(dynamic, 1) for (j1 = 0; j1 < _PB_M; j1++) for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
generator.h
// Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details

#ifndef GENERATOR_H_
#define GENERATOR_H_

#include <algorithm>
#include <cinttypes>
#include <random>

#include "graph.h"
#include "pvector.h"
#include "util.h"


/*
GAP Benchmark Suite
Class:  Generator
Author: Scott Beamer

Given scale and degree, generates edgelist for synthetic graph
 - Intended to be called from Builder
 - GenerateEL(uniform) generates and returns the edgelist
 - Can generate uniform random (uniform=true) or R-MAT graph according
   to Graph500 parameters (uniform=false)
 - Can also randomize weights within a weighted edgelist (InsertWeights)
 - Blocking/reseeding is for parallelism with deterministic output edgelist
*/


template <typename NodeID_, typename DestID_ = NodeID_,
          typename WeightT_ = NodeID_, typename TimestampT_ = WeightT_>
class Generator {
  typedef EdgePair<NodeID_, DestID_> Edge;
  typedef EdgePair<NodeID_, NodeWeight<NodeID_, WeightT_, TimestampT_>> WEdge;
  typedef pvector<Edge> EdgeList;

 public:
  // scale: log2 of the node count; degree: average edges per node.
  // Exits if 2^scale overflows the NodeID type.
  Generator(int scale, int degree) {
    scale_ = scale;
    num_nodes_ = 1l << scale;
    num_edges_ = num_nodes_ * degree;
    if (num_nodes_ > std::numeric_limits<NodeID_>::max()) {
      std::cout << "NodeID type (max: " << std::numeric_limits<NodeID_>::max();
      std::cout << ") too small to hold " << num_nodes_ << std::endl;
      std::cout << "Recommend changing NodeID (typedef'd in src/benchmark.h)";
      std::cout << " to a wider type and recompiling" << std::endl;
      std::exit(-31);
    }
  }

  // Relabels every endpoint through a random (but seed-deterministic)
  // permutation of the node IDs, destroying locality artifacts of R-MAT.
  void PermuteIDs(EdgeList &el) {
    pvector<NodeID_> permutation(num_nodes_);
    std::mt19937 rng(kRandSeed);
    #pragma omp parallel for
    for (NodeID_ n=0; n < num_nodes_; n++)
      permutation[n] = n;
    shuffle(permutation.begin(), permutation.end(), rng);
    #pragma omp parallel for
    for (int64_t e=0; e < num_edges_; e++)
      el[e] = Edge(permutation[el[e].u], permutation[el[e].v]);
  }

  // Uniform random edges; the RNG is reseeded per block of block_size
  // edges so the output is deterministic regardless of thread count.
  EdgeList MakeUniformEL() {
    EdgeList el(num_edges_);
    #pragma omp parallel
    {
      std::mt19937 rng;
      std::uniform_int_distribution<NodeID_> udist(0, num_nodes_-1);
      #pragma omp for
      for (int64_t block=0; block < num_edges_; block+=block_size) {
        rng.seed(kRandSeed + block/block_size);
        for (int64_t e=block; e < std::min(block+block_size, num_edges_); e++) {
          el[e] = Edge(udist(rng), udist(rng));
        }
      }
    }
    return el;
  }

  // R-MAT edges with Graph500 partition probabilities (A,B,C, D implied);
  // each of the `scale_` recursion levels picks one quadrant per edge.
  // Same per-block reseeding scheme as MakeUniformEL for determinism.
  EdgeList MakeRMatEL() {
    const float A = 0.57f, B = 0.19f, C = 0.19f;
    EdgeList el(num_edges_);
    #pragma omp parallel
    {
      std::mt19937 rng;
      std::uniform_real_distribution<float> udist(0, 1.0f);
      #pragma omp for
      for (int64_t block=0; block < num_edges_; block+=block_size) {
        rng.seed(kRandSeed + block/block_size);
        for (int64_t e=block; e < std::min(block+block_size, num_edges_); e++) {
          NodeID_ src = 0, dst = 0;
          for (int depth=0; depth < scale_; depth++) {
            float rand_point = udist(rng);
            src = src << 1;
            dst = dst << 1;
            if (rand_point < A+B) {
              if (rand_point > A)
                dst++;
            } else {
              src++;
              if (rand_point > A+B+C)
                dst++;
            }
          }
          el[e] = Edge(src, dst);
        }
      }
    }
    PermuteIDs(el);
    // TIME_PRINT("Shuffle", std::shuffle(el.begin(), el.end(),
    //                                    std::mt19937()));
    return el;
  }

  // Entry point: builds (and times) either a uniform or an R-MAT edgelist.
  EdgeList GenerateEL(bool uniform) {
    EdgeList el;
    Timer t;
    t.Start();
    if (uniform)
      el = MakeUniformEL();
    else
      el = MakeRMatEL();
    t.Stop();
    PrintTime("Generate Time", t.Seconds());
    return el;
  }

  // Unweighted edgelists: nothing to do.
  static void InsertWeights(pvector<EdgePair<NodeID_, NodeID_>> &el) {}

  // Overwrites existing weights with deterministic pseudo-random values.
  // NOTE(review): udist(1,255)+1 produces weights in [2,256], not [1,255];
  // the +1 "not zero" guard is redundant given the distribution's lower
  // bound of 1 — confirm whether udist(0,254)+1 was intended.
  static void InsertWeights(pvector<WEdge> &el) {
    #pragma omp parallel
    {
      std::mt19937 rng;
      std::uniform_int_distribution<int> udist(1, 255);
      int64_t el_size = el.size();
      #pragma omp for
      for (int64_t block=0; block < el_size; block+=block_size) {
        rng.seed(kRandSeed + block/block_size);
        for (int64_t e=block; e < std::min(block+block_size, el_size); e++) {
          el[e].v.w = static_cast<WeightT_>(udist(rng)+1); // to make sure weight is not zero
        }
      }
    }
  }

 private:
  int scale_;
  int64_t num_nodes_;
  int64_t num_edges_;
  // Edges are generated in blocks of this size, each with its own RNG
  // seed, so parallel generation is reproducible.
  static const int64_t block_size = 1<<18;
};

#endif  // GENERATOR_H_
targ-273742.c
#include <omp.h> #include <stdio.h> struct simple_dvector { size_t length; double* data; } ; int main (){ omp_set_default_device(0); int Device = 0; //allocate memory on the device size_t N = 1024*1024*10; int use_device = 1; int chunk = 1; struct simple_dvector x_vec, y_vec; x_vec.data = (double*) omp_target_alloc(N*sizeof(double), Device); y_vec.data = (double*) omp_target_alloc(N*sizeof(double), Device); fprintf(stderr, "CPU: x_vec.data = %p\n",x_vec.data); fprintf(stderr, "CPU: y_vec.data = %p\n",y_vec.data); #pragma omp target map(to:x_vec,y_vec) { printf("GPU: x_vec.data = %p\n",x_vec.data); //works printf("GPU: y_vec.data = %p\n",y_vec.data); //works } #pragma omp target teams distribute parallel for num_teams(120*4) thread_limit(512) schedule(static,chunk) map(to:x_vec,y_vec) if(target:use_device) for (size_t i = 0; i < N; ++i){ x_vec.data[i] = 0.0001*i; //fails y_vec.data[i] = 0.00003*i; } omp_target_free( x_vec.data, Device); omp_target_free( y_vec.data, Device); return 0; }
GB_unaryop__ainv_bool_uint8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_bool_uint8 // op(A') function: GB_tran__ainv_bool_uint8 // C type: bool // A type: uint8_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_bool_uint8 ( bool *restrict Cx, const uint8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_bool_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
Triangular_CSC.h
//
// Created by kazem on 7/18/17.
//

#ifndef TRIANGOPENMP_TRIANGULAR_CSC_H
#define TRIANGOPENMP_TRIANGULAR_CSC_H

#include <immintrin.h>

// NOTE(review): lsolve_reach_dec below uses std::chrono; <chrono> is
// presumably pulled in transitively via Reach.h -- confirm.
#include "../common/Reach.h"

namespace nasoq {

/* ****** Serial implementation */

 /*
  * Forward substitution: solves L*x = b in place.
  * L is lower triangular in CSC form (Lp = column pointers, Li = row indices,
  * Lx = numerical values); the diagonal entry is the FIRST entry of each
  * column.  On entry x holds b, on exit it holds the solution.
  * Returns 0 on bad inputs, 1 otherwise.
  */
 int lsolve(int n, int *Lp, int *Li, double *Lx, double *x) {
  int p, j;
  if (!Lp || !Li || !x) return (0); /* check inputs */
  for (j = 0; j < n; j++) {
   x[j] /= Lx[Lp[j]]; // divide by the diagonal of column j
   for (p = Lp[j] + 1; p < Lp[j + 1]; p++) {
    x[Li[p]] -= Lx[p] * x[j]; // scatter column j's update into later rows
   }
  }
  return (1);
 }

 /*
  * L^T x = b
  * Backward substitution with the transpose of L (same CSC storage):
  * columns are processed last-to-first and each column acts as a row of
  * L^T (gather instead of scatter).  Returns 0 on bad inputs, 1 otherwise.
  */
 int ltsolve(int n, int *Lp, int *Li, double *Lx, double *x) {
  int p, j;
  if (!Lp || !Li || !x) return (0); /* check inputs */
  for (j = n - 1; j >= 0; j--) {
   for (p = Lp[j] + 1; p < Lp[j + 1]; p++) {
    x[j] -= Lx[p] * x[Li[p]];
   }
   x[j] /= Lx[Lp[j]];
  }
  return (1);
 }

 /*
  * Counting the number of FLOPS in triangular solve
  * Performs the same forward solve as lsolve but returns the floating-point
  * operation count instead (1 per division, 2 per multiply-subtract);
  * returns 0 on bad inputs.
  */
 unsigned long flopCoutLSolve(int n, int *Lp, int *Li, double *Lx, double *x) {
  int p, j;
  unsigned long flopCount = 0;
  if (!Lp || !Li || !x) return (0); /* check inputs */
  for (j = 0; j < n; j++) {
   x[j] /= Lx[Lp[j]];
   flopCount++;
   for (p = Lp[j] + 1; p < Lp[j + 1]; p++) {
    x[Li[p]] -= Lx[p] * x[j];
    flopCount += 2;
   }
  }
  return (flopCount);
 }

 /* ****** Parallel */

 /*
  * Level-set parallel forward solve.
  * levelPtr/levelSet describe `levels` wavefronts; the columns inside one
  * wavefront are mutually independent and are distributed across threads,
  * while cross-column updates are protected with an atomic subtraction.
  * `chunk` is currently unused (schedule is hard-coded to static).
  */
 int lsolvePar(int n, int *Lp, int *Li, double *Lx, double *x,
               int levels, int *levelPtr, int *levelSet, int chunk) {
  if (!Lp || !Li || !x) return (0); /* check inputs */
  for (int l = 0; l < levels; ++l) {
   int li = 0;
#pragma omp parallel for default(shared) private(li) schedule(static)
   for (li = levelPtr[l]; li < levelPtr[l + 1]; ++li) {
    int j = levelSet[li];
    x[j] /= Lx[Lp[j]]; // safe: all columns j depends on are in earlier levels
    for (int p = Lp[j] + 1; p < Lp[j + 1]; p++) {
     double tmp = Lx[p] * x[j];
     int idx = Li[p];
#pragma omp atomic
     x[idx] -= tmp;
    }
   }
  }
  return (1);
 }

 /* ****** Parallel H2 */

 /*
  * Two-level (H2) parallel forward solve: each level (levelPtr) is split
  * into coarse partitions (parPtr/partition); one partition is handled per
  * thread iteration, and atomics still guard the scattered updates.
  * `parts` and `chunk` are currently unused.
  */
 int lsolveParH2(int n, int *Lp, int *Li, double *Lx, double *x,
                 int levels, int *levelPtr, int *levelSet,
                 int parts, int *parPtr, int *partition, int chunk) {
  if (!Lp || !Li || !x) return (0); /* check inputs */
  for (int i1 = 0; i1 < levels; ++i1) {
#pragma omp parallel
   //shared(lValues)//private(map, contribs)
   {
#pragma omp for schedule(static)
    for (int j1 = levelPtr[i1]; j1 < levelPtr[i1 + 1]; ++j1) {
     for (int k1 = parPtr[j1]; k1 < parPtr[j1 + 1]; ++k1) {
      int j = partition[k1];
      x[j] /= Lx[Lp[j]];
      // #pragma omp critical
      for (int p = Lp[j] + 1; p < Lp[j + 1]; p++) {
       double tmp = Lx[p] * x[j];
       int idx = Li[p];
#pragma omp atomic
       x[idx] -= tmp;
      }
     }
    }
   }
  }
  return (1);
 }

 /*
  * Serial forward solve kept for comparison with the parallel variants
  * (the inner-loop pragma is intentionally left commented out).
  */
 int lsolvePar2(int n, int *Lp, int *Li, double *Lx, double *x) {
  int p, j;
  if (!Lp || !Li || !x) return (0); /* check inputs */
  for (j = 0; j < n; j++) {
   x[j] /= Lx[Lp[j]];
   //#pragma omp parallel for
   for (p = Lp[j] + 1; p < Lp[j + 1]; p++) {
    x[Li[p]] -= Lx[p] * x[j];
   }
  }
  return (1);
 }

 /*
  * Vectorized implementation
  */
#if 0
 typedef union {
  __m256d v;
  double d[4];
 } v4df_t;

 // NOTE(review): disabled (#if 0) and not compiled.  If re-enabled:
 // _mm256_load_pd requires Lx+i1 to be 32-byte aligned, which a CSC value
 // array at an arbitrary offset does not guarantee -- _mm256_loadu_pd
 // would be the safe choice.  Also missing a return statement.
 int lsolveVectorize(int n, int* Lp, int* Li, const double* Lx, double *x) {
  double xx;
  v4df_t reg_Lx;
  v4df_t reg_x;
  v4df_t result0, result1, result2, result3;
  int mod=0;
#if 0
  for (int k = st ; k < bd1inReach ; k++) {
   j = reach[k];
   xx=x [j];
   xx /= Lx [Lp [j]] ;
   for (p = Lp [j]+1 ; p < Lp [j+1] ; p++) {
    x [Li [p]] -= Lx [p] * xx;
   }
   x[j]=xx;
  }
#endif
#if 0
  // 4-wide AVX version with a scalar remainder loop
  for (int k = bd1 ; k < bd2 ; k++) {
   xx = x[k];
   xx /= Lx[Lp [k]];
   reg_x.v = _mm256_set1_pd(xx);
   mod = (Lp [k+1] - Lp [k] - 1) % 4;
   for (int i1 = Lp [k] + 1; i1 < Lp [k+1] - mod; i1 += 4) {
    reg_Lx.v = _mm256_load_pd((double *) (Lx + i1));
    result0.v = _mm256_mul_pd(reg_Lx.v, reg_x.v);
    x[Li[i1]] -= result0.d[0];
    x[Li[i1 + 1]] -= result0.d[1];
    x[Li[i1 + 2]] -= result0.d[2];
    x[Li[i1 + 3]] -= result0.d[3];
   }
   for (int i1 = Lp [k+1] - mod; i1 < Lp [k+1]; ++i1) {
    x[Li[i1]] -= Lx[i1] * xx;
   }
   x[k] = xx;
  }
#endif
#if 1
  // 16-wide (4x unrolled AVX) version, skipping columns with a zero pivot result
  for (int k = 0 ; k < n ; k++) {
   xx = x[k];
   xx /= Lx[Lp [k]];
   if(xx != 0){
    reg_x.v = _mm256_set1_pd(xx);
    mod = (Lp [k+1] - Lp [k] - 1) % 16;
    for (int i1 = Lp [k] + 1; i1 < Lp [k+1] - mod; i1 += 16) {
     reg_Lx.v = _mm256_load_pd((double *) (Lx + i1));
     result0.v = _mm256_mul_pd(reg_Lx.v, reg_x.v);
     reg_Lx.v = _mm256_load_pd((double *) (Lx + i1+4));
     result1.v = _mm256_mul_pd(reg_Lx.v, reg_x.v);
     reg_Lx.v = _mm256_load_pd((double *) (Lx + i1+8));
     result2.v = _mm256_mul_pd(reg_Lx.v, reg_x.v);
     reg_Lx.v = _mm256_load_pd((double *) (Lx + i1+12));
     result3.v = _mm256_mul_pd(reg_Lx.v, reg_x.v);
     x[Li[i1]] -= result0.d[0];
     x[Li[i1 + 1]] -= result0.d[1];
     x[Li[i1 + 2]] -= result0.d[2];
     x[Li[i1 + 3]] -= result0.d[3];
     x[Li[i1 + 4]] -= result1.d[0];
     x[Li[i1 + 5]] -= result1.d[1];
     x[Li[i1 + 6]] -= result1.d[2];
     x[Li[i1 + 7]] -= result1.d[3];
     x[Li[i1 + 8]] -= result2.d[0];
     x[Li[i1 + 9]] -= result2.d[1];
     x[Li[i1 + 10]] -= result2.d[2];
     x[Li[i1 + 11]] -= result2.d[3];
     x[Li[i1 + 12]] -= result3.d[0];
     x[Li[i1 + 13]] -= result3.d[1];
     x[Li[i1 + 14]] -= result3.d[2];
     x[Li[i1 + 15]] -= result3.d[3];
    }
    for (int i1 = Lp [k+1] - mod; i1 < Lp [k+1]; ++i1) {
     x[Li[i1]] -= Lx[i1] * xx;
    }
    x[k] = xx;
   }
  }
#endif
 }
#endif

 /*
  * Pruned
  */
 /*
  * Sparse right-hand-side forward solve: computes the reach set of column
  * k of B in the graph of G (symbolic phase), then solves numerically only
  * for the reachable rows listed in xi[top..n-1].
  * Returns `top` (start of the reach set within xi); the numeric-solve
  * wall-clock time is reported through symDuration.
  */
 int lsolve_reach_dec(int n, int *Gp, int *Gi, double *Gx, int *Bp, int *Bi, double *Bx,
                      int k, int *xi, double *x, const int *pinv, double &symDuration) {
  int j, p, px, top;
  std::chrono::time_point<std::chrono::system_clock> start, end;
  top = reach(n, Gp, Gi, Bp, Bi, k, xi, pinv); // symbolic: reachable columns
  start = std::chrono::system_clock::now();
  for (px = top; px < n; px++) {
   j = xi[px];
   x[j] /= Gx[(Gp[j])];
   p = Gp[j] + 1;
   for (; p < Gp[j + 1]; p++) {
    x[Gi[p]] -= Gx[p] * x[j];
   }
  }
  end = std::chrono::system_clock::now();
  std::chrono::duration<double> tmp = end - start;
  symDuration = tmp.count();
  return (top);
 }

 /*
  * only for motive example
  */
 // Hard-coded solve used as a motivating (Sympiler) example: columns 0 and
 // 7 are fully unrolled ("peeled", with fixed value offsets 0..2 and
 // 20..22), the remaining columns are taken from reachSet.  Only valid for
 // the one specific matrix this example was generated from.
 void lSolveSympiler(int n, int *Lp, int *Li, const double *Lx, double *x,
                     int *reachSet, int reachSetSize) {
  int p, px, j;
  x[0] /= Lx[0];
  // Peel col 0
  double x_Li_1 = Lx[1] * x[0];
  double x_Li_2 = Lx[2] * x[0];
  x[*(Li + 1)] -= x_Li_1;
  x[*(Li + 2)] -= x_Li_2;
  for (px = 1; px < 3; px++) {
   j = reachSet[px];
   x[j] /= Lx[Lp[j]];
   for (p = Lp[j] + 1; p < Lp[j + 1]; p++)
    x[Li[p]] -= Lx[p] * x[j];
  }
  x[7] /= Lx[20];
  // Peel col 7
  double x_Li_21 = Lx[21] * x[7];
  double x_Li_22 = Lx[22] * x[7];
  x[*(Li + 21)] -= x_Li_21;
  x[*(Li + 22)] -= x_Li_22;
  for (px = 4; px < reachSetSize; px++) {
   j = reachSet[px];
   x[j] /= Lx[Lp[j]];
   for (p = Lp[j] + 1; p < Lp[j + 1]; p++)
    x[Li[p]] -= Lx[p] * x[j];
  }
 }
}
#endif //TRIANGOPENMP_TRIANGULAR_CSC_H
betweennessCentrality.c
#include "defs.h"

/*
 * Kernel 4 of the SSCA#2 benchmark: approximate betweenness centrality.
 *
 * Runs Brandes-style BFS accumulation from numV = 2^K4approx randomly
 * permuted source vertices of G and adds each vertex's dependency score
 * into BC.  Under OpenMP, all threads execute this whole body inside one
 * parallel region: shared arrays are coordinated with per-vertex locks
 * (vLock) and barriers, and each thread gathers newly discovered vertices
 * in a private stack (myS) that is merged into the shared stack S after
 * every BFS phase via the psCount prefix sums.
 *
 * G:  input graph (numEdges offsets, endV edge targets, weight)
 * BC: output array of n betweenness scores (accumulated into)
 * Returns the elapsed wall-clock time of the BC computation.
 */
double betweennessCentrality(graph* G, DOUBLE_T* BC) {
    VERT_T *S;          /* stack of vertices in the order of non-decreasing
                           distance from s. Also used to implicitly
                           represent the BFS queue */
    plist* P;           /* predecessors of a vertex v on shortest paths from s */
    DOUBLE_T* sig;      /* No. of shortest paths */
    LONG_T* d;          /* Length of the shortest path between every pair */
    DOUBLE_T* del;      /* dependency of vertices */
    LONG_T *in_degree, *numEdges, *pSums;
    LONG_T *pListMem;   /* backing storage for all predecessor lists */
    LONG_T* Srcs;       /* permuted BFS source order */
    LONG_T *start, *end; /* per-phase [start,end) windows into S */
    LONG_T MAX_NUM_PHASES;
    LONG_T *psCount;    /* per-thread stack sizes / prefix sums for merging */
#ifdef _OPENMP
    omp_lock_t* vLock;  /* one lock per vertex */
    LONG_T chunkSize;
#endif
    int seed = 2387;
    double elapsed_time;

#ifdef _OPENMP
#pragma omp parallel
{
#endif
    /* Everything below runs per-thread when OpenMP is enabled. */
    VERT_T *myS, *myS_t;
    LONG_T myS_size;
    LONG_T i, j, k, p, count, myCount;
    LONG_T v, w, vert;
    LONG_T numV, num_traversals, n, m, phase_num;
    LONG_T tid, nthreads;
    int* stream;
#ifdef DIAGNOSTIC
    double elapsed_time_part;
#endif

#ifdef _OPENMP
    int myLock;
    tid = omp_get_thread_num();
    nthreads = omp_get_num_threads();
#else
    tid = 0;
    nthreads = 1;
#endif

#ifdef DIAGNOSTIC
    if (tid == 0) {
        elapsed_time_part = get_seconds();
    }
#endif

    /* numV: no. of vertices to run BFS from = 2^K4approx */
    numV = 1<<K4approx;
    n = G->n;
    m = G->m;

    /* Permute vertices */
    if (tid == 0) {
        Srcs = (LONG_T *) malloc(n*sizeof(LONG_T));
#ifdef _OPENMP
        vLock = (omp_lock_t *) malloc(n*sizeof(omp_lock_t));
#endif
    }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
    for (i=0; i<n; i++) {
        omp_init_lock(&vLock[i]);
    }
#endif

    /* Initialize RNG stream */
    stream = init_sprng(0, tid, nthreads, seed, SPRNG_DEFAULT);

#ifdef _OPENMP
#pragma omp for
#endif
    for (i=0; i<n; i++) {
        Srcs[i] = i;
    }

    /* Random pairwise swaps; try-locks let threads skip contended pairs
       instead of blocking, so the permutation is best-effort. */
#ifdef _OPENMP
#pragma omp for
#endif
    for (i=0; i<n; i++) {
        j = n*sprng(stream);
        if (i != j) {
#ifdef _OPENMP
            int l1 = omp_test_lock(&vLock[i]);
            if (l1) {
                int l2 = omp_test_lock(&vLock[j]);
                if (l2) {
#endif
                    k = Srcs[i];
                    Srcs[i] = Srcs[j];
                    Srcs[j] = k;
#ifdef _OPENMP
                    omp_unset_lock(&vLock[j]);
                }
                omp_unset_lock(&vLock[i]);
            }
#endif
        }
    }

#ifdef _OPENMP
#pragma omp barrier
#endif

#ifdef DIAGNOSTIC
    if (tid == 0) {
        elapsed_time_part = get_seconds() -elapsed_time_part;
        fprintf(stderr, "Vertex ID permutation time: %lf seconds\n", elapsed_time_part);
        elapsed_time_part = get_seconds();
    }
#endif

    /* Start timing code from here */
    if (tid == 0) {
        elapsed_time = get_seconds();
#ifdef VERIFYK4
        MAX_NUM_PHASES = 2*sqrt(n);
#else
        MAX_NUM_PHASES = 50;
#endif
    }

#ifdef _OPENMP
#pragma omp barrier
#endif

    /* Initialize predecessor lists */
    /* The size of the predecessor list of each vertex is bounded by
       its in-degree. So we first compute the in-degree of every vertex */
    if (tid == 0) {
        P = (plist *) calloc(n, sizeof(plist));
        in_degree = (LONG_T *) calloc(n+1, sizeof(LONG_T));
        numEdges = (LONG_T *) malloc((n+1)*sizeof(LONG_T));
        pSums = (LONG_T *) malloc(nthreads*sizeof(LONG_T));
    }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
    for (i=0; i<m; i++) {
        v = G->endV[i];
#ifdef _OPENMP
        omp_set_lock(&vLock[v]);
#endif
        in_degree[v]++;
#ifdef _OPENMP
        omp_unset_lock(&vLock[v]);
#endif
    }

    /* numEdges becomes the exclusive prefix sum of in_degree: the offset
       of each vertex's predecessor list inside pListMem. */
    prefix_sums(in_degree, numEdges, pSums, n);

    if (tid == 0) {
        pListMem = (LONG_T *) malloc(m*sizeof(LONG_T));
    }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
    for (i=0; i<n; i++) {
        P[i].list = pListMem + numEdges[i];
        P[i].degree = in_degree[i];
        P[i].count = 0;
    }

#ifdef DIAGNOSTIC
    if (tid == 0) {
        elapsed_time_part = get_seconds() - elapsed_time_part;
        fprintf(stderr, "In-degree computation time: %lf seconds\n", elapsed_time_part);
        elapsed_time_part = get_seconds();
    }
#endif

    /* Allocate shared memory */
    if (tid == 0) {
        free(in_degree);
        free(numEdges);
        free(pSums);

        S = (VERT_T *) malloc(n*sizeof(VERT_T));
        sig = (DOUBLE_T *) malloc(n*sizeof(DOUBLE_T));
        d = (LONG_T *) malloc(n*sizeof(LONG_T));
        del = (DOUBLE_T *) calloc(n, sizeof(DOUBLE_T));
        start = (LONG_T *) malloc(MAX_NUM_PHASES*sizeof(LONG_T));
        end = (LONG_T *) malloc(MAX_NUM_PHASES*sizeof(LONG_T));
        psCount = (LONG_T *) malloc((nthreads+1)*sizeof(LONG_T));
    }

    /* local memory for each thread */
    /* NOTE(review): allocated/cast as LONG_T yet used as VERT_T --
       presumably both typedefs have the same width; confirm in defs.h. */
    myS_size = (2*n)/nthreads;
    myS = (LONG_T *) malloc(myS_size*sizeof(LONG_T));
    num_traversals = 0;
    myCount = 0;

#ifdef _OPENMP
#pragma omp barrier
#endif

#ifdef _OPENMP
#pragma omp for
#endif
    for (i=0; i<n; i++) {
        d[i] = -1;  /* -1 == not yet visited */
    }

#ifdef DIAGNOSTIC
    if (tid == 0) {
        elapsed_time_part = get_seconds() -elapsed_time_part;
        fprintf(stderr, "BC initialization time: %lf seconds\n", elapsed_time_part);
        elapsed_time_part = get_seconds();
    }
#endif

    /* One BFS + dependency accumulation per (non-isolated) source vertex,
       stopping after numV traversals. */
    for (p=0; p<n; p++) {
        i = Srcs[p];
        if (G->numEdges[i+1] - G->numEdges[i] == 0) {
            continue;
        } else {
            num_traversals++;
        }

        if (num_traversals == numV + 1) {
            break;
        }

        if (tid == 0) {
            sig[i] = 1;
            d[i] = 0;
            S[0] = i;
            start[0] = 0;
            end[0] = 1;
        }

        count = 1;
        phase_num = 0;

#ifdef _OPENMP
#pragma omp barrier
#endif

        /* --- Forward BFS, one frontier (phase) at a time --- */
        while (end[phase_num] - start[phase_num] > 0) {

            myCount = 0;
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for schedule(dynamic)
#endif
            for (vert = start[phase_num]; vert < end[phase_num]; vert++) {
                v = S[vert];
                for (j=G->numEdges[v]; j<G->numEdges[v+1]; j++) {
#ifndef VERIFYK4
                    /* Filter edges with weights divisible by 8 */
                    if ((G->weight[j] & 7) != 0) {
#endif
                        w = G->endV[j];
                        if (v != w) {
#ifdef _OPENMP
                            myLock = omp_test_lock(&vLock[w]);
                            if (myLock) {
#endif
                                /* w found for the first time? */
                                if (d[w] == -1) {
                                    if (myS_size == myCount) {
                                        /* Resize myS */
                                        myS_t = (LONG_T *) malloc(2*myS_size*sizeof(VERT_T));
                                        memcpy(myS_t, myS, myS_size*sizeof(VERT_T));
                                        free(myS);
                                        myS = myS_t;
                                        myS_size = 2*myS_size;
                                    }
                                    myS[myCount++] = w;
                                    d[w] = d[v] + 1;
                                    sig[w] = sig[v];
                                    P[w].list[P[w].count++] = v;
                                } else if (d[w] == d[v] + 1) {
                                    sig[w] += sig[v];
                                    P[w].list[P[w].count++] = v;
                                }
#ifdef _OPENMP
                                omp_unset_lock(&vLock[w]);
                            } else {
                                /* Lock contended: another thread is touching w.
                                   Re-check under a blocking lock and only
                                   accumulate (discovery was done by the owner). */
                                if ((d[w] == -1) || (d[w] == d[v]+ 1)) {
                                    omp_set_lock(&vLock[w]);
                                    sig[w] += sig[v];
                                    P[w].list[P[w].count++] = v;
                                    omp_unset_lock(&vLock[w]);
                                }
                            }
#endif
                        }
#ifndef VERIFYK4
                    }
#endif
                }
            }

            /* Merge all local stacks for next iteration */
            phase_num++;
            psCount[tid+1] = myCount;

#ifdef _OPENMP
#pragma omp barrier
#endif
            if (tid == 0) {
                start[phase_num] = end[phase_num-1];
                psCount[0] = start[phase_num];
                for(k=1; k<=nthreads; k++) {
                    psCount[k] = psCount[k-1] + psCount[k];
                }
                end[phase_num] = psCount[nthreads];
            }
#ifdef _OPENMP
#pragma omp barrier
#endif
            /* Copy this thread's discoveries into its slot of S. */
            for (k = psCount[tid]; k < psCount[tid+1]; k++) {
                S[k] = myS[k-psCount[tid]];
            }

#ifdef _OPENMP
#pragma omp barrier
#endif
            count = end[phase_num];
        }

        phase_num--;

#ifdef _OPENMP
#pragma omp barrier
#endif

        /* --- Backward sweep: accumulate dependencies (Brandes) --- */
        while (phase_num > 0) {
#ifdef _OPENMP
#pragma omp for
#endif
            for (j=start[phase_num]; j<end[phase_num]; j++) {
                w = S[j];
                for (k = 0; k<P[w].count; k++) {
                    v = P[w].list[k];
#ifdef _OPENMP
                    omp_set_lock(&vLock[v]);
#endif
                    del[v] = del[v] + sig[v]*(1+del[w])/sig[w];
#ifdef _OPENMP
                    omp_unset_lock(&vLock[v]);
#endif
                }
                BC[w] += del[w];
            }

            phase_num--;

#ifdef _OPENMP
#pragma omp barrier
#endif
        }

        /* Reset per-traversal state for the vertices touched by this BFS. */
#ifdef _OPENMP
        chunkSize = n/nthreads;
#pragma omp for schedule(static, chunkSize)
#endif
        for (j=0; j<count; j++) {
            w = S[j];
            d[w] = -1;
            del[w] = 0;
            P[w].count = 0;
        }

#ifdef _OPENMP
#pragma omp barrier
#endif
    }

#ifdef DIAGNOSTIC
    if (tid == 0) {
        elapsed_time_part = get_seconds() -elapsed_time_part;
        fprintf(stderr, "BC computation time: %lf seconds\n", elapsed_time_part);
    }
#endif

#ifdef _OPENMP
#pragma omp for
    for (i=0; i<n; i++) {
        omp_destroy_lock(&vLock[i]);
    }
#endif

    free(myS);

    if (tid == 0) {
        free(S);
        free(pListMem);
        free(P);
        free(sig);
        free(d);
        free(del);
#ifdef _OPENMP
        free(vLock);
#endif
        free(start);
        free(end);
        free(psCount);
        elapsed_time = get_seconds() - elapsed_time;
        free(Srcs);
    }

    free_sprng(stream);
#ifdef _OPENMP
}
#endif

    /* Verification */
#ifdef VERIFYK4
    /* For the synthetic K4 graph every vertex has the same closed-form BC
       value, which depends only on SCALE. */
    double BCval;
    if (SCALE % 2 == 0) {
        BCval = 0.5*pow(2, 3*SCALE/2)-pow(2, SCALE)+1.0;
    } else {
        BCval = 0.75*pow(2, (3*SCALE-1)/2)-pow(2, SCALE)+1.0;
    }
    int failed = 0;
    for (int i=0; i<G->n; i++) {
        if (round(BC[i] - BCval) != 0) {
            failed = 1;
            break;
        }
    }
    if (failed) {
        fprintf(stderr, "Kernel 4 failed validation!\n");
    } else {
        fprintf(stderr, "Kernel 4 validation successful!\n");
    }
#endif

    return elapsed_time;
}
main.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>

#define DIMENSION 1000

/* Allocates an uninitialised DIMENSION x DIMENSION matrix (row-pointer array
 * plus one heap row per index).  Caller frees with free_matrix(). */
static int** allocate_matrix() {
    int **matrix = (int**) malloc(DIMENSION * sizeof(int*));
    for (int i = 0; i < DIMENSION; i++)
        matrix[i] = (int*) malloc(DIMENSION * sizeof(int));
    return matrix;
}

/* Returns a DIMENSION x DIMENSION matrix with random entries in [1, 8]. */
int** create_random_matrix() {
    int **matrix = allocate_matrix();
    for (int i = 0; i < DIMENSION; ++i)
        for (int j = 0; j < DIMENSION; ++j)
            matrix[i][j] = (rand() % 8) + 1; // NOLINT(cert-msc50-cpp)
    return matrix;
}

/* Returns a diagonal matrix: random entries in [1, 8] on the diagonal,
 * zero everywhere else. */
int** create_diagonal_matrix() {
    int **matrix = allocate_matrix();
    for (int i = 0; i < DIMENSION; ++i)
        for (int j = 0; j < DIMENSION; ++j)
            matrix[i][j] = (i == j) ? (rand() % 8) + 1 : 0; // NOLINT(cert-msc50-cpp)
    return matrix;
}

/* Returns an upper triangular matrix: zero strictly below the diagonal,
 * random entries in [1, 8] on and above it. */
int** create_upper_triangular_matrix() {
    int **matrix = allocate_matrix();
    for (int i = 0; i < DIMENSION; ++i)
        for (int j = 0; j < DIMENSION; ++j)
            matrix[i][j] = (i > j) ? 0 : (rand() % 8) + 1; // NOLINT(cert-msc50-cpp)
    return matrix;
}

/* Returns a lower triangular matrix: zero strictly above the diagonal,
 * random entries in [1, 8] on and below it. */
int** create_lower_triangular_matrix() {
    int **matrix = allocate_matrix();
    for (int i = 0; i < DIMENSION; ++i)
        for (int j = 0; j < DIMENSION; ++j)
            matrix[i][j] = (i < j) ? 0 : (rand() % 8) + 1; // NOLINT(cert-msc50-cpp)
    return matrix;
}

/* Prints the matrix to stdout, one row per line. */
void print_matrix(int** matrix) {
    for (int i = 0; i < DIMENSION; ++i) {
        for (int j = 0; j < DIMENSION; ++j)
            printf("%3d ", matrix[i][j]);
        putchar('\n');
    }
}

/* General dense product C = A * B, parallelised over the output cells. */
int** multiply_two_matrices(int** A, int** B) {
    int **matrix = allocate_matrix();
    int i, j, k;
#pragma omp parallel for private(i, j, k) shared(matrix, A, B) collapse(2) default(none)
    for (i = 0; i < DIMENSION; ++i) {
        for (j = 0; j < DIMENSION; ++j) {
            matrix[i][j] = 0;
            for (k = 0; k < DIMENSION; ++k)
                matrix[i][j] += A[i][k] * B[k][j];
        }
    }
    return matrix;
}

/* Product of two diagonal matrices: the elementwise product of the
 * diagonals; every off-diagonal entry is zero. */
int** multiply_two_diagonal_matrices(int** A, int** B) {
    int **matrix = allocate_matrix();
    int i, j;
#pragma omp parallel for private(i, j) shared(matrix, A, B) collapse(2) default(none)
    for (i = 0; i < DIMENSION; ++i)
        for (j = 0; j < DIMENSION; ++j)
            matrix[i][j] = (i == j) ? A[i][j] * B[i][j] : 0;
    return matrix;
}

/*
 * Product of two upper triangular matrices (itself upper triangular).
 * BUG FIX: the guard used to be `if (i > j)`, which computed only the
 * strictly-lower entries -- identically zero for upper triangular inputs --
 * so the function always returned the zero matrix.  Nonzero entries live at
 * i <= j, and only k in [i, j] can contribute because A[i][k] = 0 for k < i
 * and B[k][j] = 0 for k > j.
 */
int** multiply_two_upper_triangular_matrices(int** A, int** B) {
    int **matrix = allocate_matrix();
    int i, j, k;
#pragma omp parallel for private(i, j, k) shared(matrix, A, B) collapse(2) default(none)
    for (i = 0; i < DIMENSION; ++i) {
        for (j = 0; j < DIMENSION; ++j) {
            matrix[i][j] = 0;
            if (i <= j)
                for (k = i; k <= j; ++k)
                    matrix[i][j] += A[i][k] * B[k][j];
        }
    }
    return matrix;
}

/*
 * Product of two lower triangular matrices (itself lower triangular).
 * BUG FIX: the guard used to be `if (i < j)` -- the mirror image of the
 * upper triangular bug above -- again yielding an all-zero result.  Nonzero
 * entries live at i >= j, with only k in [j, i] contributing.
 */
int** multiply_two_lower_triangular_matrices(int** A, int** B) {
    int **matrix = allocate_matrix();
    int i, j, k;
#pragma omp parallel for private(i, j, k) shared(matrix, A, B) collapse(2) default(none)
    for (i = 0; i < DIMENSION; ++i) {
        for (j = 0; j < DIMENSION; ++j) {
            matrix[i][j] = 0;
            if (i >= j)
                for (k = j; k <= i; ++k)
                    matrix[i][j] += A[i][k] * B[k][j];
        }
    }
    return matrix;
}

/* Releases a matrix created by any of the create_/multiply_ functions. */
void free_matrix(int** matrix) {
    for (int i = 0; i < DIMENSION; ++i)
        free(matrix[i]);
    free(matrix);
}

/*
 * Benchmarks the four multiplication kernels on freshly generated operands
 * and reports the wall-clock time of each run.
 * FIX: the operands and product of every run are now freed after timing;
 * previously only the final triple was released, leaking three matrix
 * triples (~36 MB) over the program's lifetime.
 */
int main() {
    srand(time(NULL)); // NOLINT(cert-msc51-cpp)
    omp_set_num_threads(omp_get_num_procs());

    double start_time, run_time;
    int **A, **B, **product;

    printf("Multiplying two random square matrices (%dx%d) ...\n", DIMENSION, DIMENSION);
    start_time = omp_get_wtime();
    A = create_random_matrix();
    B = create_random_matrix();
    product = multiply_two_matrices(A, B);
    run_time = omp_get_wtime() - start_time;
    printf("Done. Took %.4f s.\n", run_time);
    free_matrix(A);
    free_matrix(B);
    free_matrix(product);

    printf("Multiplying two diagonal matrices (%dx%d) ...\n", DIMENSION, DIMENSION);
    start_time = omp_get_wtime();
    A = create_diagonal_matrix();
    B = create_diagonal_matrix();
    product = multiply_two_diagonal_matrices(A, B);
    run_time = omp_get_wtime() - start_time;
    printf("Done. Took %.4f s.\n", run_time);
    free_matrix(A);
    free_matrix(B);
    free_matrix(product);

    printf("Multiplying two upper triangular matrices (%dx%d) ...\n", DIMENSION, DIMENSION);
    start_time = omp_get_wtime();
    A = create_upper_triangular_matrix();
    B = create_upper_triangular_matrix();
    product = multiply_two_upper_triangular_matrices(A, B);
    run_time = omp_get_wtime() - start_time;
    printf("Done. Took %.4f s.\n", run_time);
    free_matrix(A);
    free_matrix(B);
    free_matrix(product);

    printf("Multiplying two lower triangular matrices (%dx%d) ...\n", DIMENSION, DIMENSION);
    start_time = omp_get_wtime();
    A = create_lower_triangular_matrix();
    B = create_lower_triangular_matrix();
    product = multiply_two_lower_triangular_matrices(A, B);
    run_time = omp_get_wtime() - start_time;
    printf("Done. Took %.4f s.\n", run_time);

    free_matrix(A);
    free_matrix(B);
    free_matrix(product);
    return 0;
}
GB_binop__isgt_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): generated kernel for the ISGT ("is greater than") operator
// on uint16_t; every function below is a thin wrapper that sets up the
// GB_* macros and delegates to a shared #include'd template.  Any fix
// belongs in Generator/*, not here.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isgt_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_08__isgt_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__isgt_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_04__isgt_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isgt_uint16)
// A*D function (colscale):         GB (_AxD__isgt_uint16)
// D*A function (rowscale):         GB (_DxB__isgt_uint16)
// C+=B function (dense accum):     GB (_Cdense_accumB__isgt_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__isgt_uint16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isgt_uint16)
// C=scalar+B                       GB (_bind1st__isgt_uint16)
// C=scalar+B'                      GB (_bind1st_tran__isgt_uint16)
// C=A+scalar                       GB (_bind2nd__isgt_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__isgt_uint16)

// C type:   uint16_t
// A type:   uint16_t
// A pattern? 0
// B type:   uint16_t
// B pattern? 0

// BinaryOp: cij = (aij > bij)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x > y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGT || GxB_NO_UINT16 || GxB_NO_ISGT_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (ISGT is none of these, so this variant is not generated.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__isgt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isgt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isgt_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (generator emits a second return after the block)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isgt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isgt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isgt_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion substitutes alpha/beta for missing entries of A/B
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__isgt_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isgt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isgt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isgt_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isgt_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from the bitmap
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isgt_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x > aij) ;                       \
}

GrB_Info GB (_bind1st_tran__isgt_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any later use
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij > y) ;                       \
}

GrB_Info GB (_bind2nd_tran__isgt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
TransferOP.h
/* * TransferOP.h * * Created on: Jul 20, 2016 * Author: mason */ #ifndef TransferOP_H_ #define TransferOP_H_ #include "Param.h" #include "MyLib.h" #include "Node.h" #include "Graph.h" class TransferParams { public: vector<Param> W; PAlphabet elems; int nVSize; int nInSize; int nOutSize; public: TransferParams() { nVSize = 0; } inline void exportAdaParams(ModelUpdate& ada) { for(int idx = 0; idx < nVSize; idx++) { ada.addParam(&(W[idx])); } } inline void initial(PAlphabet alpha, int nOSize, int nISize) { elems = alpha; nVSize = elems->size(); nInSize = nISize; nOutSize = nOSize; W.resize(nVSize); for(int idx = 0; idx < nVSize; idx++) { W[idx].initial(nOSize, nISize); } } inline int getElemId(const string& strFeat) { return elems->from_string(strFeat); } // will add it inline void save(std::ofstream &os) const { } // will add it inline void load(std::ifstream &is) { } }; class TransferNode : public Node { public: PNode in; int xid; TransferParams* param; public: TransferNode() : Node() { in = NULL; xid = -1; param = NULL; node_type = "transfer"; } inline void setParam(TransferParams* paramInit) { param = paramInit; } inline void clearValue() { Node::clearValue(); in = NULL; xid = -1; } public: void forward(Graph *cg, PNode x, const string& strNorm) { in = x; xid = param->getElemId(strNorm); if (xid < 0) { std::cout << "TransferNode warning: could find the label: " << strNorm << std::endl; } degree = 0; in->addParent(this); cg->addNode(this); } public: void compute() { if (xid >= 0) { val.mat() = param->W[xid].val.mat() * in->val.mat(); } } void backward() { if(xid >= 0) { param->W[xid].grad.mat() += loss.mat() * in->val.tmat(); in->loss.mat() += param->W[xid].val.mat().transpose() * loss.mat(); } } public: inline PExecute generate(bool bTrain, dtype cur_drop_factor); // better to rewrite for deep understanding inline bool typeEqual(PNode other) { bool result = Node::typeEqual(other); if (!result) return false; TransferNode* conv_other = (TransferNode*)other; if 
(param != conv_other->param) { return false; } if (xid != conv_other->xid) { return false; } return true; } }; class TransferExecute :public Execute { public: bool bTrain; public: inline void forward() { int count = batch.size(); //#pragma omp parallel for for (int idx = 0; idx < count; idx++) { batch[idx]->compute(); batch[idx]->forward_drop(bTrain, drop_factor); } } inline void backward() { int count = batch.size(); //#pragma omp parallel for for (int idx = 0; idx < count; idx++) { batch[idx]->backward_drop(); batch[idx]->backward(); } } }; inline PExecute TransferNode::generate(bool bTrain, dtype cur_drop_factor) { TransferExecute* exec = new TransferExecute(); exec->batch.push_back(this); exec->bTrain = bTrain; exec->drop_factor = cur_drop_factor; return exec; }; #endif /* TransferOP_H_ */
deconvolution_packnto1.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Transposed convolution (deconvolution), RVV-vectorized.
//
// The input feature map is packn-packed (packn floats interleaved per spatial
// position, packn = csrr_vlenb()/4 = number of 32-bit lanes in one vector
// register); the output is written in plain pack1 layout — hence "packnto1".
//
// Gather formulation: for every output pixel (i, j) the kernel window is
// scanned and only input positions that project exactly onto (i, j) under the
// given stride/dilation contribute (the sys/sxs divisibility and bounds
// checks below).  Per contributing position, packn products are accumulated
// in a vector register and horizontally reduced at the end.
//
// top_blob is assumed to be pre-allocated to outw x outh x outch by the
// caller — TODO confirm against the calling Deconvolution layer.
static void deconvolution_packnto1_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packnto1, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    const int packn = csrr_vlenb() / 4;       // f32 lanes per vector register
    const word_type vl = vsetvl_e32m1(packn); // fixed vector length for all ops

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // effective (dilated) kernel footprint
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    // Mat-to-pointer conversion; null when no bias was given.
    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                float sum = 0.f;

                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }

                vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);

                // weights for output channel p: maxk*channels*packn floats
                const float* kptr = (const float*)weight_data_packnto1 + maxk * channels * p * packn;

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        // input row that would have produced output row i via
                        // kernel tap y; must be a non-negative multiple of
                        // stride_h to be a valid source.
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        for (int x = 0; x < kernel_w; x++)
                        {
                            // same projection along the width axis
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            // packn input values at (sy, sx) of channel group q
                            const float* sptr = m.row(sy) + sx * packn;

                            int k = y * kernel_w + x;

                            vfloat32m1_t _val = vle32_v_f32m1(sptr, vl);
                            vfloat32m1_t _w = vle32_v_f32m1(kptr + k * packn, vl);
                            _sum = vfmacc_vv_f32m1(_sum, _val, _w, vl);
                        }
                    }

                    kptr += maxk * packn;
                }

                // Horizontal reduction of the packn partial sums, seeded with
                // the scalar bias accumulator.  NOTE(review): the first
                // argument is a default-constructed scratch destination —
                // presumably ignored by the reduction intrinsic; confirm
                // against the RVV intrinsics spec.
                sum = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum, vfmv_s_f_f32m1(vfloat32m1_t(), sum, vl), vl));

                // fused activation (relu/leaky/clip/... per activation_type)
                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = sum;
            }

            outptr += outw;
        }
    }
}
calcCondQB.c
#include <mex.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>

/* Return the smaller of two ints. */
int min(int A, int B)
{
    if (A < B) {
        return A;
    }
    else {
        return B;
    }
}

/* Return the larger of two ints. */
int max(int A, int B)
{
    if (A > B) {
        return A;
    }
    else {
        return B;
    }
}

/*
 * calcCondQB: for every (bound k, predicted column j) pair, evaluate a
 * truncated 1-D Gaussian over the row index i:
 *
 *     condQB(i, idx) = factor * expf(-(i - muAvg)^2 / (2*varAvg))
 *
 * where muAvg/varAvg are the colAFac/colBFac-weighted averages of the means
 * and variances at the two shape columns colA[j], colB[j].  Entries smaller
 * than eps are left at zero; the nonzero row range is returned in cmin/cmax
 * (0-based, inclusive).
 *
 * Inputs (prhs):
 *   0 mu, 1 var         per-(bound, shape-column) Gaussian parameters
 *   2 numRows           number of rows of the output Gaussians
 *   3 numBounds         number of bounds k
 *   4 numColumnsShape   columns of mu/var per bound
 *   5 numColumnsPred    predicted columns per bound
 *   6 colA, 7 colAFac   left interpolation column (int32) and weight
 *   8 colB, 9 colBFac   right interpolation column (int32) and weight
 *   10 eps              truncation threshold
 *
 * Outputs (plhs):
 *   0 condQB  single [numRows x numBounds*numColumnsPred]
 *   1 cmin    int32 first nonzero row per column (0-based)
 *   2 cmax    int32 last nonzero row per column (0-based)
 */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    /* Input variables */
    double *mu = mxGetPr(prhs[0]);
    double *var = mxGetPr(prhs[1]);
    int numRows = (int) mxGetScalar(prhs[2]);
    int numBounds = (int) mxGetScalar(prhs[3]);
    int numColumnsShape = (int) mxGetScalar(prhs[4]);
    int numColumnsPred = (int) mxGetScalar(prhs[5]);
    int *colA = (int*) mxGetData(prhs[6]);
    double *colAFac = mxGetPr(prhs[7]);
    int *colB = (int*) mxGetData(prhs[8]);
    double *colBFac = mxGetPr(prhs[9]);
    double eps = mxGetScalar(prhs[10]);

    /* intern variables and pointers */
    float* condQB = NULL;
    int* cmin = NULL;
    int* cmax = NULL;
    float factor,varinv,varAvg,muAvg;
    int i,j,k,idx,idx2,numNotZero,muFloor,startVal,stopVal,idxA,idxB;

    /* 2-D matrix with [numBounds,numColumnsPred] */
    plhs[0] = mxCreateNumericMatrix(numRows,numBounds*numColumnsPred,mxSINGLE_CLASS,mxREAL);
    condQB = (float *) mxGetPr(plhs[0]);
    plhs[1] = mxCreateNumericMatrix(1,numBounds*numColumnsPred,mxINT32_CLASS,mxREAL);
    cmin = (int *)mxGetData(plhs[1]);
    plhs[2] = mxCreateNumericMatrix(1,numBounds*numColumnsPred,mxINT32_CLASS,mxREAL);
    cmax = (int *)mxGetData(plhs[2]);

    /* negative entropy of q_c */
    #pragma omp parallel for private(j,i,idx,idx2,startVal,stopVal,varinv,numNotZero,muFloor,factor,varAvg,muAvg,idxA,idxB)
    for (k=0; k < numBounds; k++)
    {
        for (j=0; j < numColumnsPred; j++)
        {
            /*idx = k*numColumnsShape + j;
            factor = 1/sqrt(2*3.1415926535897*var[idx]);
            muFloor = (int) mu[idx];*/
            idx = k*numColumnsPred + j;
            idxA = colA[j]+k*numColumnsShape;
            idxB = colB[j]+k*numColumnsShape;
            /* interpolate mean/variance between the two shape columns */
            varAvg = (float) (colAFac[j]*var[idxA] + colBFac[j]*var[idxB]);
            muAvg = (float) (colAFac[j]*mu[idxA] + colBFac[j]*mu[idxB]);
            factor = 1/sqrtf(2*3.1415926535897*varAvg);
            muFloor = (int) muAvg;

            /* calculate rows for which the gaussian is larger than threshold.
               BUG FIX: the original computed ceil(abs(sqrt(x))); with
               <stdlib.h> that abs() is the *int* abs, so the double was
               truncated before ceil() ever ran.  This looks ported from
               MATLAB, where abs(sqrt(x)) of a negative x is the complex
               magnitude sqrt(|x|) — so take fabs() inside the sqrt, which
               also avoids a NaN when eps*factor > 1. */
            numNotZero = (int) ceil(sqrt(fabs(-log(eps*factor)*2*varAvg)));
            startVal = max(muFloor-numNotZero,1);
            stopVal = min(muFloor+numNotZero,numRows);
            cmin[idx] = startVal-1;
            cmax[idx] = stopVal-1;
            idx2 = idx*numRows;
            varinv = -1/(2*varAvg);
            for (i=startVal; i <= stopVal; i++)
            {
                condQB[idx2 + i - 1] = factor*expf(varinv*(i-muAvg)*(i-muAvg));
            }
        }
    }
}
pdlansy.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/pzlansy.c, normal z -> d, Fri Sep 28 17:38:13 2018
 *
 **/

#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include <plasma_core_blas.h>

#define A(m, n) (double*)plasma_tile_addr(A, m, n)

/***************************************************************************//**
 * Parallel tile calculation of max, one, infinity or Frobenius matrix norm
 * for a symmetric matrix.
 *
 * Only the uplo triangle of A is referenced; off-diagonal tiles are processed
 * with general-matrix (dlange/dgessq) kernels and diagonal tiles with
 * symmetric (dlansy/dsyssq) kernels.  Per-tile partial results are written
 * into the caller-provided `work` array and reduced to the scalar *value
 * after a taskwait.  NOTE(review): the required size/layout of `work`
 * differs per norm (A.mt*A.nt tile grid for max/Frobenius, A.mt*A.n + A.n
 * column/row sums for one/inf — inferred from the indexing below; confirm
 * against the caller plasma_dlansy).
 ******************************************************************************/
void plasma_pdlansy(plasma_enum_t norm, plasma_enum_t uplo,
                    plasma_desc_t A, double *work, double *value,
                    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Return if failed sequence.
    if (sequence->status != PlasmaSuccess)
        return;

    switch (norm) {
    // Declarations at switch scope, shared by the cases below; no code here
    // is executed before the matching case label.
    double stub;
    double *workspace;
    double *scale;
    double *sumsq;
    //================
    // PlasmaMaxNorm
    //================
    case PlasmaMaxNorm:
        // Per-tile max |a_ij| into work[A.mt*n+m], then reduce over the
        // tile-result matrix.
        for (int m = 0; m < A.mt; m++) {
            int mvam = plasma_tile_mview(A, m);
            int ldam = plasma_tile_mmain(A, m);
            if (uplo == PlasmaLower) {
                for (int n = 0; n < m; n++) {
                    int nvan = plasma_tile_nview(A, n);
                    plasma_core_omp_dlange(PlasmaMaxNorm,
                                           mvam, nvan,
                                           A(m, n), ldam,
                                           &stub, &work[A.mt*n+m],
                                           sequence, request);
                }
            }
            else { // PlasmaUpper
                for (int n = m+1; n < A.nt; n++) {
                    int nvan = plasma_tile_nview(A, n);
                    plasma_core_omp_dlange(PlasmaMaxNorm,
                                           mvam, nvan,
                                           A(m, n), ldam,
                                           &stub, &work[A.mt*n+m],
                                           sequence, request);
                }
            }
            // diagonal tile: symmetric kernel
            plasma_core_omp_dlansy(PlasmaMaxNorm, uplo,
                                   mvam,
                                   A(m, m), ldam,
                                   &stub, &work[A.mt*m+m],
                                   sequence, request);
        }
        #pragma omp taskwait
        // final reduction of the per-tile maxima
        plasma_core_omp_dlansy(PlasmaMaxNorm, uplo,
                               A.nt,
                               work, A.mt,
                               &stub, value,
                               sequence, request);
        break;
    //================
    // PlasmaOneNorm
    //================
    case PlasmaOneNorm:
    case PlasmaInfNorm:
        // For a symmetric matrix the one and infinity norms coincide, so
        // both cases accumulate column sums (One) and row sums (Inf) of each
        // off-diagonal tile into mirrored slots of `work`.
        for (int m = 0; m < A.mt; m++) {
            int mvam = plasma_tile_mview(A, m);
            int ldam = plasma_tile_mmain(A, m);
            if (uplo == PlasmaLower) {
                for (int n = 0; n < m; n++) {
                    int nvan = plasma_tile_nview(A, n);
                    plasma_core_omp_dlange_aux(PlasmaOneNorm,
                                               mvam, nvan,
                                               A(m, n), ldam,
                                               &work[A.n*m+n*A.nb],
                                               sequence, request);
                    plasma_core_omp_dlange_aux(PlasmaInfNorm,
                                               mvam, nvan,
                                               A(m, n), ldam,
                                               &work[A.n*n+m*A.nb],
                                               sequence, request);
                }
            }
            else { // PlasmaUpper
                for (int n = m+1; n < A.nt; n++) {
                    int nvan = plasma_tile_nview(A, n);
                    plasma_core_omp_dlange_aux(PlasmaOneNorm,
                                               mvam, nvan,
                                               A(m, n), ldam,
                                               &work[A.n*m+n*A.nb],
                                               sequence, request);
                    plasma_core_omp_dlange_aux(PlasmaInfNorm,
                                               mvam, nvan,
                                               A(m, n), ldam,
                                               &work[A.n*n+m*A.nb],
                                               sequence, request);
                }
            }
            // diagonal tile contributes symmetrically; one pass suffices
            plasma_core_omp_dlansy_aux(PlasmaOneNorm, uplo,
                                       mvam,
                                       A(m, m), ldam,
                                       &work[A.n*m+m*A.nb],
                                       sequence, request);
        }
        #pragma omp taskwait
        workspace = work + A.mt*A.n;
        // max over the accumulated column/row sums
        plasma_core_omp_dlange(PlasmaInfNorm,
                               A.n, A.mt,
                               work, A.n,
                               workspace, value,
                               sequence, request);
        break;
    //======================
    // PlasmaFrobeniusNorm
    //======================
    case PlasmaFrobeniusNorm:
        // per-tile (scale, sumsq) pairs in LAPACK dlassq style, reduced at
        // the end; off-diagonal tiles are counted once here and weighted for
        // symmetry inside the _aux reduction (presumably — confirm against
        // plasma_core_omp_dsyssq_aux).
        scale = work;
        sumsq = work + A.mt*A.nt;
        for (int m = 0; m < A.mt; m++) {
            int mvam = plasma_tile_mview(A, m);
            int ldam = plasma_tile_mmain(A, m);
            if (uplo == PlasmaLower) {
                for (int n = 0; n < m; n++) {
                    int nvan = plasma_tile_nview(A, n);
                    plasma_core_omp_dgessq(mvam, nvan,
                                           A(m, n), ldam,
                                           &scale[A.mt*n+m], &sumsq[A.mt*n+m],
                                           sequence, request);
                }
            }
            else { // PlasmaUpper
                for (int n = m+1; n < A.nt; n++) {
                    int nvan = plasma_tile_nview(A, n);
                    plasma_core_omp_dgessq(mvam, nvan,
                                           A(m, n), ldam,
                                           &scale[A.mt*m+n], &sumsq[A.mt*m+n],
                                           sequence, request);
                }
            }
            plasma_core_omp_dsyssq(uplo,
                                   mvam,
                                   A(m, m), ldam,
                                   &scale[A.mt*m+m], &sumsq[A.mt*m+m],
                                   sequence, request);
        }
        #pragma omp taskwait
        plasma_core_omp_dsyssq_aux(A.mt, A.nt,
                                   scale, sumsq,
                                   value,
                                   sequence, request);
        break;
    }
}
maxwell_physbdy.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision$ ***********************************************************************EHEADER*/ /****************************************************************************** * OpenMP Problems * * Need to fix the way these variables are set and incremented in loops: * cnt * ******************************************************************************/ #include "_hypre_sstruct_ls.h" /*-------------------------------------------------------------------------- * Finds the physical boundary boxes for all levels. Since the coarse grid's * boundary may not be on the physical bdry, we need to compare the coarse * grid to the finest level boundary boxes. All boxes of the coarse grids * must be checked, not just the bounding box. * Algo: * 1) obtain boundary boxes for the finest grid * i) mark the fboxes that have boundary elements. * 2) loop over coarse levels * i) for a cbox that maps to a fbox that has boundary layers * a) refine the cbox * b) intersect with the cell boundary layers of the fbox * c) coarsen the intersection * ii) determine the var boxes * iii) mark the coarse box * * Concerns: Checking an individual pgrid may give artificial physical * boundaries. Need to check if any other pgrid is adjacent to it. * We omit this case and assume only one part for now. 
 *--------------------------------------------------------------------------*/

/* Returns, per level, the sorted unique *local* ranks (global rank minus the
   level's start rank) of all unknowns on the physical boundary, plus their
   counts, via BdryRanksl_ptr / BdryRanksCntsl_ptr.  Assumes a single part
   (part= 0) throughout — see the algorithm outline in the comment above. */
HYPRE_Int
hypre_Maxwell_PhysBdy( hypre_SStructGrid      **grid_l,
                       HYPRE_Int                num_levels,
                       hypre_Index              rfactors,
                       HYPRE_Int             ***BdryRanksl_ptr,
                       HYPRE_Int              **BdryRanksCntsl_ptr )
{
   MPI_Comm                comm= (grid_l[0]-> comm);

   HYPRE_Int             **BdryRanks_l;
   HYPRE_Int              *BdryRanksCnts_l;

   HYPRE_Int              *npts;
   HYPRE_Int              *ranks, *upper_rank, *lower_rank;

   hypre_BoxManEntry      *boxman_entry;

   hypre_SStructGrid      *grid;
   hypre_SStructPGrid     *pgrid;
   hypre_StructGrid       *cell_fgrid, *cell_cgrid, *sgrid;

   /* bdry[level][box][0]= cell-centred bdry layers, [var+1]= var bdry layers */
   hypre_BoxArrayArray ****bdry;
   hypre_BoxArrayArray    *fbdry;
   hypre_BoxArrayArray    *cbdry;

   hypre_BoxArray         *box_array;
   hypre_BoxArray         *fboxes, *cboxes;

   hypre_Box              *fbox, *cbox;
   hypre_Box              *box, *contract_fbox, rbox;
   hypre_Box               intersect;

   HYPRE_Int             **cbox_mapping, **fbox_mapping;
   HYPRE_Int             **boxes_with_bdry;

   HYPRE_Int               ndim, nvars;
   HYPRE_Int               nboxes, nfboxes;
   HYPRE_Int               boxi;

   hypre_Index             zero_shift, upper_shift, lower_shift;
   hypre_Index             loop_size, start, index, lindex;

   HYPRE_Int               i, j, k, l, m, n, p;
   HYPRE_Int               d;
   HYPRE_Int               cnt;

   HYPRE_Int               part= 0; /* NOTE, ASSUMING ONE PART */
   HYPRE_Int               matrix_type= HYPRE_PARCSR;
   HYPRE_Int               myproc;
   HYPRE_Int               ierr= 0;

   hypre_MPI_Comm_rank(comm, &myproc);

   ndim= hypre_SStructGridNDim(grid_l[0]);
   hypre_SetIndex3(zero_shift, 0, 0, 0);

   hypre_BoxInit(&intersect, ndim);

   /* bounding global ranks of this processor & allocate boundary box markers. */
   upper_rank= hypre_CTAlloc(HYPRE_Int, num_levels);
   lower_rank= hypre_CTAlloc(HYPRE_Int, num_levels);

   boxes_with_bdry= hypre_TAlloc(HYPRE_Int *, num_levels);
   for (i= 0; i< num_levels; i++)
   {
      grid = grid_l[i];
      lower_rank[i]= hypre_SStructGridStartRank(grid);

      /* note we are assuming only one part */
      pgrid= hypre_SStructGridPGrid(grid, part);
      nvars= hypre_SStructPGridNVars(pgrid);
      sgrid= hypre_SStructPGridSGrid(pgrid, nvars-1);
      box_array= hypre_StructGridBoxes(sgrid);
      box = hypre_BoxArrayBox(box_array, hypre_BoxArraySize(box_array)-1);

      /* upper bound: rank of the last index of this proc's last box */
      hypre_SStructGridBoxProcFindBoxManEntry(grid, part, nvars-1,
                                              hypre_BoxArraySize(box_array)-1,
                                              myproc, &boxman_entry);
      hypre_SStructBoxManEntryGetGlobalCSRank(boxman_entry,
                                              hypre_BoxIMax(box), &upper_rank[i]);

      sgrid= hypre_SStructPGridCellSGrid(pgrid);
      box_array= hypre_StructGridBoxes(sgrid);
      boxes_with_bdry[i]= hypre_CTAlloc(HYPRE_Int, hypre_BoxArraySize(box_array));
   }

   /*-----------------------------------------------------------------------------
    * construct box_number mapping between levels, and offset strides because of
    * projection coarsening. Note: from the way the coarse boxes are created and
    * numbered, to determine the coarse box that matches the fbox, we need to
    * only check the tail end of the list of cboxes. In fact, given fbox_i,
    * if it's coarsened extents do not intersect with the first coarse box of the
    * tail end, then this fbox vanishes in the coarsening.
    * c/fbox_mapping gives the fine/coarse box mapping between two consecutive levels
    * of the multilevel hierarchy.
    *-----------------------------------------------------------------------------*/
   if (num_levels > 1)
   {
      cbox_mapping= hypre_CTAlloc(HYPRE_Int *, num_levels);
      fbox_mapping= hypre_CTAlloc(HYPRE_Int *, num_levels);
   }
   for (i= 0; i< (num_levels-1); i++)
   {
      grid = grid_l[i];
      pgrid= hypre_SStructGridPGrid(grid, 0); /* assuming one part */
      cell_fgrid= hypre_SStructPGridCellSGrid(pgrid);
      fboxes= hypre_StructGridBoxes(cell_fgrid);
      nfboxes= hypre_BoxArraySize(hypre_StructGridBoxes(cell_fgrid));
      fbox_mapping[i]= hypre_CTAlloc(HYPRE_Int, nfboxes);

      grid = grid_l[i+1];
      pgrid= hypre_SStructGridPGrid(grid, 0); /* assuming one part */
      cell_cgrid= hypre_SStructPGridCellSGrid(pgrid);
      cboxes= hypre_StructGridBoxes(cell_cgrid);
      nboxes= hypre_BoxArraySize(hypre_StructGridBoxes(cell_cgrid));
      cbox_mapping[i+1]= hypre_CTAlloc(HYPRE_Int, nboxes);

      /* assuming if i1 > i2 and (box j1) is coarsened from (box i1)
         and (box j2) from (box i2), then j1 > j2. */
      k= 0;
      hypre_ForBoxI(j, fboxes)
      {
         fbox= hypre_BoxArrayBox(fboxes, j);
         hypre_CopyBox(fbox, &rbox);
         hypre_ProjectBox(&rbox, zero_shift, rfactors);
         hypre_StructMapFineToCoarse(hypre_BoxIMin(&rbox), zero_shift,
                                     rfactors, hypre_BoxIMin(&rbox));
         hypre_StructMapFineToCoarse(hypre_BoxIMax(&rbox), zero_shift,
                                     rfactors, hypre_BoxIMax(&rbox));

         /* since the ordering of the cboxes was determined by the fbox
            ordering, we only have to check if the first cbox in the
            list intersects with rbox. If not, this fbox vanished in the
            coarsening. */
         cbox= hypre_BoxArrayBox(cboxes, k);
         hypre_IntersectBoxes(&rbox, cbox, &rbox);
         if (hypre_BoxVolume(&rbox))
         {
            cbox_mapping[i+1][k]= j;
            fbox_mapping[i][j]= k;
            k++;
         }  /* if (hypre_BoxVolume(&rbox)) */
      }  /* hypre_ForBoxI(j, fboxes) */
   }  /* for (i= 0; i< (num_levels-1); i++) */

   bdry= hypre_TAlloc(hypre_BoxArrayArray ***, num_levels);
   npts= hypre_CTAlloc(HYPRE_Int, num_levels);

   /* finest level boundary determination */
   grid = grid_l[0];
   pgrid= hypre_SStructGridPGrid(grid, 0); /* assuming one part */
   nvars= hypre_SStructPGridNVars(pgrid);
   cell_fgrid= hypre_SStructPGridCellSGrid(pgrid);
   nboxes= hypre_BoxArraySize(hypre_StructGridBoxes(cell_fgrid));

   hypre_Maxwell_PNedelec_Bdy(cell_fgrid, pgrid, &bdry[0]);
   for (i= 0; i< nboxes; i++)
   {
      if (bdry[0][i])  /* boundary layers on box[i] */
      {
         /* count the boundary points of every variable on this box */
         for (j= 0; j< nvars; j++)
         {
            fbdry= bdry[0][i][j+1]; /*(j+1) since j= 0 stores cell-centred boxes*/
            hypre_ForBoxArrayI(k, fbdry)
            {
               box_array= hypre_BoxArrayArrayBoxArray(fbdry, k);

               hypre_ForBoxI(p, box_array)
               {
                  box= hypre_BoxArrayBox(box_array, p);
                  npts[0]+= hypre_BoxVolume(box);
               }
            }
         }  /* for (j= 0; j< nvars; j++) */

         boxes_with_bdry[0][i]= 1; /* mark this box as containing boundary layers */
      }  /* if (bdry[0][i]) */
   }
   nfboxes= nboxes;

   /* coarser levels */
   for (i= 1; i< num_levels; i++)
   {
      grid = grid_l[i-1];
      pgrid= hypre_SStructGridPGrid(grid, 0); /* assuming one part */
      cell_fgrid= hypre_SStructPGridCellSGrid(pgrid);
      fboxes= hypre_StructGridBoxes(cell_fgrid);

      grid = grid_l[i];
      pgrid= hypre_SStructGridPGrid(grid, 0); /* assuming one part */
      cell_cgrid= hypre_SStructPGridCellSGrid(pgrid);
      nvars= hypre_SStructPGridNVars(pgrid);

      cboxes= hypre_StructGridBoxes(cell_cgrid);
      nboxes= hypre_BoxArraySize(hypre_StructGridBoxes(cell_cgrid));

      bdry[i]= hypre_TAlloc(hypre_BoxArrayArray **, nboxes);
      p= 2*(ndim-1);
      for (j= 0; j< nboxes; j++)
      {
         bdry[i][j]= hypre_TAlloc(hypre_BoxArrayArray *, nvars+1);

         /* cell grid boxarrayarray */
         bdry[i][j][0]= hypre_BoxArrayArrayCreate(2*ndim, ndim);

         /* var grid boxarrayarrays */
         for (k= 0; k< nvars; k++)
         {
            bdry[i][j][k+1]= hypre_BoxArrayArrayCreate(p, ndim);
         }
      }

      /* check if there are boundary points from the previous level */
      for (j= 0; j< nfboxes; j++)
      {
         /* see if the j box of level (i-1) has any boundary layers */
         if (boxes_with_bdry[i-1][j])
         {
            boxi= fbox_mapping[i-1][j];
            cbox= hypre_BoxArrayBox(cboxes, boxi);
            fbox= hypre_BoxArrayBox(fboxes, j);

            /* contract the fbox so that divisible in rfactor */
            contract_fbox= hypre_BoxContraction(fbox, cell_fgrid, rfactors);

            /* refine the cbox. Expand the refined cbox so that the complete
               chunk of the fine box that coarsened to it is included. This
               requires some offsets */
            hypre_ClearIndex(upper_shift);
            hypre_ClearIndex(lower_shift);
            for (k= 0; k< ndim; k++)
            {
               m= hypre_BoxIMin(contract_fbox)[k];
               p= m%rfactors[k];
               if (p > 0 && m > 0)
               {
                  upper_shift[k]= p-1;
                  lower_shift[k]= p-rfactors[k];
               }
               else
               {
                  upper_shift[k]= rfactors[k]-p-1;
                  lower_shift[k]=-p;
               }
            }
            hypre_BoxDestroy(contract_fbox);

            hypre_CopyBox(cbox, &rbox);
            hypre_StructMapCoarseToFine(hypre_BoxIMin(&rbox), zero_shift,
                                        rfactors, hypre_BoxIMin(&rbox));
            hypre_StructMapCoarseToFine(hypre_BoxIMax(&rbox), zero_shift,
                                        rfactors, hypre_BoxIMax(&rbox));
            hypre_AddIndexes(lower_shift, hypre_BoxIMin(&rbox), 3,
                             hypre_BoxIMin(&rbox));
            hypre_AddIndexes(upper_shift, hypre_BoxIMax(&rbox), 3,
                             hypre_BoxIMax(&rbox));

            /* Determine, if any, boundary layers for this rbox. Since the
               boundaries of the coarser levels may not be physical, we cannot
               use hypre_BoxBoundaryDG. But accomplished through intersecting
               with the finer level boundary boxes. */
            fbdry= bdry[i-1][j][0];   /* cell-centred boundary layers of level (i-1) */
            cbdry= bdry[i][boxi][0];  /* cell-centred boundary layers of level i */

            /* fbdry is the cell-centred box_arrayarray. Contains an array of
               (2*ndim) boxarrays, one for each direction. */
            cnt= 0;
            hypre_ForBoxArrayI(l, fbdry)
            {
               /* determine which boundary side we are doing. Depending on the
                  boundary, when we coarsen the refined boundary layer, the
                  extents may need to be changed,
                  e.g., index[lower,j,k]= index[upper,j,k]. */
               switch(l)
               {
                  case 0:  /* lower x direction, x_upper= x_lower */
                  {
                     n= 1; /* n flags whether upper or lower to be replaced */
                     d= 0; /* x component */
                     break;
                  }
                  case 1:  /* upper x direction, x_lower= x_upper */
                  {
                     n= 0; /* n flags whether upper or lower to be replaced */
                     d= 0; /* x component */
                     break;
                  }
                  case 2:  /* lower y direction, y_upper= y_lower */
                  {
                     n= 1; /* n flags whether upper or lower to be replaced */
                     d= 1; /* y component */
                     break;
                  }
                  case 3:  /* upper y direction, y_lower= y_upper */
                  {
                     n= 0; /* n flags whether upper or lower to be replaced */
                     d= 1; /* y component */
                     break;
                  }
                  case 4:  /* lower z direction, z_lower= z_upper */
                  {
                     n= 1; /* n flags whether upper or lower to be replaced */
                     d= 2; /* z component */
                     break;
                  }
                  case 5:  /* upper z direction, z_upper= z_lower */
                  {
                     n= 0; /* n flags whether upper or lower to be replaced */
                     d= 2; /* z component */
                     break;
                  }
               }

               box_array= hypre_BoxArrayArrayBoxArray(fbdry, l);
               hypre_ForBoxI(p, box_array)
               {
                  hypre_IntersectBoxes(hypre_BoxArrayBox(box_array, p), &rbox,
                                       &intersect);
                  if (hypre_BoxVolume(&intersect))
                  {
                     /* coarsen the refined boundary box and append it to
                        boxarray hypre_BoxArrayArrayBoxArray(cbdry, l) */
                     hypre_ProjectBox(&intersect, zero_shift, rfactors);
                     hypre_StructMapFineToCoarse(hypre_BoxIMin(&intersect),
                                                 zero_shift, rfactors,
                                                 hypre_BoxIMin(&intersect));
                     hypre_StructMapFineToCoarse(hypre_BoxIMax(&intersect),
                                                 zero_shift, rfactors,
                                                 hypre_BoxIMax(&intersect));

                     /* the coarsened intersect box may be incorrect because
                        of the box projecting formulas. */
                     if (n) /* replace upper by lower */
                     {
                        hypre_BoxIMax(&intersect)[d]= hypre_BoxIMin(&intersect)[d];
                     }
                     else   /* replace lower by upper */
                     {
                        hypre_BoxIMin(&intersect)[d]= hypre_BoxIMax(&intersect)[d];
                     }

                     hypre_AppendBox(&intersect,
                                     hypre_BoxArrayArrayBoxArray(cbdry, l));
                     cnt++; /* counter to signal boundary layers for cbox boxi */
                  }  /* if (hypre_BoxVolume(&intersect)) */
               }  /* hypre_ForBoxI(p, box_array) */
            }  /* hypre_ForBoxArrayI(l, fbdry) */

            /* All the boundary box_arrayarrays have been checked for coarse
               boxi. Now get the variable boundary layers if any, count the
               number of boundary points, and appropriately mark boxi. */
            if (cnt)
            {
               hypre_Maxwell_VarBdy(pgrid, bdry[i][boxi]);

               for (p= 0; p< nvars; p++)
               {
                  cbdry= bdry[i][boxi][p+1];
                  hypre_ForBoxArrayI(l, cbdry)
                  {
                     box_array= hypre_BoxArrayArrayBoxArray(cbdry, l);
                     hypre_ForBoxI(m, box_array)
                     {
                        cbox= hypre_BoxArrayBox(box_array, m);
                        npts[i]+= hypre_BoxVolume(cbox);
                     }
                  }
               }

               boxes_with_bdry[i][boxi]= 1; /* mark as containing boundary */
            }
         }  /* if (boxes_with_bdry[i-1][j]) */
      }  /* for (j= 0; j< nfboxes; j++) */

      nfboxes= nboxes;
   }  /* for (i= 1; i< num_levels; i++) */

   /* de-allocate objects that are not needed anymore */
   for (i= 0; i< (num_levels-1); i++)
   {
      if (fbox_mapping[i])
      {
         hypre_TFree(fbox_mapping[i]);
      }
      if (cbox_mapping[i+1])
      {
         hypre_TFree(cbox_mapping[i+1]);
      }

      /* NOTE(review): the values set below are never used after this loop —
         looks like leftover code. */
      grid = grid_l[i+1];
      pgrid= hypre_SStructGridPGrid(grid, 0); /* assuming one part */
      cell_cgrid= hypre_SStructPGridCellSGrid(pgrid);

      cboxes= hypre_StructGridBoxes(cell_cgrid);
      nboxes= hypre_BoxArraySize(hypre_StructGridBoxes(cell_cgrid));

   }
   if (num_levels > 1)
   {
      hypre_TFree(fbox_mapping);
      hypre_TFree(cbox_mapping);
   }

   /* find the ranks for the boundary points */
   BdryRanks_l    = hypre_TAlloc(HYPRE_Int *, num_levels);
   BdryRanksCnts_l= hypre_TAlloc(HYPRE_Int , num_levels);

   /* loop over levels and extract boundary ranks. Only extract unique ranks */
   for (i= 0; i< num_levels; i++)
   {
      grid= grid_l[i];
      pgrid= hypre_SStructGridPGrid(grid, 0); /* assuming one part */
      cell_cgrid= hypre_SStructPGridCellSGrid(pgrid);
      nvars= hypre_SStructPGridNVars(pgrid);

      cboxes= hypre_StructGridBoxes(cell_cgrid);
      nboxes= hypre_BoxArraySize(hypre_StructGridBoxes(cell_cgrid));

      ranks= hypre_TAlloc(HYPRE_Int, npts[i]);
      cnt= 0;
      for (j= 0; j< nboxes; j++)
      {
         if (boxes_with_bdry[i][j])
         {
            for (k= 0; k< nvars; k++)
            {
               fbdry= bdry[i][j][k+1];

               hypre_ForBoxArrayI(m, fbdry)
               {
                  box_array= hypre_BoxArrayArrayBoxArray(fbdry, m);
                  hypre_ForBoxI(p, box_array)
                  {
                     box= hypre_BoxArrayBox(box_array, p);

                     hypre_BoxGetSize(box, loop_size);
                     hypre_CopyIndex(hypre_BoxIMin(box), start);

                     /* serial loop: cnt is a sequential counter, so the
                        OpenMP variant is disabled (#if 0) */
                     hypre_BoxLoop0Begin(ndim, loop_size);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,lindex,index,boxman_entry,cnt) HYPRE_SMP_SCHEDULE
#endif
#else
                     hypre_BoxLoopSetOneBlock();
#endif
                     hypre_BoxLoop0For()
                     {
                        hypre_BoxLoopGetIndex(lindex);
                        hypre_SetIndex3(index, lindex[0], lindex[1], lindex[2]);
                        hypre_AddIndexes(index, start, 3, index);

                        hypre_SStructGridFindBoxManEntry(grid, part, index, k,
                                                         &boxman_entry);
                        hypre_SStructBoxManEntryGetGlobalRank(boxman_entry,
                                                              index, &ranks[cnt],
                                                              matrix_type);
                        cnt++;

                     }
                     hypre_BoxLoop0End();
                  }  /* hypre_ForBoxI(p, box_array) */
               }  /* hypre_ForBoxArrayI(m, fbdry) */
            }  /* for (k= 0; k< nvars; k++) */
         }  /* if (boxes_with_bdry[i][j]) */

         for (k= 0; k< nvars; k++)
         {
            hypre_BoxArrayArrayDestroy(bdry[i][j][k+1]);
         }
         hypre_BoxArrayArrayDestroy(bdry[i][j][0]);
         hypre_TFree(bdry[i][j]);
      }  /* for (j= 0; j< nboxes; j++) */
      hypre_TFree(bdry[i]);

      /* mark all ranks that are outside this processor to -1 */
      for (j= 0; j< cnt; j++)
      {
         if ( (ranks[j] < lower_rank[i]) || (ranks[j] > upper_rank[i]) )
         {
            ranks[j]= -1;
         }
      }

      /* sort the ranks & extract the unique ones */
      if (cnt)  /* recall that some may not have bdry pts */
      {
         hypre_qsort0(ranks, 0, cnt-1);

         /* k: first on-processor entry after sorting (off-proc are -1) */
         k= 0;
         if (ranks[0] < 0) /* remove the off-processor markers */
         {
            for (j= 1; j< cnt; j++)
            {
               if (ranks[j] > -1)
               {
                  k= j;
                  break;
               }
            }
         }

         l= 1;
         for (j= k+1; j< cnt; j++)
         {
            if (ranks[j] != ranks[j-1])
            {
               l++;
            }
         }
         BdryRanks_l[i]= hypre_TAlloc(HYPRE_Int, l);
         BdryRanksCnts_l[i]= l;

         l= 0;
         BdryRanks_l[i][l]= ranks[k]-lower_rank[i];
         for (j= k+1; j< cnt; j++)
         {
            if (ranks[j] != ranks[j-1])
            {
               l++;
               BdryRanks_l[i][l]= ranks[j]-lower_rank[i]; /* store local ranks */
            }
         }
      }
      else /* set BdryRanks_l[i] to be null */
      {
         BdryRanks_l[i]= NULL;
         BdryRanksCnts_l[i]= 0;
      }

      hypre_TFree(ranks);
      hypre_TFree(boxes_with_bdry[i]);

   }  /* for (i= 0; i< num_levels; i++) */

   hypre_TFree(boxes_with_bdry);
   hypre_TFree(lower_rank);
   hypre_TFree(upper_rank);

   hypre_TFree(bdry);
   hypre_TFree(npts);

   *BdryRanksl_ptr    = BdryRanks_l;
   *BdryRanksCntsl_ptr= BdryRanksCnts_l;

   return ierr;
}

/*-----------------------------------------------------------------------------
 * Determine the variable boundary layers using the cell-centred boundary
 * layers. The cell-centred boundary layers are located in bdry[0], a
 * hypre_BoxArrayArray of size 2*ndim, one array for the upper side and one
 * for the lower side, for each direction.
 *-----------------------------------------------------------------------------*/

/* bdry[i+1] receives the boundary boxes of variable i (bdry[0] is the
   cell-centred input).  NOTE(review): only vartypes 2,3 (x/y faces) and
   5,6,7 (x/y/z edges) are handled; any other type falls through the switch
   and contributes no boxes. */
HYPRE_Int
hypre_Maxwell_VarBdy( hypre_SStructPGrid       *pgrid,
                      hypre_BoxArrayArray     **bdry )
{
   HYPRE_Int              ierr = 0;
   HYPRE_Int              nvars= hypre_SStructPGridNVars(pgrid);

   hypre_BoxArrayArray   *cell_bdry= bdry[0];
   hypre_BoxArray        *box_array, *box_array2;
   hypre_Box             *bdy_box, *shifted_box;

   HYPRE_SStructVariable *vartypes = hypre_SStructPGridVarTypes(pgrid);

   hypre_Index            varoffset, ishift, jshift, kshift;
   hypre_Index            lower, upper;

   HYPRE_Int              ndim = hypre_SStructPGridNDim(pgrid);
   HYPRE_Int              i, k, t;

   /* unit shifts along x, y, z */
   hypre_SetIndex3(ishift, 1, 0, 0);
   hypre_SetIndex3(jshift, 0, 1, 0);
   hypre_SetIndex3(kshift, 0, 0, 1);

   shifted_box= hypre_BoxCreate(ndim);
   for (i= 0; i< nvars; i++)
   {
      t= vartypes[i];
      hypre_SStructVariableGetOffset(vartypes[i], ndim, varoffset);
      switch(t)
      {
         case 2: /* xface, boundary i= lower, upper */
         {
            /* boundary i= lower */
            box_array= hypre_BoxArrayArrayBoxArray(cell_bdry, 0);
            if (hypre_BoxArraySize(box_array))
            {
               box_array2= hypre_BoxArrayArrayBoxArray(bdry[i+1], 0);
               hypre_ForBoxI(k, box_array)
               {
                  bdy_box= hypre_BoxArrayBox(box_array, k); /* bdry boxes */
                  hypre_CopyIndex(hypre_BoxIMin(bdy_box), lower);
                  hypre_CopyIndex(hypre_BoxIMax(bdy_box), upper);
                  /* shift cell indices down by the variable offset */
                  hypre_SubtractIndexes(lower, varoffset, 3, lower);
                  hypre_SubtractIndexes(upper, varoffset, 3, upper);

                  hypre_BoxSetExtents(shifted_box, lower, upper);
                  hypre_AppendBox(shifted_box, box_array2);
               }
            }

            /* boundary i= upper */
            box_array= hypre_BoxArrayArrayBoxArray(cell_bdry, 1);
            if (hypre_BoxArraySize(box_array))
            {
               box_array2= hypre_BoxArrayArrayBoxArray(bdry[i+1], 1);
               hypre_ForBoxI(k, box_array)
               {
                  bdy_box= hypre_BoxArrayBox(box_array, k); /* bdry boxes */
                  hypre_CopyIndex(hypre_BoxIMin(bdy_box), lower);
                  hypre_CopyIndex(hypre_BoxIMax(bdy_box), upper);

                  hypre_BoxSetExtents(shifted_box, lower, upper);
                  hypre_AppendBox(shifted_box, box_array2);
               }
            }
            break;
         }

         case 3: /* yface, boundary j= lower, upper */
         {
            box_array= hypre_BoxArrayArrayBoxArray(cell_bdry, 2);
            if (hypre_BoxArraySize(box_array))
            {
               box_array2= hypre_BoxArrayArrayBoxArray(bdry[i+1], 0);
               hypre_ForBoxI(k, box_array)
               {
                  bdy_box= hypre_BoxArrayBox(box_array, k); /* bdry boxes */
                  hypre_CopyIndex(hypre_BoxIMin(bdy_box), lower);
                  hypre_CopyIndex(hypre_BoxIMax(bdy_box), upper);
                  hypre_SubtractIndexes(lower, varoffset, 3, lower);
                  hypre_SubtractIndexes(upper, varoffset, 3, upper);

                  hypre_BoxSetExtents(shifted_box, lower, upper);
                  hypre_AppendBox(shifted_box, box_array2);
               }
            }

            box_array= hypre_BoxArrayArrayBoxArray(cell_bdry, 3);
            if (hypre_BoxArraySize(box_array))
            {
               box_array2= hypre_BoxArrayArrayBoxArray(bdry[i+1], 1);
               hypre_ForBoxI(k, box_array)
               {
                  bdy_box= hypre_BoxArrayBox(box_array, k); /* bdry boxes */
                  hypre_CopyIndex(hypre_BoxIMin(bdy_box), lower);
                  hypre_CopyIndex(hypre_BoxIMax(bdy_box), upper);

                  hypre_BoxSetExtents(shifted_box, lower, upper);
                  hypre_AppendBox(shifted_box, box_array2);
               }
            }
            break;
         }

         case 5: /* xedge, boundary z_faces & y_faces */
         {
            /* boundary k= lower zface*/
            box_array= hypre_BoxArrayArrayBoxArray(cell_bdry, 4);
            if (hypre_BoxArraySize(box_array))
            {
               box_array2= hypre_BoxArrayArrayBoxArray(bdry[i+1], 0);
               hypre_ForBoxI(k, box_array)
               {
                  bdy_box= hypre_BoxArrayBox(box_array, k); /* bdry boxes */
                  hypre_CopyIndex(hypre_BoxIMin(bdy_box), lower);
                  hypre_CopyIndex(hypre_BoxIMax(bdy_box), upper);
                  hypre_SubtractIndexes(lower, varoffset, 3, lower);
                  hypre_SubtractIndexes(upper, kshift, 3, upper);

                  hypre_BoxSetExtents(shifted_box, lower, upper);
                  hypre_AppendBox(shifted_box, box_array2);
               }
            }

            /* boundary k= upper zface*/
            box_array= hypre_BoxArrayArrayBoxArray(cell_bdry, 5);
            if (hypre_BoxArraySize(box_array))
            {
               box_array2= hypre_BoxArrayArrayBoxArray(bdry[i+1], 1);
               hypre_ForBoxI(k, box_array)
               {
                  bdy_box= hypre_BoxArrayBox(box_array, k); /* bdry boxes */
                  hypre_CopyIndex(hypre_BoxIMin(bdy_box), lower);
                  hypre_CopyIndex(hypre_BoxIMax(bdy_box), upper);
                  hypre_SubtractIndexes(lower, jshift, 3, lower);

                  hypre_BoxSetExtents(shifted_box, lower, upper);
                  hypre_AppendBox(shifted_box, box_array2);
               }
            }

            /* boundary j= lower yface*/
            box_array= hypre_BoxArrayArrayBoxArray(cell_bdry, 2);
            if (hypre_BoxArraySize(box_array))
            {
               box_array2= hypre_BoxArrayArrayBoxArray(bdry[i+1], 2);
               hypre_ForBoxI(k, box_array)
               {
                  bdy_box= hypre_BoxArrayBox(box_array, k); /* bdry boxes */
                  hypre_CopyIndex(hypre_BoxIMin(bdy_box), lower);
                  hypre_CopyIndex(hypre_BoxIMax(bdy_box), upper);
                  hypre_SubtractIndexes(lower, varoffset, 3, lower);
                  hypre_SubtractIndexes(upper, jshift, 3, upper);

                  hypre_BoxSetExtents(shifted_box, lower, upper);
                  hypre_AppendBox(shifted_box, box_array2);
               }
            }

            /* boundary j= upper yface*/
            box_array= hypre_BoxArrayArrayBoxArray(cell_bdry, 3);
            if (hypre_BoxArraySize(box_array))
            {
               box_array2= hypre_BoxArrayArrayBoxArray(bdry[i+1], 3);
               hypre_ForBoxI(k, box_array)
               {
                  bdy_box= hypre_BoxArrayBox(box_array, k); /* bdry boxes */
                  hypre_CopyIndex(hypre_BoxIMin(bdy_box), lower);
                  hypre_CopyIndex(hypre_BoxIMax(bdy_box), upper);
                  hypre_SubtractIndexes(lower, kshift, 3, lower);

                  hypre_BoxSetExtents(shifted_box, lower, upper);
                  hypre_AppendBox(shifted_box, box_array2);
               }
            }
            break;
         }

         case 6: /* yedge, boundary z_faces & x_faces */
         {
            /* boundary k= lower zface*/
            box_array= hypre_BoxArrayArrayBoxArray(cell_bdry, 4);
            if (hypre_BoxArraySize(box_array))
            {
               box_array2= hypre_BoxArrayArrayBoxArray(bdry[i+1], 0);
               hypre_ForBoxI(k, box_array)
               {
                  bdy_box= hypre_BoxArrayBox(box_array, k); /* bdry boxes */
                  hypre_CopyIndex(hypre_BoxIMin(bdy_box), lower);
                  hypre_CopyIndex(hypre_BoxIMax(bdy_box), upper);
                  hypre_SubtractIndexes(lower, varoffset, 3, lower);
                  hypre_SubtractIndexes(upper, kshift, 3, upper);

                  hypre_BoxSetExtents(shifted_box, lower, upper);
                  hypre_AppendBox(shifted_box, box_array2);
               }
            }

            /* boundary k= upper zface*/
            box_array= hypre_BoxArrayArrayBoxArray(cell_bdry, 5);
            if (hypre_BoxArraySize(box_array))
            {
               box_array2= hypre_BoxArrayArrayBoxArray(bdry[i+1], 1);
               hypre_ForBoxI(k, box_array)
               {
                  bdy_box= hypre_BoxArrayBox(box_array, k); /* bdry boxes */
                  hypre_CopyIndex(hypre_BoxIMin(bdy_box), lower);
                  hypre_CopyIndex(hypre_BoxIMax(bdy_box), upper);
                  hypre_SubtractIndexes(lower, ishift, 3, lower);

                  hypre_BoxSetExtents(shifted_box, lower, upper);
                  hypre_AppendBox(shifted_box, box_array2);
               }
            }

            /* boundary i= lower xface*/
            box_array= hypre_BoxArrayArrayBoxArray(cell_bdry, 0);
            if (hypre_BoxArraySize(box_array))
            {
               box_array2= hypre_BoxArrayArrayBoxArray(bdry[i+1], 2);
               hypre_ForBoxI(k, box_array)
               {
                  bdy_box= hypre_BoxArrayBox(box_array, k); /* bdry boxes */
                  hypre_CopyIndex(hypre_BoxIMin(bdy_box), lower);
                  hypre_CopyIndex(hypre_BoxIMax(bdy_box), upper);
                  hypre_SubtractIndexes(lower, varoffset, 3, lower);
                  hypre_SubtractIndexes(upper, ishift, 3, upper);

                  hypre_BoxSetExtents(shifted_box, lower, upper);
                  hypre_AppendBox(shifted_box, box_array2);
               }
            }

            /* boundary i= upper xface*/
            box_array= hypre_BoxArrayArrayBoxArray(cell_bdry, 1);
            if (hypre_BoxArraySize(box_array))
            {
               box_array2= hypre_BoxArrayArrayBoxArray(bdry[i+1], 3);
               hypre_ForBoxI(k, box_array)
               {
                  bdy_box= hypre_BoxArrayBox(box_array, k); /* bdry boxes */
                  hypre_CopyIndex(hypre_BoxIMin(bdy_box), lower);
                  hypre_CopyIndex(hypre_BoxIMax(bdy_box), upper);
                  hypre_SubtractIndexes(lower, kshift, 3, lower);

                  hypre_BoxSetExtents(shifted_box, lower, upper);
                  hypre_AppendBox(shifted_box, box_array2);
               }
            }
            break;
         }

         case 7: /* zedge, boundary y_faces & x_faces */
         {
            /* boundary j= lower yface*/
            box_array= hypre_BoxArrayArrayBoxArray(cell_bdry, 2);
            if (hypre_BoxArraySize(box_array))
            {
               box_array2= hypre_BoxArrayArrayBoxArray(bdry[i+1], 0);
               hypre_ForBoxI(k, box_array)
               {
                  bdy_box= hypre_BoxArrayBox(box_array, k); /* bdry boxes */
                  hypre_CopyIndex(hypre_BoxIMin(bdy_box), lower);
                  hypre_CopyIndex(hypre_BoxIMax(bdy_box), upper);
                  hypre_SubtractIndexes(lower, varoffset, 3, lower);
                  hypre_SubtractIndexes(upper, jshift, 3, upper);

                  hypre_BoxSetExtents(shifted_box, lower, upper);
                  hypre_AppendBox(shifted_box, box_array2);
               }
            }

            /* boundary j= upper yface*/
            box_array= hypre_BoxArrayArrayBoxArray(cell_bdry, 3);
            if (hypre_BoxArraySize(box_array))
            {
               box_array2= hypre_BoxArrayArrayBoxArray(bdry[i+1], 1);
               hypre_ForBoxI(k, box_array)
               {
                  bdy_box= hypre_BoxArrayBox(box_array, k); /* bdry boxes */
                  hypre_CopyIndex(hypre_BoxIMin(bdy_box), lower);
                  hypre_CopyIndex(hypre_BoxIMax(bdy_box), upper);
                  hypre_SubtractIndexes(lower, ishift, 3, lower);

                  hypre_BoxSetExtents(shifted_box, lower, upper);
                  hypre_AppendBox(shifted_box, box_array2);
               }
            }

            /* boundary i= lower xface*/
            box_array= hypre_BoxArrayArrayBoxArray(cell_bdry, 0);
            if (hypre_BoxArraySize(box_array))
            {
               box_array2= hypre_BoxArrayArrayBoxArray(bdry[i+1], 2);
               hypre_ForBoxI(k, box_array)
               {
                  bdy_box= hypre_BoxArrayBox(box_array, k); /* bdry boxes */
                  hypre_CopyIndex(hypre_BoxIMin(bdy_box), lower);
                  hypre_CopyIndex(hypre_BoxIMax(bdy_box), upper);
                  hypre_SubtractIndexes(lower, varoffset, 3, lower);
                  hypre_SubtractIndexes(upper, ishift, 3, upper);

                  hypre_BoxSetExtents(shifted_box, lower, upper);
                  hypre_AppendBox(shifted_box, box_array2);
               }
            }

            /* boundary i= upper xface*/
            box_array= hypre_BoxArrayArrayBoxArray(cell_bdry, 1);
            if (hypre_BoxArraySize(box_array))
            {
               box_array2= hypre_BoxArrayArrayBoxArray(bdry[i+1], 3);
               hypre_ForBoxI(k, box_array)
               {
                  bdy_box= hypre_BoxArrayBox(box_array, k); /* bdry boxes */
                  hypre_CopyIndex(hypre_BoxIMin(bdy_box), lower);
                  hypre_CopyIndex(hypre_BoxIMax(bdy_box), upper);
                  hypre_SubtractIndexes(lower, jshift, 3, lower);

                  hypre_BoxSetExtents(shifted_box, lower, upper);
                  hypre_AppendBox(shifted_box, box_array2);
               }
            }
            break;
         }

      }  /* switch(t) */
   }     /* for (i= 0; i< nvars; i++) */

   hypre_BoxDestroy(shifted_box);

   return ierr;
}
profile.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR OOO FFFFF IIIII L EEEEE % % P P R R O O F I L E % % PPPP RRRR O O FFF I L EEE % % P R R O O F I L E % % P R R OOO F IIIII LLLLL EEEEE % % % % % % MagickCore Image Profile Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/colorspace-private.h" #include "magick/configure.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/hashmap.h" #include "magick/image.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" #if defined(MAGICKCORE_LCMS_DELEGATE) #if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H) #include <wchar.h> #include <lcms/lcms2.h> #elif defined(MAGICKCORE_HAVE_LCMS2_H) #include <wchar.h> #include "lcms2.h" #elif defined(MAGICKCORE_HAVE_LCMS_LCMS_H) #include <lcms/lcms.h> #else #include "lcms.h" #endif #endif /* Define declarations. 
*/ #if !defined(LCMS_VERSION) || (LCMS_VERSION < 2000) #define cmsSigCmykData icSigCmykData #define cmsSigGrayData icSigGrayData #define cmsSigLabData icSigLabData #define cmsSigLuvData icSigLuvData #define cmsSigRgbData icSigRgbData #define cmsSigXYZData icSigXYZData #define cmsSigYCbCrData icSigYCbCrData #define cmsSigLinkClass icSigLinkClass #define cmsColorSpaceSignature icColorSpaceSignature #define cmsUInt32Number DWORD #define cmsSetLogErrorHandler(handler) cmsSetErrorHandler(handler) #define cmsCreateTransformTHR(context,source_profile,source_type, \ target_profile,target_type,intent,flags) cmsCreateTransform(source_profile, \ source_type,target_profile,target_type,intent,flags); #define cmsOpenProfileFromMemTHR(context,profile,length) \ cmsOpenProfileFromMem(profile,length) #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageProfiles() clones one or more image profiles. % % The format of the CloneImageProfiles method is: % % MagickBooleanType CloneImageProfiles(Image *image, % const Image *clone_image) % % A description of each parameter follows: % % o image: the image. % % o clone_image: the clone image. 
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
  const Image *clone_image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clone_image != (const Image *) NULL);
  assert(clone_image->signature == MagickSignature);
  /*
    Keep the deprecated direct-access members in sync.  Note this is a
    shallow copy: the info pointers alias clone_image's buffers.
  */
  image->color_profile.length=clone_image->color_profile.length;
  image->color_profile.info=clone_image->color_profile.info;
  image->iptc_profile.length=clone_image->iptc_profile.length;
  image->iptc_profile.info=clone_image->iptc_profile.info;
  /*
    Deep-clone the profile splay-tree (keys and StringInfo values copied).
  */
  if (clone_image->profiles != (void *) NULL)
    image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
      (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e l e t e I m a g e P r o f i l e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DeleteImageProfile() deletes a profile from the image by its name.
%
%  The format of the DeleteImageProfile method is:
%
%      MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* No profile tree means there is nothing to delete. */
  if (image->profiles == (SplayTreeInfo *) NULL)
    return(MagickFalse);
  if (LocaleCompare(name,"icc") == 0)
    {
      /*
        Continue to support deprecated color profile for now.
      */
      image->color_profile.length=0;
      image->color_profile.info=(unsigned char *) NULL;
    }
  if (LocaleCompare(name,"iptc") == 0)
    {
      /*
        Continue to support deprecated IPTC profile for now.
      */
      image->iptc_profile.length=0;
      image->iptc_profile.info=(unsigned char *) NULL;
    }
  return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e P r o f i l e s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageProfiles() releases memory associated with an image profile
%  map.
%
%  The format of the DestroyProfiles method is:
%
%      void DestroyImageProfiles(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DestroyImageProfiles(Image *image)
{
  /* DestroySplayTree frees keys and values via the registered destructors. */
  if (image->profiles != (SplayTreeInfo *) NULL)
    image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e P r o f i l e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageProfile() gets a profile associated with an image by name.
%
%  The format of the GetImageProfile method is:
%
%      const StringInfo *GetImageProfile(const Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
% */ MagickExport const StringInfo *GetImageProfile(const Image *image, const char *name) { char key[MaxTextExtent]; const StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((StringInfo *) NULL); (void) CopyMagickString(key,name,MaxTextExtent); profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *) image->profiles,key); return(profile); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t N e x t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNextImageProfile() gets the next profile name for an image. % % The format of the GetNextImageProfile method is: % % char *GetNextImageProfile(const Image *image) % % A description of each parameter follows: % % o hash_info: the hash info. % */ MagickExport char *GetNextImageProfile(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((char *) NULL); return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P r o f i l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ProfileImage() associates, applies, or removes an ICM, IPTC, or generic % profile with / to / from an image. If the profile is NULL, it is removed % from the image otherwise added or applied. Use a name of '*' and a profile % of NULL to remove all profiles from the image. 
%
%  ICC and ICM profiles are handled as follows: If the image does not have
%  an associated color profile, the one you provide is associated with the
%  image and the image pixels are not transformed.  Otherwise, the colorspace
%  transform defined by the existing and new profile are applied to the image
%  pixels and the new profile is associated with the image.
%
%  The format of the ProfileImage method is:
%
%      MagickBooleanType ProfileImage(Image *image,const char *name,
%        const void *datum,const size_t length,const MagickBooleanType clone)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
%    o datum: the profile data.
%
%    o length: the length of the profile.
%
%    o clone: should be MagickFalse.
%
*/

#if defined(MAGICKCORE_LCMS_DELEGATE)

/* Free the per-thread pixel staging buffers (NULL-tolerant per slot). */
static unsigned short **DestroyPixelThreadSet(unsigned short **pixels)
{
  register ssize_t
    i;

  assert(pixels != (unsigned short **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixels[i] != (unsigned short *) NULL)
      pixels[i]=(unsigned short *) RelinquishMagickMemory(pixels[i]);
  pixels=(unsigned short **) RelinquishMagickMemory(pixels);
  return(pixels);
}

/* Allocate one row buffer (columns*channels shorts) per worker thread;
   returns NULL on failure after releasing any partial allocation. */
static unsigned short **AcquirePixelThreadSet(const size_t columns,
  const size_t channels)
{
  register ssize_t
    i;

  unsigned short
    **pixels;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(unsigned short **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (unsigned short **) NULL)
    return((unsigned short **) NULL);
  (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(unsigned short *) AcquireQuantumMemory(columns,channels*
      sizeof(**pixels));
    if (pixels[i] == (unsigned short *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}

/* Free the per-thread LCMS transforms (NULL-tolerant per slot). */
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
  register
    ssize_t
      i;

  assert(transform != (cmsHTRANSFORM *) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (transform[i] != (cmsHTRANSFORM) NULL)
      cmsDeleteTransform(transform[i]);
  transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
  return(transform);
}

/* Create one LCMS color transform per worker thread (cmsDoTransform is not
   re-entrant on a shared handle); returns NULL on any failure after
   releasing the partial set. */
static cmsHTRANSFORM *AcquireTransformThreadSet(Image *image,
  const cmsHPROFILE source_profile,const cmsUInt32Number source_type,
  const cmsHPROFILE target_profile,const cmsUInt32Number target_type,
  const int intent,const cmsUInt32Number flags)
{
  cmsHTRANSFORM
    *transform;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
    sizeof(*transform));
  if (transform == (cmsHTRANSFORM *) NULL)
    return((cmsHTRANSFORM *) NULL);
  (void) ResetMagickMemory(transform,0,number_threads*sizeof(*transform));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    /* THR variant passes the image as the LCMS context (see the
       lcms < 2.0 compatibility macro above). */
    transform[i]=cmsCreateTransformTHR(image,source_profile,source_type,
      target_profile,target_type,intent,flags);
    if (transform[i] == (cmsHTRANSFORM) NULL)
      return(DestroyTransformThreadSet(transform));
  }
  return(transform);
}
#endif

#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(LCMS_VERSION) && (LCMS_VERSION >= 2000)
/* lcms2 error callback: log the message and, when a context (Image) is
   available, record a colorspace-transform warning on it. */
static void LCMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
  const char *message)
{
  Image
    *image;

  (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
    severity,message != (char *) NULL ? message : "no message");
  image=(Image *) context;
  if (image != (Image *) NULL)
    (void) ThrowMagickException(&image->exception,GetMagickModule(),
      ImageWarning,"UnableToTransformColorspace","`%s'",image->filename);
}
#else
/* lcms1 error callback: log only; returning 1 tells lcms the error was
   handled. */
static int LCMSExceptionHandler(int severity,const char *message)
{
  (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%d, %s",
    severity,message != (char *) NULL ?
message : "no message");
  return(1);
}
#endif
#endif

/* Associate, apply, or remove a profile; see the banner comment above for
   the full contract.  NULL datum/zero length deletes profiles matching
   (comma-separated, globbed, '!'-negatable) name patterns; otherwise the
   profile is attached and, for icc/icm, a colorspace transform may run. */
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
  const void *datum,const size_t length,
  const MagickBooleanType magick_unused(clone))
{
#define ProfileImageTag  "Profile/Image"
#define ThrowProfileException(severity,tag,context) \
{ \
  if (source_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(source_profile); \
  if (target_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(target_profile); \
  ThrowBinaryException(severity,tag,context); \
}

  MagickBooleanType
    status;

  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(name != (const char *) NULL);
  if ((datum == (const void *) NULL) || (length == 0))
    {
      char
        **arguments,
        *names;

      int
        number_arguments;

      register ssize_t
        i;

      /*
        Delete image profile(s).  name is a comma-separated pattern list;
        a leading '!' protects a matching profile from deletion.
      */
      names=ConstantString(name);
      (void) SubstituteString(&names,","," ");
      arguments=StringToArgv(names,&number_arguments);
      names=DestroyString(names);
      if (arguments == (char **) NULL)
        return(MagickTrue);
      ResetImageProfileIterator(image);
      for (name=GetNextImageProfile(image); name != (const char *) NULL; )
      {
        for (i=1; i < (ssize_t) number_arguments; i++)
        {
          if ((*arguments[i] == '!') &&
              (LocaleCompare(name,arguments[i]+1) == 0))
            break;
          if (GlobExpression(name,arguments[i],MagickTrue) != MagickFalse)
            {
              /* Deleting invalidates the iterator, so restart it. */
              (void) DeleteImageProfile(image,name);
              ResetImageProfileIterator(image);
              break;
            }
        }
        name=GetNextImageProfile(image);
      }
      for (i=0; i < (ssize_t) number_arguments; i++)
        arguments[i]=DestroyString(arguments[i]);
      arguments=(char **) RelinquishMagickMemory(arguments);
      return(MagickTrue);
    }
  /*
    Add a ICC, IPTC, or generic profile to the image.
  */
  status=MagickTrue;
  profile=AcquireStringInfo((size_t) length);
  SetStringInfoDatum(profile,(unsigned char *) datum);
  if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
    status=SetImageProfile(image,name,profile);
  else
    {
      const StringInfo
        *icc_profile;

      icc_profile=GetImageProfile(image,"icc");
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          const char
            *value;

          value=GetImageProperty(image,"exif:ColorSpace");
          (void) value;
          /*
            Future.
            if (LocaleCompare(value,"1") != 0)
              (void) SetsRGBImageProfile(image);
            value=GetImageProperty(image,"exif:InteroperabilityIndex");
            if (LocaleCompare(value,"R98.") != 0)
              (void) SetsRGBImageProfile(image);
            value=GetImageProperty(image,"exif:InteroperabilityIndex");
            if (LocaleCompare(value,"R03.") != 0)
              (void) SetAdobeRGB1998ImageProfile(image);
          */
          icc_profile=GetImageProfile(image,"icc");
        }
      /* Identical profile already attached: nothing to do. */
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          profile=DestroyStringInfo(profile);
          return(MagickTrue);
        }
#if !defined(MAGICKCORE_LCMS_DELEGATE)
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (LCMS)",
        image->filename);
#else
      {
        cmsHPROFILE
          source_profile;

        /*
          Transform pixel colors as defined by the color profiles.
        */
        cmsSetLogErrorHandler(LCMSExceptionHandler);
        source_profile=cmsOpenProfileFromMemTHR(image,
          GetStringInfoDatum(profile),(cmsUInt32Number)
          GetStringInfoLength(profile));
        if (source_profile == (cmsHPROFILE) NULL)
          ThrowBinaryException(ResourceLimitError,
            "ColorspaceColorProfileMismatch",name);
        /* No existing icc profile (and not a device-link): just attach. */
        if ((cmsGetDeviceClass(source_profile) != cmsSigLinkClass) &&
            (icc_profile == (StringInfo *) NULL))
          status=SetImageProfile(image,name,profile);
        else
          {
            CacheView
              *image_view;

            ColorspaceType
              source_colorspace,
              target_colorspace;

            cmsColorSpaceSignature
              signature;

            cmsHPROFILE
              target_profile;

            cmsHTRANSFORM
              *restrict transform;

            cmsUInt32Number
              flags,
              source_type,
              target_type;

            ExceptionInfo
              *exception;

            int
              intent;

            /* NOTE(review): this inner declaration shadows the
               function-level `status`; a failure recorded below does not
               reach the final return(status) — confirm intended. */
            MagickBooleanType
              status;

            MagickOffsetType
              progress;

            size_t
              source_channels,
              target_channels;

            ssize_t
              y;

            unsigned short
              **restrict source_pixels,
              **restrict target_pixels;

            exception=(&image->exception);
            target_profile=(cmsHPROFILE) NULL;
            if (icc_profile != (StringInfo *) NULL)
              {
                /* Existing icc profile becomes the transform source; the
                   new profile becomes the target. */
                target_profile=source_profile;
                source_profile=cmsOpenProfileFromMemTHR(image,
                  GetStringInfoDatum(icc_profile),(cmsUInt32Number)
                  GetStringInfoLength(icc_profile));
                if (source_profile == (cmsHPROFILE) NULL)
                  ThrowProfileException(ResourceLimitError,
                    "ColorspaceColorProfileMismatch",name);
              }
            /* Map the profile's color space to Magick colorspace, LCMS
               pixel layout, and channel count. */
            switch (cmsGetColorSpace(source_profile))
            {
              case cmsSigCmykData:
              {
                source_colorspace=CMYKColorspace;
                source_type=(cmsUInt32Number) TYPE_CMYK_16;
                source_channels=4;
                break;
              }
              case cmsSigGrayData:
              {
                source_colorspace=GRAYColorspace;
                source_type=(cmsUInt32Number) TYPE_GRAY_16;
                source_channels=1;
                break;
              }
              case cmsSigLabData:
              {
                source_colorspace=LabColorspace;
                source_type=(cmsUInt32Number) TYPE_Lab_16;
                source_channels=3;
                break;
              }
              case cmsSigLuvData:
              {
                source_colorspace=YUVColorspace;
                source_type=(cmsUInt32Number) TYPE_YUV_16;
                source_channels=3;
                break;
              }
              case cmsSigRgbData:
              {
                source_colorspace=sRGBColorspace;
                source_type=(cmsUInt32Number) TYPE_RGB_16;
                source_channels=3;
                break;
              }
              case cmsSigXYZData:
              {
                source_colorspace=XYZColorspace;
                source_type=(cmsUInt32Number) TYPE_XYZ_16;
                source_channels=3;
                break;
              }
              case cmsSigYCbCrData:
              {
                source_colorspace=YCbCrColorspace;
                source_type=(cmsUInt32Number) TYPE_YCbCr_16;
                source_channels=3;
                break;
              }
              default:
              {
                source_colorspace=UndefinedColorspace;
                source_type=(cmsUInt32Number) TYPE_RGB_16;
                source_channels=3;
                break;
              }
            }
            /* Target space: the PCS for a single profile, or the target
               profile's color space when converting between two. */
            signature=cmsGetPCS(source_profile);
            if (target_profile != (cmsHPROFILE) NULL)
              signature=cmsGetColorSpace(target_profile);
            switch (signature)
            {
              case cmsSigCmykData:
              {
                target_colorspace=CMYKColorspace;
                target_type=(cmsUInt32Number) TYPE_CMYK_16;
                target_channels=4;
                break;
              }
              case cmsSigLabData:
              {
                target_colorspace=LabColorspace;
                target_type=(cmsUInt32Number) TYPE_Lab_16;
                target_channels=3;
                break;
              }
              case cmsSigGrayData:
              {
                target_colorspace=GRAYColorspace;
                target_type=(cmsUInt32Number) TYPE_GRAY_16;
                target_channels=1;
                break;
              }
              case cmsSigLuvData:
              {
                target_colorspace=YUVColorspace;
                target_type=(cmsUInt32Number) TYPE_YUV_16;
                target_channels=3;
                break;
              }
              case cmsSigRgbData:
              {
                target_colorspace=sRGBColorspace;
                target_type=(cmsUInt32Number) TYPE_RGB_16;
                target_channels=3;
                break;
              }
              case cmsSigXYZData:
              {
                target_colorspace=XYZColorspace;
                target_type=(cmsUInt32Number) TYPE_XYZ_16;
                target_channels=3;
                break;
              }
              case cmsSigYCbCrData:
              {
                target_colorspace=YCbCrColorspace;
                target_type=(cmsUInt32Number) TYPE_YCbCr_16;
                target_channels=3;
                break;
              }
              default:
              {
                target_colorspace=UndefinedColorspace;
                target_type=(cmsUInt32Number) TYPE_RGB_16;
                target_channels=3;
                break;
              }
            }
            /* Reject combinations the image's pixels cannot satisfy. */
            if ((source_colorspace == UndefinedColorspace) ||
                (target_colorspace == UndefinedColorspace))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            if ((source_colorspace == GRAYColorspace) &&
                (IsGrayImage(image,exception) == MagickFalse))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            if ((source_colorspace == CMYKColorspace) &&
                (image->colorspace != CMYKColorspace))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            if ((source_colorspace == XYZColorspace) &&
                (image->colorspace != XYZColorspace))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            if ((source_colorspace == YCbCrColorspace) &&
                (image->colorspace != YCbCrColorspace))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            if ((source_colorspace != CMYKColorspace) &&
                (source_colorspace != GRAYColorspace) &&
                (source_colorspace != LabColorspace) &&
                (source_colorspace != XYZColorspace) &&
                (source_colorspace != YCbCrColorspace) &&
                (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            /* Map the image's rendering intent to the LCMS constant. */
            switch (image->rendering_intent)
            {
              case AbsoluteIntent: intent=INTENT_ABSOLUTE_COLORIMETRIC; break;
              case PerceptualIntent: intent=INTENT_PERCEPTUAL; break;
              case RelativeIntent: intent=INTENT_RELATIVE_COLORIMETRIC; break;
              case SaturationIntent: intent=INTENT_SATURATION; break;
              default: intent=INTENT_PERCEPTUAL; break;
            }
            flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
            if (image->black_point_compensation != MagickFalse)
              flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
            transform=AcquireTransformThreadSet(image,source_profile,
              source_type,target_profile,target_type,intent,flags);
            if (transform == (cmsHTRANSFORM *) NULL)
              ThrowProfileException(ImageError,"UnableToCreateColorTransform",
                name);
            /*
              Transform image as dictated by the source & target image
              profiles.
            */
            source_pixels=AcquirePixelThreadSet(image->columns,source_channels);
            target_pixels=AcquirePixelThreadSet(image->columns,target_channels);
            if ((source_pixels == (unsigned short **) NULL) ||
                (target_pixels == (unsigned short **) NULL))
              {
                transform=DestroyTransformThreadSet(transform);
                ThrowProfileException(ResourceLimitError,
                  "MemoryAllocationFailed",image->filename);
              }
            if (SetImageStorageClass(image,DirectClass) == MagickFalse)
              {
                target_pixels=DestroyPixelThreadSet(target_pixels);
                source_pixels=DestroyPixelThreadSet(source_pixels);
                transform=DestroyTransformThreadSet(transform);
                if (source_profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(source_profile);
                if (target_profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(target_profile);
                return(MagickFalse);
              }
            if (target_colorspace == CMYKColorspace)
              (void) SetImageColorspace(image,target_colorspace);
            status=MagickTrue;
            progress=0;
            image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel for schedule(static,4) shared(status) \
              magick_threads(image,image,image->rows,1)
#endif
            for (y=0; y < (ssize_t) image->rows; y++)
            {
              const int
                id = GetOpenMPThreadId();

              MagickBooleanType
                sync;

              register IndexPacket
                *restrict indexes;

              register ssize_t
                x;

              register PixelPacket
                *restrict q;

              register unsigned short
                *p;

              if (status == MagickFalse)
                continue;
              q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
                exception);
              if (q == (PixelPacket *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              indexes=GetCacheViewAuthenticIndexQueue(image_view);
              /* Pack the row into the thread's 16-bit staging buffer. */
              p=source_pixels[id];
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                *p++=ScaleQuantumToShort(GetPixelRed(q));
                if (source_channels > 1)
                  {
                    *p++=ScaleQuantumToShort(GetPixelGreen(q));
                    *p++=ScaleQuantumToShort(GetPixelBlue(q));
                  }
                if (source_channels > 3)
                  *p++=ScaleQuantumToShort(GetPixelIndex(indexes+x));
                q++;
              }
              cmsDoTransform(transform[id],source_pixels[id],target_pixels[id],
                (unsigned int) image->columns);
              /* Unpack the transformed row back into the pixel cache;
                 for 1-channel targets the red value is replicated. */
              p=target_pixels[id];
              q-=image->columns;
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                SetPixelRed(q,ScaleShortToQuantum(*p));
                SetPixelGreen(q,GetPixelRed(q));
                SetPixelBlue(q,GetPixelRed(q));
                p++;
                if (target_channels > 1)
                  {
                    SetPixelGreen(q,ScaleShortToQuantum(*p));
                    p++;
                    SetPixelBlue(q,ScaleShortToQuantum(*p));
                    p++;
                  }
                if (target_channels > 3)
                  {
                    SetPixelIndex(indexes+x,ScaleShortToQuantum(*p));
                    p++;
                  }
                q++;
              }
              sync=SyncCacheViewAuthenticPixels(image_view,exception);
              if (sync == MagickFalse)
                status=MagickFalse;
              if (image->progress_monitor != (MagickProgressMonitor) NULL)
                {
                  MagickBooleanType
                    proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
                  #pragma omp critical (MagickCore_ProfileImage)
#endif
                  proceed=SetImageProgress(image,ProfileImageTag,progress++,
                    image->rows);
                  if (proceed == MagickFalse)
                    status=MagickFalse;
                }
            }
            image_view=DestroyCacheView(image_view);
            (void) SetImageColorspace(image,target_colorspace);
            /* Update the image type to match the transformed channels. */
            switch (signature)
            {
              case cmsSigRgbData:
              {
                image->type=image->matte == MagickFalse ? TrueColorType :
                  TrueColorMatteType;
                break;
              }
              case cmsSigCmykData:
              {
                image->type=image->matte == MagickFalse ? ColorSeparationType :
                  ColorSeparationMatteType;
                break;
              }
              case cmsSigGrayData:
              {
                image->type=image->matte == MagickFalse ? GrayscaleType :
                  GrayscaleMatteType;
                break;
              }
              default:
                break;
            }
            target_pixels=DestroyPixelThreadSet(target_pixels);
            source_pixels=DestroyPixelThreadSet(source_pixels);
            transform=DestroyTransformThreadSet(transform);
            if (cmsGetDeviceClass(source_profile) != cmsSigLinkClass)
              status=SetImageProfile(image,name,profile);
            if (target_profile != (cmsHPROFILE) NULL)
              (void) cmsCloseProfile(target_profile);
          }
        (void) cmsCloseProfile(source_profile);
      }
#endif
    }
  profile=DestroyStringInfo(profile);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m o v e I m a g e P r o f i l e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemoveImageProfile() removes a named profile from the image and returns its
%  value.
%
%  The format of the RemoveImageProfile method is:
%
%      void *RemoveImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  if (LocaleCompare(name,"icc") == 0)
    {
      /*
        Continue to support deprecated color profile for now.
      */
      image->color_profile.length=0;
      image->color_profile.info=(unsigned char *) NULL;
    }
  if (LocaleCompare(name,"iptc") == 0)
    {
      /*
        Continue to support deprecated IPTC profile for now.
*/ image->iptc_profile.length=0; image->iptc_profile.info=(unsigned char *) NULL; } profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *) image->profiles,name); return(profile); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t P r o f i l e I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImageProfileIterator() resets the image profile iterator. Use it in % conjunction with GetNextImageProfile() to iterate over all the profiles % associated with an image. % % The format of the ResetImageProfileIterator method is: % % ResetImageProfileIterator(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void ResetImageProfileIterator(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return; ResetSplayTreeIterator((SplayTreeInfo *) image->profiles); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageProfile() adds a named profile to the image. If a profile with the % same name already exists, it is replaced. This method differs from the % ProfileImage() method in that it does not apply CMS color profiles. % % The format of the SetImageProfile method is: % % MagickBooleanType SetImageProfile(Image *image,const char *name, % const StringInfo *profile) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name, for example icc, exif, and 8bim (8bim is the % Photoshop wrapper for iptc profiles). % % o profile: A StringInfo structure that contains the named profile. 
% */

/*
  DestroyProfile() is the value destructor registered with the profile splay
  tree in SetImageProfile(); it releases a cloned StringInfo profile.
*/
static void *DestroyProfile(void *profile)
{
  return((void *) DestroyStringInfo((StringInfo *) profile));
}

/*
  Read one byte from p into *quantum and return the advanced pointer.
  The caller is responsible for bounds checking.
*/
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
  unsigned char *quantum)
{
  *quantum=(*p++);
  return(p);
}

/*
  Copy count bytes from p into quantum and return the advanced pointer.
*/
static inline const unsigned char *ReadResourceBytes(const unsigned char *p,
  const ssize_t count,unsigned char *quantum)
{
  register ssize_t
    i;

  for (i=0; i < count; i++)
    *quantum++=(*p++);
  return(p);
}

/*
  Decode a 32-bit big-endian (MSB-first) value from p and return the
  advanced pointer.
*/
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
  size_t *quantum)
{
  *quantum=(size_t) (*p++ << 24);
  *quantum|=(size_t) (*p++ << 16);
  *quantum|=(size_t) (*p++ << 8);
  *quantum|=(size_t) (*p++ << 0);
  return(p);
}

/*
  Decode a 16-bit big-endian value from p and return the advanced pointer.
*/
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
  unsigned short *quantum)
{
  *quantum=(unsigned short) (*p++ << 8);
  *quantum|=(unsigned short) (*p++ << 0);
  return(p);
}

/*
  GetProfilesFromResourceBlock() walks a Photoshop "8BIM" image resource
  block and extracts the embedded sub-profiles it recognizes (IPTC, ICC,
  EXIF, XMP) into the image's profile list via SetImageProfile(); resource
  id 0x03ed updates the image resolution directly.  Unknown resource ids
  are skipped.  Returns MagickTrue when the scan terminates.
*/
static MagickBooleanType GetProfilesFromResourceBlock(Image *image,
  const StringInfo *resource_block)
{
  const unsigned char
    *datum;

  register const unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile;

  unsigned char
    length_byte;

  size_t
    count;

  unsigned short
    id;

  datum=GetStringInfoDatum(resource_block);
  length=GetStringInfoLength(resource_block);
  for (p=datum; p < (datum+length-16); )
  {
    /* each resource starts with the literal signature "8BIM" */
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    /* skip the Pascal-style resource name; it is padded to an even size */
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&count);
    /* NOTE(review): with a huge count, datum+length-count can wrap below
       datum; presumably count > length catches that case first -- confirm */
    if ((p > (datum+length-count)) || (count > length))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned short
          resolution;

        /*
          Resolution.
        */
        /* the +6 skips the fraction/unit fields of the PS resolution
           structure -- presumably; confirm against the 8BIM spec */
        p=ReadResourceShort(p,&resolution)+6;
        image->x_resolution=(double) resolution;
        p=ReadResourceShort(p,&resolution)+6;
        image->y_resolution=(double) resolution;
        break;
      }
      case 0x0404:
      {
        /*
          IPTC Profile
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfile(image,"iptc",profile);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x040c:
      {
        /*
          Thumbnail.
        */
        p+=count;
        break;
      }
      case 0x040f:
      {
        /*
          ICC Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfile(image,"icc",profile);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0422:
      {
        /*
          EXIF Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfile(image,"exif",profile);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0424:
      {
        /*
          XMP Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfile(image,"xmp",profile);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    /* resource data is padded to an even length */
    if ((count & 0x01) != 0)
      p++;
  }
  return(MagickTrue);
}

/*
  SetImageProfile() stores a clone of the named profile in the image's
  profile splay tree (creating the tree on first use).  For "icc"/"icm" and
  "iptc"/"8bim" profiles it additionally refreshes the deprecated
  color_profile / iptc_profile members, and for IPTC/8BIM it re-scans the
  resource block for embedded sub-profiles.  Returns the splay-tree insert
  status.
*/
MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  char
    key[MaxTextExtent],
    property[MaxTextExtent];

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
      DestroyProfile);
  (void) CopyMagickString(key,name,MaxTextExtent);
  status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
    ConstantString(key),CloneStringInfo(profile));
  if ((status != MagickFalse) &&
      ((LocaleCompare(name,"icc") == 0) || (LocaleCompare(name,"icm") == 0)))
    {
      const StringInfo
        *icc_profile;

      /*
        Continue to support deprecated color profile member.
      */
      icc_profile=GetImageProfile(image,name);
      if (icc_profile != (const StringInfo *) NULL)
        {
          /* NOTE(review): these members alias the splay-tree-owned profile
             storage; they dangle if the profile is later removed -- confirm */
          image->color_profile.length=GetStringInfoLength(icc_profile);
          image->color_profile.info=GetStringInfoDatum(icc_profile);
        }
    }
  if ((status != MagickFalse) &&
      ((LocaleCompare(name,"iptc") == 0) || (LocaleCompare(name,"8bim") == 0)))
    {
      const StringInfo
        *iptc_profile;

      /*
        Continue to support deprecated IPTC profile member.
      */
      iptc_profile=GetImageProfile(image,name);
      if (iptc_profile != (const StringInfo *) NULL)
        {
          image->iptc_profile.length=GetStringInfoLength(iptc_profile);
          image->iptc_profile.info=GetStringInfoDatum(iptc_profile);
        }
      (void) GetProfilesFromResourceBlock(image,profile);
    }
  /*
    Inject profile into image properties.
  */
  /* presumably primes the "<name>:sans" property cache; verify intent */
  (void) FormatLocaleString(property,MaxTextExtent,"%s:sans",name);
  (void) GetImageProperty(image,property);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c I m a g e P r o f i l e s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImageProfiles() synchronizes image properties with the image profiles.
%  Currently we only support updating the EXIF resolution and orientation.
%
%  The format of the SyncImageProfiles method is:
%
%      MagickBooleanType SyncImageProfiles(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */

/*
  Consume one byte from the buffer at *p, decrementing the remaining
  *length; returns EOF when no bytes remain.
*/
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
  int
    c;

  if (*length < 1)
    return(EOF);
  c=(int) (*(*p)++);
  (*length)--;
  return(c);
}

/*
  Decode a 16-bit value from buffer in the requested byte order.
*/
static inline unsigned short ReadProfileShort(const EndianType endian,
  unsigned char *buffer)
{
  unsigned short
    value;

  if (endian == LSBEndian)
    {
      value=(unsigned short) ((buffer[1] << 8) | buffer[0]);
      return((unsigned short) (value & 0xffff));
    }
  value=(unsigned short) ((((unsigned char *) buffer)[0] << 8) |
    ((unsigned char *) buffer)[1]);
  return((unsigned short) (value & 0xffff));
}

/*
  Decode a 32-bit value from buffer in the requested byte order.
*/
static inline size_t ReadProfileLong(const EndianType endian,
  unsigned char *buffer)
{
  size_t
    value;

  if (endian == LSBEndian)
    {
      value=(size_t) ((buffer[3] << 24) | (buffer[2] << 16) |
        (buffer[1] << 8 ) | (buffer[0]));
      return((size_t) (value & 0xffffffff));
    }
  value=(size_t) ((buffer[0] << 24) | (buffer[1] << 16) |
    (buffer[2] << 8) | buffer[3]);
  return((size_t) (value & 0xffffffff));
}

/*
  Encode a 32-bit value at p in the requested byte order.
*/
static inline void WriteProfileLong(const EndianType endian,
  const size_t value,unsigned char *p)
{
  unsigned char
    buffer[4];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      buffer[2]=(unsigned char) (value >> 16);
      buffer[3]=(unsigned char) (value >> 24);
      (void) CopyMagickMemory(p,buffer,4);
      return;
    }
  buffer[0]=(unsigned char) (value >> 24);
  buffer[1]=(unsigned char) (value >> 16);
  buffer[2]=(unsigned char) (value >> 8);
  buffer[3]=(unsigned char) value;
  (void) CopyMagickMemory(p,buffer,4);
}

/*
  Encode a 16-bit value at p in the requested byte order.
*/
static void WriteProfileShort(const EndianType endian,
  const unsigned short value,unsigned char *p)
{
  unsigned char
    buffer[2];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      (void) CopyMagickMemory(p,buffer,2);
      return;
    }
  buffer[0]=(unsigned char) (value >> 8);
  buffer[1]=(unsigned char) value;
  (void) CopyMagickMemory(p,buffer,2);
}

/*
  SyncImageProfiles() rewrites, in place, the EXIF x/y-resolution (0x011a,
  0x011b), orientation (0x0112) and resolution-unit (0x0128) tags inside the
  image's "EXIF" profile so they match the image's current properties.  The
  profile is located by scanning for the "Exif\0\0" signature, then the TIFF
  header is parsed and the IFD chain walked; a splay tree of visited entry
  addresses guards against cyclic IFD references.  Returns MagickFalse when
  the profile is absent, truncated or malformed.
*/
MagickExport MagickBooleanType SyncImageProfiles(Image *image)
{
#define MaxDirectoryStack  16
#define EXIF_DELIMITER  "\n"
#define EXIF_NUM_FORMATS  12
#define TAG_EXIF_OFFSET  0x8769
#define TAG_INTEROP_OFFSET  0xa005

  typedef struct _DirectoryInfo
  {
    unsigned char
      *directory;

    size_t
      entry;
  } DirectoryInfo;

  DirectoryInfo
    directory_stack[MaxDirectoryStack];

  EndianType
    endian;

  size_t
    entry,
    length,
    number_entries;

  SplayTreeInfo
    *exif_resources;

  ssize_t
    id,
    level,
    offset;

  /* bytes per component for each EXIF format code (index 0 unused) */
  static int
    format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};

  StringInfo
    *profile;

  unsigned char
    *directory,
    *exif;

  /*
    Set EXIF resolution tag.
  */
  profile=(StringInfo *) GetImageProfile(image,"EXIF");
  if (profile == (StringInfo *) NULL)
    return(MagickTrue);
  length=GetStringInfoLength(profile);
  exif=GetStringInfoDatum(profile);
  /* scan forward for the "Exif\0\0" signature; on exit exif/length refer
     to the bytes just past it (the TIFF header) */
  while (length != 0)
  {
    if (ReadProfileByte(&exif,&length) != 0x45)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x78)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x69)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x66)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x00)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x00)
      continue;
    break;
  }
  if (length < 16)
    return(MagickFalse);
  /* TIFF byte-order mark: 0x4949 "II" little endian, 0x4D4D "MM" big */
  id=(ssize_t) ReadProfileShort(LSBEndian,exif);
  endian=LSBEndian;
  if (id == 0x4949)
    endian=LSBEndian;
  else
    if (id == 0x4D4D)
      endian=MSBEndian;
    else
      return(MagickFalse);
  if (ReadProfileShort(endian,exif+2) != 0x002a)
    return(MagickFalse);
  /*
    This the offset to the first IFD.
  */
  offset=(ssize_t) ((int) ReadProfileLong(endian,exif+4));
  if ((offset < 0) || ((size_t) offset >= length))
    return(MagickFalse);
  directory=exif+offset;
  level=0;
  entry=0;
  exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
    (void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
  do
  {
    if (level > 0)
      {
        /* resume the parent IFD we pushed before descending */
        level--;
        directory=directory_stack[level].directory;
        entry=directory_stack[level].entry;
      }
    /*
      Determine how many entries there are in the current IFD.
    */
    number_entries=ReadProfileShort(endian,directory);
    for ( ; entry < number_entries; entry++)
    {
      register unsigned char
        *p,
        *q;

      size_t
        number_bytes;

      ssize_t
        components,
        format,
        tag_value;

      q=(unsigned char *) (directory+2+(12*entry));
      /* already visited this entry: the IFD chain loops back on itself */
      if (GetValueFromSplayTree(exif_resources,q) == q)
        break;
      (void) AddValueToSplayTree(exif_resources,q,q);
      tag_value=(ssize_t) ReadProfileShort(endian,q);
      format=(ssize_t) ReadProfileShort(endian,q+2);
      if ((format-1) >= EXIF_NUM_FORMATS)
        break;
      components=(ssize_t) ((int) ReadProfileLong(endian,q+4));
      number_bytes=(size_t) components*format_bytes[format];
      if ((ssize_t) number_bytes < components)
        break;  /* prevent overflow */
      if (number_bytes <= 4)
        p=q+8;  /* value is stored inline in the entry */
      else
        {
          ssize_t
            offset;

          /*
            The directory entry contains an offset.
          */
          offset=(ssize_t) ((int) ReadProfileLong(endian,q+8));
          if ((ssize_t) (offset+number_bytes) < offset)
            continue;  /* prevent overflow */
          if ((size_t) (offset+number_bytes) > length)
            continue;
          p=(unsigned char *) (exif+offset);
        }
      switch (tag_value)
      {
        case 0x011a:
        {
          /* XResolution: rational numerator/denominator */
          (void) WriteProfileLong(endian,(size_t) (image->x_resolution+0.5),p);
          (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x011b:
        {
          /* YResolution */
          (void) WriteProfileLong(endian,(size_t) (image->y_resolution+0.5),p);
          (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x0112:
        {
          /* Orientation */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) image->orientation,p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) image->orientation,
            p);
          break;
        }
        case 0x0128:
        {
          /* ResolutionUnit (image->units+1 maps to the EXIF encoding) */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) (image->units+1),p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
          break;
        }
        default:
          break;
      }
      if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
        {
          ssize_t
            offset;

          offset=(ssize_t) ((int) ReadProfileLong(endian,p));
          if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
            {
              /* push the current IFD (resuming at the next entry), then the
                 sub-IFD, then any chained IFD found after the entry table */
              directory_stack[level].directory=directory;
              entry++;
              directory_stack[level].entry=entry;
              level++;
              directory_stack[level].directory=exif+offset;
              directory_stack[level].entry=0;
              level++;
              if ((directory+2+(12*number_entries)) > (exif+length))
                break;
              offset=(ssize_t) ((int) ReadProfileLong(endian,directory+2+(12*
                number_entries)));
              if ((offset != 0) && ((size_t) offset < length) &&
                  (level < (MaxDirectoryStack-2)))
                {
                  directory_stack[level].directory=exif+offset;
                  directory_stack[level].entry=0;
                  level++;
                }
            }
          break;
        }
    }
  } while (level > 0);
  exif_resources=DestroySplayTree(exif_resources);
  return(MagickTrue);
}
rose_accumulateForce.c
#include "omp.h" void AccumulateForce(int *idxBound,int *idxList,int len,double *tmp,double *force) { #pragma omp parallel for private (jj) firstprivate (len) for (register int ii = 0; ii <= len - 1; ii += 1) { int count = idxBound[ii + 1] - idxBound[ii]; int *list = &idxList[idxBound[ii]]; double sum = 0.0; #pragma omp parallel for reduction (+:sum) firstprivate (count) for (register int jj = 0; jj <= count - 1; jj += 1) { int idx = list[jj]; sum += tmp[list[jj]]; } force[ii] += sum; } return ; }
omp_master_3.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include "omp_testsuite.h" int test_omp_master_3() { int nthreads; int executing_thread; int tid_result = 0; /* counts up the number of wrong thread no. for the master thread. (Must be 0) */ nthreads = 0; executing_thread = -1; #pragma omp parallel { #pragma omp master { int tid = omp_get_thread_num(); if (tid != 0) { #pragma omp critical { tid_result++; } } #pragma omp critical { nthreads++; } executing_thread = omp_get_thread_num (); } /* end of master*/ } /* end of parallel*/ return ((nthreads == 1) && (executing_thread == 0) && (tid_result == 0)); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_master_3()) { num_failed++; } } return num_failed; }
matmul.c
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
// matmul(): C[mm][nn] = dot(vector id_A[mm], vector id_B[nn]) over every
// (non-ghost) cell of every local box, i.e. a rows x cols Gram-type matrix
// of level-wide dot products.  Only the upper triangle (nn >= mm) is
// computed; each value is mirrored into C[nn][mm] when that slot exists.
// Under USE_MPI the local partial dot products are summed across ranks.
void matmul(level_type * level, double *C, int * id_A, int * id_B, int rows, int cols, int A_equals_B_transpose){
  // *id_A = m vector_id's (conceptually pointers to the rows of a m x level->num_my_boxes*volume matrix)
  // *id_B = n vector_id's (conceptually pointers to the columns of a level->num_my_boxes*volume matrix x n)
  // *C is a mxn matrix where C[rows][cols] = dot(id_A[rows],id_B[cols])
  // FIX, id_A and id_B are likely the same and thus C[][] will be symmetric (modulo missing row?)
  //   if(A_equals_B_transpose && (cols>=rows)) then use id_B and only run for nn>=mm
  //   common case for s-step Krylov methods
  //   C_is_symmetric && cols< rows (use id_A)
  // NOTE(review): A_equals_B_transpose is not read in this body -- the
  //   nn>=mm triangle is taken unconditionally; confirm callers rely on that.
  int mm,nn;
  uint64_t _timeStart = CycleTime();

  // FIX... rather than performing an all_reduce on the essentially symmetric [G,g], do the all_reduce on the upper triangle and then duplicate (saves BW)
  // #pragma omp parallel for schedule(static,1) collapse(2)
  // hclib replacement for the omp collapse(2) loop above: one task per
  // (mm,nn) cell.  Locals are captured by reference, which is safe only
  // because hclib::finish joins all tasks before returning -- presumably;
  // confirm against the hclib forasync contract.
  hclib::finish([&rows, &cols, &level, &id_A, &C, &id_B] {
    hclib::loop_domain_2d loop(rows, cols);
    hclib::forasync2D_nb(&loop, [&level, &id_A, &C, &id_B, &cols, &rows] (int mm, int nn) {
      if(nn>=mm){ // upper triangular
        int box;
        double a_dot_b_level = 0.0;
        for(box=0;box<level->num_my_boxes;box++){
          int i,j,k;
          box_type *lbox = (box_type *)&(level->my_boxes[box]);
          const int jStride = lbox->jStride;
          const int kStride = lbox->kStride;
          const int ghosts = lbox->ghosts;
          const int dim = lbox->dim;
          // offset past the ghost shell so [0] = first non ghost zone point
          double * __restrict__ grid_a = (double *) (lbox->vectors[id_A[mm]] + ghosts*(1+jStride+kStride));
          double * __restrict__ grid_b = (double *) (lbox->vectors[id_B[nn]] + ghosts*(1+jStride+kStride));
          double a_dot_b_box = 0.0;
          for(k=0;k<dim;k++){
          for(j=0;j<dim;j++){
          for(i=0;i<dim;i++){
            int ijk = i + j*jStride + k*kStride;
            a_dot_b_box += grid_a[ijk]*grid_b[ijk];
          }}}
          a_dot_b_level+=a_dot_b_box;
        }
        C[mm*cols + nn] = a_dot_b_level; // C[mm][nn]
        // mirror into the lower triangle when the transposed slot exists
        if((mm<cols)&&(nn<rows)){C[nn*cols + mm] = a_dot_b_level;}// C[nn][mm]
      }
    }, false, FORASYNC_MODE_FLAT);
  });
  level->cycles.blas3 += (uint64_t)(CycleTime()-_timeStart);

  #ifdef USE_MPI
  // sum the per-rank partial dot products; C doubles as the receive buffer
  double *send_buffer = (double*)malloc(rows*cols*sizeof(double));
  for(mm=0;mm<rows;mm++){
  for(nn=0;nn<cols;nn++){
    send_buffer[mm*cols + nn] = C[mm*cols + nn];
  }}
  uint64_t _timeStartAllReduce = CycleTime();
  hclib::MPI_Allreduce(send_buffer,C,rows*cols,MPI_DOUBLE,MPI_SUM,level->MPI_COMM_ALLREDUCE);
  uint64_t _timeEndAllReduce = CycleTime();
  level->cycles.collectives += (uint64_t)(_timeEndAllReduce-_timeStartAllReduce);
  free(send_buffer);
  #endif
}
partial.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision$ ***********************************************************************EHEADER*/ #include "_hypre_parcsr_ls.h" #include "aux_interp.h" /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildPartialExtPIInterp * Comment: *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildPartialExtPIInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_BigInt *num_old_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PARTIAL_INTERP] -= hypre_MPI_Wtime(); #endif /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = 
hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows; HYPRE_BigInt total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /*HYPRE_Int *col_map_offd_P = NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; /*HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL;*/ HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_BigInt *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int *old_coarse_to_fine = NULL; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_BigInt *Sop_j; HYPRE_Int sgn; /* Variables to keep count of interpolatory points */ /*HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter, coarse_counter_offd; */ HYPRE_Int n_coarse_old; HYPRE_BigInt total_old_global_cpts; /* Interpolation weight variables */ HYPRE_Real sum, diagonal, distribute; /*HYPRE_Int strong_f_marker = -2;*/ /* Loop variables */ /*HYPRE_Int index;*/ HYPRE_Int 
cnt, old_cnt; HYPRE_Int start_indexing = 0; HYPRE_Int i; /*HYPRE_Int i, ii, i1, i2, j, jj, kk, k1, jj1;*/ /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Real wall_time; HYPRE_Int max_num_threads; HYPRE_Int *P_diag_array = NULL; HYPRE_Int *P_offd_array = NULL; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; if (debug_flag==4) wall_time = time_getWallclockSeconds(); /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); max_num_threads = hypre_NumThreads(); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = num_cpts_global[0]; /*my_first_old_cpt = num_old_cpts_global[0];*/ n_coarse_old = (HYPRE_Int)(num_old_cpts_global[1] - num_old_cpts_global[0]); /*n_coarse = num_cpts_global[1] - num_cpts_global[0];*/ if (my_id == (num_procs -1)) { total_global_cpts = num_cpts_global[1]; total_old_global_cpts = num_old_cpts_global[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else my_first_cpt = num_cpts_global[my_id]; /*my_first_old_cpt = num_old_cpts_global[my_id];*/ total_global_cpts = num_cpts_global[num_procs]; total_old_global_cpts = num_old_cpts_global[num_procs]; n_coarse_old = (HYPRE_Int)(num_old_cpts_global[my_id+1] - num_old_cpts_global[my_id]); /*n_coarse = num_cpts_global[my_id+1] - num_cpts_global[my_id];*/ #endif if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ full_off_procNodes = 0; if (num_procs > 1) { if (hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, A, CF_marker, S, num_functions, dof_func, 1)) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif return hypre_error_flag; } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = 
hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixBigJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST); if (n_fine) { old_coarse_to_fine = hypre_CTAlloc(HYPRE_Int, n_coarse_old, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); /*P_marker = hypre_CTAlloc(HYPRE_Int, n_fine); */ } if (full_off_procNodes) { /*P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);*/ fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); } /*hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd);*/ for (i=0; i < full_off_procNodes; i++) { fine_to_coarse_offd[i] = -1; tmp_CF_marker_offd[i] = -1; } cnt = 0; old_cnt = 0; for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; if (CF_marker[i] == 1) { fine_to_coarse[i] = cnt++; old_coarse_to_fine[old_cnt++] = i; } else if (CF_marker[i] == -2) { old_coarse_to_fine[old_cnt++] = i; } } P_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads+1, HYPRE_MEMORY_HOST); P_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads+1, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Loop over fine grid. 
*-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i, diagonal, distribute, sgn, sum) #endif { HYPRE_Int ii, jj_counter, jj_counter_offd, jj, kk, i1, i2, k1, jj1; HYPRE_BigInt big_k1; HYPRE_Int loc_col, jj_begin_row, jj_begin_row_offd; HYPRE_Int jj_end_row, jj_end_row_offd, strong_f_marker; HYPRE_Int size, rest, ne, ns; HYPRE_Int num_threads, my_thread_num; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; strong_f_marker = -2; num_threads = hypre_NumActiveThreads(); my_thread_num = hypre_GetThreadNum(); size = n_coarse_old/num_threads; rest = n_coarse_old - size*num_threads; if (my_thread_num < rest) { ns = my_thread_num*(size+1); ne = (my_thread_num+1)*(size+1); } else { ns = my_thread_num*size+rest; ne = (my_thread_num+1)*size+rest; } if (n_fine) P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); for (ii=0; ii < n_fine; ii++) P_marker[ii] = -1; if (full_off_procNodes) P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); for (ii=0; ii < full_off_procNodes; ii++) P_marker_offd[ii] = -1; /*coarse_counter = 0; coarse_counter_offd = 0;*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; for (ii = ns; ii < ne; ii++) { jj_begin_row = jj_counter; jj_begin_row_offd = jj_counter_offd; /*P_diag_i[ii] = jj_counter; if (num_procs > 1) P_offd_i[ii] = jj_counter_offd;*/ i = old_coarse_to_fine[ii]; if (CF_marker[i] > 0) { jj_counter++; /*coarse_counter++;*/ } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. 
*--------------------------------------------------------------------*/ else if (CF_marker[i] == -2) { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] > 0) { /* i1 is a C point */ if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; jj_counter++; } } else if (CF_marker[i1] != -3) { /* i1 is a F point, loop through it's strong neighbors */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] > 0) { if(P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] > 0) { if(P_marker_offd[k1] < jj_begin_row_offd) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if (CF_marker_offd[i1] > 0) { if(P_marker_offd[i1] < jj_begin_row_offd) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. 
*/ for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; if(big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if(P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; jj_counter++; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; } } } } } } } P_diag_array[my_thread_num] = jj_counter; P_offd_array[my_thread_num] = jj_counter_offd; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d determine structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ if (debug_flag== 4) wall_time = time_getWallclockSeconds(); for (i=0; i < max_num_threads; i++) { P_diag_array[i+1] += P_diag_array[i]; P_offd_array[i+1] += P_offd_array[i]; } P_diag_size = P_diag_array[max_num_threads]; P_offd_size = P_offd_array[max_num_threads]; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); } P_diag_i[n_coarse_old] = P_diag_size; P_offd_i[n_coarse_old] = P_offd_size; /* Fine to coarse mapping */ if(num_procs > 1) { hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, my_first_cpt, fine_to_coarse_offd); } } for (i = 0; i < n_fine; i++) P_marker[i] = -1; for (i = 0; i < full_off_procNodes; i++) P_marker_offd[i] = -1; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif jj_counter = start_indexing; 
jj_counter_offd = start_indexing; if (my_thread_num) { jj_counter = P_diag_array[my_thread_num-1]; jj_counter_offd = P_offd_array[my_thread_num-1]; } /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ for (ii = ns; ii < ne; ii++) { jj_begin_row = jj_counter; jj_begin_row_offd = jj_counter_offd; P_diag_i[ii] = jj_counter; P_offd_i[ii] = jj_counter_offd; i = old_coarse_to_fine[ii]; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] > 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else if (CF_marker[i] == -2) { strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
*--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } } else if (CF_marker[i1] != -3) { P_marker[i1] = strong_f_marker; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if(P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[k1]; P_diag_data[jj_counter] = zero; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if(CF_marker_offd[k1] >= 0) { if(P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if ( CF_marker_offd[i1] >= 0) { if(P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { P_marker_offd[i1] = strong_f_marker; for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; /* Find local col number */ if(big_k1 >= col_1 && big_k1 < col_n) { loc_col = (HYPRE_Int)(big_k1-col_1); if(P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[loc_col]; P_diag_data[jj_counter] = zero; jj_counter++; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; diagonal = 
A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is a c-point and strongly influences i, accumulate * a_(i,i1) into interpolation weight */ i1 = A_diag_j[jj]; if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } else if(P_marker[i1] == strong_f_marker) { sum = zero; sgn = 1; if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1; /* Loop over row of A for point i1 and calculate the sum * of the connections to c-points that strongly incluence i. */ for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn*A_diag_data[jj1]) < 0) sum += A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) sum += A_offd_data[jj1]; } } if(sum != 0) { distribute = A_diag_data[jj]/sum; /* Loop over row of A for point i1 and do the distribution */ for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1]; if(i2 == i && (sgn*A_diag_data[jj1]) < 0) diagonal += distribute*A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1]; } } } else { diagonal += A_diag_data[jj]; } } /* neighbor i1 weakly influences i, accumulate a_(i,i1) into * diagonal */ else if (CF_marker[i1] != -3) { if(num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } if(num_procs > 1) { for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if(P_marker_offd[i1] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; else if(P_marker_offd[i1] == strong_f_marker) { sum = zero; for(jj1 
= A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if(big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if(P_marker[loc_col] >= jj_begin_row || loc_col == i) sum += A_ext_data[jj1]; } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd) sum += A_ext_data[jj1]; } } if(sum != 0) { distribute = A_offd_data[jj] / sum; for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if(big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if(P_marker[loc_col] >= jj_begin_row) P_diag_data[P_marker[loc_col]] += distribute* A_ext_data[jj1]; if(loc_col == i) diagonal += distribute*A_ext_data[jj1]; } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd) P_offd_data[P_marker_offd[loc_col]] += distribute* A_ext_data[jj1]; } } } else { diagonal += A_offd_data[jj]; } } else if (CF_marker_offd[i1] != -3) { if(num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } if (diagonal) { for(jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= -diagonal; for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= -diagonal; } } strong_f_marker--; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } /* end parallel region */ if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d fill structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ P = hypre_ParCSRMatrixCreate(comm, total_old_global_cpts, total_global_cpts, num_old_cpts_global, num_cpts_global, 0, P_diag_i[n_coarse_old], P_offd_i[n_coarse_old]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_coarse_old]; P_offd_size = P_offd_i[n_coarse_old]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. 
*/ if(P_offd_size) { hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd); } hypre_MatvecCommPkgCreate(P); for (i=0; i < n_fine; i++) if (CF_marker[i] < -1) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(old_coarse_to_fine, HYPRE_MEMORY_HOST); hypre_TFree(P_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(P_offd_array, HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST); if(num_functions > 1) hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_MatvecCommPkgDestroy(extend_comm_pkg); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PARTIAL_INTERP] += hypre_MPI_Wtime(); #endif return hypre_error_flag; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildPartialStdInterp * Comment: The interpolatory weighting can be changed with the sep_weight * variable. This can enable not separating negative and positive * off diagonals in the weight formula. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildPartialStdInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_BigInt *num_old_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int sep_weight, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows; HYPRE_BigInt total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real 
*P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /*HYPRE_Int *col_map_offd_P = NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_BigInt *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int *old_coarse_to_fine = NULL; HYPRE_Int loc_col; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_BigInt *Sop_j; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter; HYPRE_Int n_coarse_old; HYPRE_BigInt total_old_global_cpts; HYPRE_Int *ihat = NULL; HYPRE_Int *ihat_offd = NULL; HYPRE_Int *ipnt = NULL; HYPRE_Int *ipnt_offd = NULL; HYPRE_Int strong_f_marker = -2; /* Interpolation weight variables */ HYPRE_Real *ahat = NULL; HYPRE_Real *ahat_offd = NULL; HYPRE_Real sum_pos, sum_pos_C, sum_neg, sum_neg_C, sum, sum_C; HYPRE_Real diagonal, distribute; HYPRE_Real alfa, beta; /* Loop variables */ /*HYPRE_Int index;*/ HYPRE_Int cnt, old_cnt; HYPRE_Int start_indexing = 0; HYPRE_Int i, ii, i1, j1, jj, kk, k1; HYPRE_BigInt big_k1; HYPRE_Int cnt_c, cnt_f, cnt_c_offd, cnt_f_offd, indx; /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Real wall_time; HYPRE_Real wall_1 = 0; HYPRE_Real wall_2 = 0; HYPRE_Real wall_3 = 0; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; if (debug_flag== 4) wall_time = time_getWallclockSeconds(); /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = num_cpts_global[0]; /*my_first_old_cpt = num_old_cpts_global[0];*/ n_coarse_old = 
(HYPRE_Int)(num_old_cpts_global[1] - num_old_cpts_global[0]); /*n_coarse = num_cpts_global[1] - num_cpts_global[0];*/ if (my_id == (num_procs -1)) { total_global_cpts = num_cpts_global[1]; total_old_global_cpts = num_old_cpts_global[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else my_first_cpt = num_cpts_global[my_id]; /*my_first_old_cpt = num_old_cpts_global[my_id];*/ total_global_cpts = num_cpts_global[num_procs]; total_old_global_cpts = num_old_cpts_global[num_procs]; n_coarse_old = (HYPRE_Int)(num_old_cpts_global[my_id+1] - num_old_cpts_global[my_id]); /*n_coarse = num_cpts_global[my_id+1] - num_cpts_global[my_id];*/ #endif if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ full_off_procNodes = 0; if (num_procs > 1) { if (hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, A, CF_marker, S, num_functions, dof_func, 0)) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif return hypre_error_flag; } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixBigJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST); if (n_fine) { old_coarse_to_fine = hypre_CTAlloc(HYPRE_Int, n_coarse_old, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); } hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; cnt = 0; old_cnt = 0; for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; if (CF_marker[i] == 1) { fine_to_coarse[i] = cnt++; old_coarse_to_fine[old_cnt++] = i; } else if (CF_marker[i] == -2) { old_coarse_to_fine[old_cnt++] = i; } } /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ for (ii = 0; ii < n_coarse_old; ii++) { P_diag_i[ii] = jj_counter; if (num_procs > 1) P_offd_i[ii] = jj_counter_offd; i = old_coarse_to_fine[ii]; if (CF_marker[i] > 0) { jj_counter++; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. 
*--------------------------------------------------------------------*/ else if (CF_marker[i] == -2) { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] > 0) { /* i1 is a C point */ if (P_marker[i1] < P_diag_i[ii]) { P_marker[i1] = jj_counter; jj_counter++; } } else if (CF_marker[i1] != -3) { /* i1 is a F point, loop through it's strong neighbors */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] > 0) { if(P_marker[k1] < P_diag_i[ii]) { P_marker[k1] = jj_counter; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] > 0) { if(P_marker_offd[k1] < P_offd_i[ii]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if (CF_marker_offd[i1] > 0) { if(P_marker_offd[i1] < P_offd_i[ii]) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. 
*/ for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; if(big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if(CF_marker[loc_col] >= 0) { if(P_marker[loc_col] < P_diag_i[ii]) { P_marker[loc_col] = jj_counter; jj_counter++; } } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(CF_marker_offd[loc_col] >= 0) { if(P_marker_offd[loc_col] < P_offd_i[ii]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; } } } } } } } } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d determine structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); } P_diag_i[n_coarse_old] = jj_counter; P_offd_i[n_coarse_old] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; /* Fine to coarse mapping */ if(num_procs > 1) { hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, my_first_cpt, fine_to_coarse_offd); } /* Initialize ahat, which is a modification to a, used in the standard * interpolation routine. 
*/ if (n_fine) { ahat = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST); ihat = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); ipnt = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } if (full_off_procNodes) { ahat_offd = hypre_CTAlloc(HYPRE_Real, full_off_procNodes, HYPRE_MEMORY_HOST); ihat_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); ipnt_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); } for (i = 0; i < n_fine; i++) { P_marker[i] = -1; ahat[i] = 0; ihat[i] = -1; } for (i = 0; i < full_off_procNodes; i++) { P_marker_offd[i] = -1; ahat_offd[i] = 0; ihat_offd[i] = -1; } /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ for (ii = 0; ii < n_coarse_old; ii++) { jj_begin_row = jj_counter; jj_begin_row_offd = jj_counter_offd; i = old_coarse_to_fine[ii]; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] > 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else if (CF_marker[i] == -2) { if (debug_flag==4) wall_time = time_getWallclockSeconds(); strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
*--------------------------------------------------------------*/ if (CF_marker[i1] > 0) { if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = i1; P_diag_data[jj_counter] = zero; jj_counter++; } } else if (CF_marker[i1] != -3) { P_marker[i1] = strong_f_marker; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] > 0) { if(P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = k1; P_diag_data[jj_counter] = zero; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if(CF_marker_offd[k1] > 0) { if(P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if ( CF_marker_offd[i1] > 0) { if(P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd]=i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { P_marker_offd[i1] = strong_f_marker; for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; if(big_k1 >= col_1 && big_k1 < col_n) { loc_col = (HYPRE_Int)(big_k1-col_1); if(CF_marker[loc_col] > 0) { if(P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = loc_col; P_diag_data[jj_counter] = zero; jj_counter++; } } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(CF_marker_offd[loc_col] > 0) { if(P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; if (debug_flag==4) { wall_time = 
time_getWallclockSeconds() - wall_time; wall_1 += wall_time; fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); cnt_c = 0; cnt_f = jj_end_row-jj_begin_row; cnt_c_offd = 0; cnt_f_offd = jj_end_row_offd-jj_begin_row_offd; ihat[i] = cnt_f; ipnt[cnt_f] = i; ahat[cnt_f++] = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is direct neighbor */ i1 = A_diag_j[jj]; if (P_marker[i1] != strong_f_marker) { indx = ihat[i1]; if (indx > -1) ahat[indx] += A_diag_data[jj]; else if (P_marker[i1] >= jj_begin_row) { ihat[i1] = cnt_c; ipnt[cnt_c] = i1; ahat[cnt_c++] += A_diag_data[jj]; } else if (CF_marker[i1] != -3) { ihat[i1] = cnt_f; ipnt[cnt_f] = i1; ahat[cnt_f++] += A_diag_data[jj]; } } else { if(num_functions == 1 || dof_func[i] == dof_func[i1]) { distribute = A_diag_data[jj]/A_diag_data[A_diag_i[i1]]; for (kk = A_diag_i[i1]+1; kk < A_diag_i[i1+1]; kk++) { k1 = A_diag_j[kk]; indx = ihat[k1]; if (indx > -1) ahat[indx] -= A_diag_data[kk]*distribute; else if (P_marker[k1] >= jj_begin_row) { ihat[k1] = cnt_c; ipnt[cnt_c] = k1; ahat[cnt_c++] -= A_diag_data[kk]*distribute; } else { ihat[k1] = cnt_f; ipnt[cnt_f] = k1; ahat[cnt_f++] -= A_diag_data[kk]*distribute; } } if(num_procs > 1) { for (kk = A_offd_i[i1]; kk < A_offd_i[i1+1]; kk++) { k1 = A_offd_j[kk]; indx = ihat_offd[k1]; if(num_functions == 1 || dof_func[i1] == dof_func_offd[k1]) { if (indx > -1) ahat_offd[indx] -= A_offd_data[kk]*distribute; else if (P_marker_offd[k1] >= jj_begin_row_offd) { ihat_offd[k1] = cnt_c_offd; ipnt_offd[cnt_c_offd] = k1; ahat_offd[cnt_c_offd++] -= A_offd_data[kk]*distribute; } else { ihat_offd[k1] = cnt_f_offd; ipnt_offd[cnt_f_offd] = k1; ahat_offd[cnt_f_offd++] -= A_offd_data[kk]*distribute; } } } } } } } if(num_procs > 1) { for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if(P_marker_offd[i1] != strong_f_marker) { indx = ihat_offd[i1]; if (indx > -1) ahat_offd[indx] += A_offd_data[jj]; else if (P_marker_offd[i1] >= 
jj_begin_row_offd) { ihat_offd[i1] = cnt_c_offd; ipnt_offd[cnt_c_offd] = i1; ahat_offd[cnt_c_offd++] += A_offd_data[jj]; } else if (CF_marker_offd[i1] != -3) { ihat_offd[i1] = cnt_f_offd; ipnt_offd[cnt_f_offd] = i1; ahat_offd[cnt_f_offd++] += A_offd_data[jj]; } } else { if(num_functions == 1 || dof_func[i] == dof_func_offd[i1]) { distribute = A_offd_data[jj]/A_ext_data[A_ext_i[i1]]; for (kk = A_ext_i[i1]+1; kk < A_ext_i[i1+1]; kk++) { big_k1 = A_ext_j[kk]; if(big_k1 >= col_1 && big_k1 < col_n) { /*diag*/ loc_col = (HYPRE_Int)(big_k1 - col_1); indx = ihat[loc_col]; if (indx > -1) ahat[indx] -= A_ext_data[kk]*distribute; else if (P_marker[loc_col] >= jj_begin_row) { ihat[loc_col] = cnt_c; ipnt[cnt_c] = loc_col; ahat[cnt_c++] -= A_ext_data[kk]*distribute; } else { ihat[loc_col] = cnt_f; ipnt[cnt_f] = loc_col; ahat[cnt_f++] -= A_ext_data[kk]*distribute; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(num_functions == 1 || dof_func_offd[loc_col] == dof_func_offd[i1]) { indx = ihat_offd[loc_col]; if (indx > -1) ahat_offd[indx] -= A_ext_data[kk]*distribute; else if(P_marker_offd[loc_col] >= jj_begin_row_offd) { ihat_offd[loc_col] = cnt_c_offd; ipnt_offd[cnt_c_offd] = loc_col; ahat_offd[cnt_c_offd++] -= A_ext_data[kk]*distribute; } else { ihat_offd[loc_col] = cnt_f_offd; ipnt_offd[cnt_f_offd] = loc_col; ahat_offd[cnt_f_offd++] -= A_ext_data[kk]*distribute; } } } } } } } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; wall_2 += wall_time; fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); diagonal = ahat[cnt_c]; ahat[cnt_c] = 0; sum_pos = 0; sum_pos_C = 0; sum_neg = 0; sum_neg_C = 0; sum = 0; sum_C = 0; if(sep_weight == 1) { for (jj=0; jj < cnt_c; jj++) { if (ahat[jj] > 0) { sum_pos_C += ahat[jj]; } else { sum_neg_C += ahat[jj]; } } if(num_procs > 1) { for (jj=0; jj < cnt_c_offd; jj++) { if (ahat_offd[jj] > 0) { sum_pos_C += ahat_offd[jj]; } else { sum_neg_C += ahat_offd[jj]; } } } sum_pos = sum_pos_C; sum_neg = sum_neg_C; 
for (jj=cnt_c+1; jj < cnt_f; jj++) { if (ahat[jj] > 0) { sum_pos += ahat[jj]; } else { sum_neg += ahat[jj]; } ahat[jj] = 0; } if(num_procs > 1) { for (jj=cnt_c_offd; jj < cnt_f_offd; jj++) { if (ahat_offd[jj] > 0) { sum_pos += ahat_offd[jj]; } else { sum_neg += ahat_offd[jj]; } ahat_offd[jj] = 0; } } if (sum_neg_C*diagonal) alfa = sum_neg/sum_neg_C/diagonal; if (sum_pos_C*diagonal) beta = sum_pos/sum_pos_C/diagonal; /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. *-----------------------------------------------------------------*/ for (jj = jj_begin_row; jj < jj_end_row; jj++) { j1 = ihat[P_diag_j[jj]]; if (ahat[j1] > 0) P_diag_data[jj] = -beta*ahat[j1]; else P_diag_data[jj] = -alfa*ahat[j1]; P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]]; ahat[j1] = 0; } for (jj=0; jj < cnt_f; jj++) ihat[ipnt[jj]] = -1; if(num_procs > 1) { for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { j1 = ihat_offd[P_offd_j[jj]]; if (ahat_offd[j1] > 0) P_offd_data[jj] = -beta*ahat_offd[j1]; else P_offd_data[jj] = -alfa*ahat_offd[j1]; ahat_offd[j1] = 0; } for (jj=0; jj < cnt_f_offd; jj++) ihat_offd[ipnt_offd[jj]] = -1; } } else { for (jj=0; jj < cnt_c; jj++) { sum_C += ahat[jj]; } if(num_procs > 1) { for (jj=0; jj < cnt_c_offd; jj++) { sum_C += ahat_offd[jj]; } } sum = sum_C; for (jj=cnt_c+1; jj < cnt_f; jj++) { sum += ahat[jj]; ahat[jj] = 0; } if(num_procs > 1) { for (jj=cnt_c_offd; jj < cnt_f_offd; jj++) { sum += ahat_offd[jj]; ahat_offd[jj] = 0; } } if (sum_C*diagonal) alfa = sum/sum_C/diagonal; /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. 
*-----------------------------------------------------------------*/ for (jj = jj_begin_row; jj < jj_end_row; jj++) { j1 = ihat[P_diag_j[jj]]; P_diag_data[jj] = -alfa*ahat[j1]; P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]]; ahat[j1] = 0; } for (jj=0; jj < cnt_f; jj++) ihat[ipnt[jj]] = -1; if(num_procs > 1) { for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { j1 = ihat_offd[P_offd_j[jj]]; P_offd_data[jj] = -alfa*ahat_offd[j1]; ahat_offd[j1] = 0; } for (jj=0; jj < cnt_f_offd; jj++) ihat_offd[ipnt_offd[jj]] = -1; } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; wall_3 += wall_time; fflush(NULL); } } } if (debug_flag==4) { hypre_printf("Proc = %d fill part 1 %f part 2 %f part 3 %f\n", my_id, wall_1, wall_2, wall_3); fflush(NULL); } P = hypre_ParCSRMatrixCreate(comm, total_old_global_cpts, total_global_cpts, num_old_cpts_global, num_cpts_global, 0, P_diag_i[n_coarse_old], P_offd_i[n_coarse_old]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_coarse_old]; P_offd_size = P_offd_i[n_coarse_old]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. 
*/ if(P_offd_size) { hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd); } hypre_MatvecCommPkgCreate(P); for (i=0; i < n_fine; i++) if (CF_marker[i] < -1) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(old_coarse_to_fine, HYPRE_MEMORY_HOST); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(ahat, HYPRE_MEMORY_HOST); hypre_TFree(ihat, HYPRE_MEMORY_HOST); hypre_TFree(ipnt, HYPRE_MEMORY_HOST); if (full_off_procNodes) { hypre_TFree(ahat_offd, HYPRE_MEMORY_HOST); hypre_TFree(ihat_offd, HYPRE_MEMORY_HOST); hypre_TFree(ipnt_offd, HYPRE_MEMORY_HOST); } if (num_procs > 1) { hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST); if(num_functions > 1) hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_MatvecCommPkgDestroy(extend_comm_pkg); } return hypre_error_flag; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildPartialExtInterp * Comment: *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildPartialExtInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_BigInt *num_old_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = 
hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows; HYPRE_BigInt total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /*HYPRE_Int *col_map_offd_P = NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_BigInt *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int *old_coarse_to_fine = NULL; HYPRE_Int loc_col; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_BigInt *Sop_j; HYPRE_Int sgn; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, 
jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter; HYPRE_Int n_coarse_old; HYPRE_BigInt total_old_global_cpts; /* Interpolation weight variables */ HYPRE_Real sum, diagonal, distribute; HYPRE_Int strong_f_marker = -2; /* Loop variables */ /*HYPRE_Int index;*/ HYPRE_Int cnt, old_cnt; HYPRE_Int start_indexing = 0; HYPRE_Int i, ii, i1, i2, jj, kk, k1, jj1; HYPRE_BigInt big_k1; /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Real wall_time; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; if (debug_flag==4) wall_time = time_getWallclockSeconds(); /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = num_cpts_global[0]; /*my_first_old_cpt = num_old_cpts_global[0];*/ n_coarse_old = (HYPRE_Int)(num_old_cpts_global[1] - num_old_cpts_global[0]); /*n_coarse = num_cpts_global[1] - num_cpts_global[0];*/ if (my_id == (num_procs -1)) { total_global_cpts = num_cpts_global[1]; total_old_global_cpts = num_old_cpts_global[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else my_first_cpt = num_cpts_global[my_id]; /*my_first_old_cpt = num_old_cpts_global[my_id];*/ total_global_cpts = num_cpts_global[num_procs]; total_old_global_cpts = num_old_cpts_global[num_procs]; n_coarse_old = (HYPRE_Int)(num_old_cpts_global[my_id+1] - num_old_cpts_global[my_id]); /*n_coarse = num_cpts_global[my_id+1] - num_cpts_global[my_id];*/ #endif if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ full_off_procNodes = 0; if (num_procs > 1) { if (hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, A, CF_marker, S, num_functions, 
dof_func, 1)) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif return hypre_error_flag; } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixBigJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST); if (n_fine) { old_coarse_to_fine = hypre_CTAlloc(HYPRE_Int, n_coarse_old, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); } hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; cnt = 0; old_cnt = 0; for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; if (CF_marker[i] == 1) { fine_to_coarse[i] = cnt++; old_coarse_to_fine[old_cnt++] = i; } else if (CF_marker[i] == -2) { old_coarse_to_fine[old_cnt++] = i; } } /*----------------------------------------------------------------------- * Loop over fine grid. 
*-----------------------------------------------------------------------*/ for (ii = 0; ii < n_coarse_old; ii++) { P_diag_i[ii] = jj_counter; if (num_procs > 1) P_offd_i[ii] = jj_counter_offd; i = old_coarse_to_fine[ii]; if (CF_marker[i] > 0) { jj_counter++; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. *--------------------------------------------------------------------*/ else if (CF_marker[i] == -2) { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] > 0) { /* i1 is a C point */ if (P_marker[i1] < P_diag_i[ii]) { P_marker[i1] = jj_counter; jj_counter++; } } else if (CF_marker[i1] != -3) { /* i1 is a F point, loop through it's strong neighbors */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] > 0) { if(P_marker[k1] < P_diag_i[ii]) { P_marker[k1] = jj_counter; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] > 0) { if(P_marker_offd[k1] < P_offd_i[ii]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if (CF_marker_offd[i1] > 0) { if(P_marker_offd[i1] < P_offd_i[ii]) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. 
*/ for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; if(big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if(P_marker[loc_col] < P_diag_i[ii]) { P_marker[loc_col] = jj_counter; jj_counter++; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(P_marker_offd[loc_col] < P_offd_i[ii]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; } } } } } } } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d determine structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ if (debug_flag== 4) wall_time = time_getWallclockSeconds(); P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); } P_diag_i[n_coarse_old] = jj_counter; P_offd_i[n_coarse_old] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; /* Fine to coarse mapping */ if(num_procs > 1) { hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, my_first_cpt, fine_to_coarse_offd); } for (i = 0; i < n_fine; i++) P_marker[i] = -1; for (i = 0; i < full_off_procNodes; i++) P_marker_offd[i] = -1; /*----------------------------------------------------------------------- * Loop over fine grid points. 
*-----------------------------------------------------------------------*/ for (ii = 0; ii < n_coarse_old; ii++) { jj_begin_row = jj_counter; jj_begin_row_offd = jj_counter_offd; i = old_coarse_to_fine[ii]; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] > 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else if (CF_marker[i] == -2) { strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } } else if (CF_marker[i1] != -3) { P_marker[i1] = strong_f_marker; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if(P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[k1]; P_diag_data[jj_counter] = zero; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if(col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if(CF_marker_offd[k1] >= 0) { if(P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = 
S_offd_j[jj]; if(col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if ( CF_marker_offd[i1] >= 0) { if(P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { P_marker_offd[i1] = strong_f_marker; for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; /* Find local col number */ if(big_k1 >= col_1 && big_k1 < col_n) { loc_col = (HYPRE_Int)(big_k1-col_1); if(P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[loc_col]; P_diag_data[jj_counter] = zero; jj_counter++; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is a c-point and strongly influences i, accumulate * a_(i,i1) into interpolation weight */ i1 = A_diag_j[jj]; if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } else if(P_marker[i1] == strong_f_marker) { sum = zero; sgn = 1; if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1; /* Loop over row of A for point i1 and calculate the sum * of the connections to c-points that strongly incluence i. 
*/ for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if((P_marker[i2] >= jj_begin_row) && (sgn*A_diag_data[jj1]) < 0) sum += A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) sum += A_offd_data[jj1]; } } if(sum != 0) { distribute = A_diag_data[jj]/sum; /* Loop over row of A for point i1 and do the distribution */ for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1]; } } } else { diagonal += A_diag_data[jj]; } } /* neighbor i1 weakly influences i, accumulate a_(i,i1) into * diagonal */ else if (CF_marker[i1] != -3) { if(num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } if(num_procs > 1) { for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if(P_marker_offd[i1] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; else if(P_marker_offd[i1] == strong_f_marker) { sum = zero; for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if(big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if(P_marker[loc_col] >= jj_begin_row ) sum += A_ext_data[jj1]; } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0) sum += A_ext_data[jj1]; } } if(sum != 0) { distribute = A_offd_data[jj] / sum; for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if(big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); 
if(P_marker[loc_col] >= jj_begin_row) P_diag_data[P_marker[loc_col]] += distribute* A_ext_data[jj1]; } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd) P_offd_data[P_marker_offd[loc_col]] += distribute* A_ext_data[jj1]; } } } else { diagonal += A_offd_data[jj]; } } else if (CF_marker_offd[i1] != -3) { if(num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } if (diagonal) { for(jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= -diagonal; for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= -diagonal; } } strong_f_marker--; } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d fill structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ P = hypre_ParCSRMatrixCreate(comm, total_old_global_cpts, total_global_cpts, num_old_cpts_global, num_cpts_global, 0, P_diag_i[n_coarse_old], P_offd_i[n_coarse_old]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_coarse_old]; P_offd_size = P_offd_i[n_coarse_old]; } /* This builds col_map, col_map 
should be monotone increasing and contain * global numbers. */ if(P_offd_size) { hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd); } hypre_MatvecCommPkgCreate(P); for (i=0; i < n_fine; i++) if (CF_marker[i] < -1) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(old_coarse_to_fine, HYPRE_MEMORY_HOST); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST); if(num_functions > 1) hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_MatvecCommPkgDestroy(extend_comm_pkg); } return hypre_error_flag; }
residualbased_predictorcorrector_velocity_bossak_scheme_turbulent.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Jordi Cotela // #if !defined(KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BOSSAK_TURBULENT_SCHEME ) #define KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_VELOCITY_BOSSAK_TURBULENT_SCHEME /* System includes */ /* External includes */ #include "boost/smart_ptr.hpp" /* Project includes */ #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "solving_strategies/schemes/scheme.h" #include "includes/variables.h" #include "includes/cfd_variables.h" #include "containers/array_1d.h" #include "utilities/openmp_utils.h" #include "utilities/dof_updater.h" #include "utilities/coordinate_transformation_utilities.h" #include "processes/process.h" namespace Kratos { /**@name Kratos Globals */ /*@{ */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ /**@name Enum's */ /*@{ */ /*@} */ /**@name Functions */ /*@{ */ /*@} */ /**@name Kratos Classes */ /*@{ */ /// Bossak time scheme for the incompressible flow problem. /** This class provides a second order time scheme of the generalized-alpha Newmark family of methods. It also includes code required to implement slip conditions on the incompressible flow problem and provides the possibility of using a RANS model by passing a turbulence model as an argument to the constructor. This time scheme is intended to be used in combination with elements of type ASGS2D, ASGS3D, VMS or derived classes. To use the slip condition, set the SLIP flag on slip wall nodes. 
To use a wall law in combination with the slip condition, use
MonolithicWallCondition to mesh the boundary
@see ASGS2D, ASGS3D, VMS, MonolithicWallCondition
*/
template<class TSparseSpace,
         class TDenseSpace //= DenseSpace<double>
>
class ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent : public Scheme<TSparseSpace, TDenseSpace>
{
public:
    /**@name Type Definitions */
    /*@{ */

    KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent);

    typedef Scheme<TSparseSpace, TDenseSpace> BaseType;

    typedef typename BaseType::TDataType TDataType;

    typedef typename BaseType::DofsArrayType DofsArrayType;

    typedef typename Element::DofsVectorType DofsVectorType;

    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;

    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;

    typedef Element::GeometryType GeometryType;

    /*@} */
    /**@name Life Cycle */
    /*@{ */

    /** Constructor without a turbulence model.
     * @param NewAlphaBossak Bossak alpha parameter (spectral radius control); Newmark
     *        beta and gamma are derived from it in the body.
     * @param MoveMeshStrategy Mesh-motion mode stored in mMeshVelocity
     *        (2 triggers the Lagrangian update branch in AdditionalUpdateOperations/Predict).
     * @param DomainSize Number of spatial dimensions (2 or 3).
     */
    ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent(
        double NewAlphaBossak,
        double MoveMeshStrategy,
        unsigned int DomainSize)
        : Scheme<TSparseSpace, TDenseSpace>(),
          mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs.
          mrPeriodicIdVar(Kratos::Variable<int>::StaticObject())
    {
        //default values for the Newmark Scheme
        mAlphaBossak = NewAlphaBossak;
        mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2);
        mGammaNewmark = 0.5 - mAlphaBossak;
        mMeshVelocity = MoveMeshStrategy;

        //Allocate auxiliary memory
        // One workspace matrix/vector per OpenMP thread, reused by the
        // CalculateSystemContributions/CalculateRHSContribution overloads.
        int NumThreads = OpenMPUtils::GetNumThreads();
        mMass.resize(NumThreads);
        mDamp.resize(NumThreads);
        mvel.resize(NumThreads);
        macc.resize(NumThreads);
        maccold.resize(NumThreads);
    }

    /** Constructor without a turbulence model, with periodic conditions.
     * Mesh motion is disabled (mMeshVelocity = 0.0).
     * @param NewAlphaBossak Bossak alpha parameter.
     * @param DomainSize Number of spatial dimensions (2 or 3).
     * @param rPeriodicIdVar Nodal variable used to identify periodic partner nodes.
     */
    ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent(
        double NewAlphaBossak,
        unsigned int DomainSize,
        const Variable<int>& rPeriodicIdVar)
        : Scheme<TSparseSpace, TDenseSpace>(),
          mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs.
          mrPeriodicIdVar(rPeriodicIdVar)
    {
        //default values for the Newmark Scheme
        mAlphaBossak = NewAlphaBossak;
        mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2);
        mGammaNewmark = 0.5 - mAlphaBossak;
        mMeshVelocity = 0.0;

        //Allocate auxiliary memory
        int NumThreads = OpenMPUtils::GetNumThreads();
        mMass.resize(NumThreads);
        mDamp.resize(NumThreads);
        mvel.resize(NumThreads);
        macc.resize(NumThreads);
        maccold.resize(NumThreads);
    }

    /** Constructor without a turbulence model, using a custom slip flag.
     * @param NewAlphaBossak Bossak alpha parameter.
     * @param MoveMeshStrategy Mesh-motion mode stored in mMeshVelocity.
     * @param DomainSize Number of spatial dimensions (2 or 3).
     * @param rSlipFlag Flag used (instead of SLIP) to mark nodes where the
     *        rotated slip condition is applied.
     */
    ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent(
        double NewAlphaBossak,
        double MoveMeshStrategy,
        unsigned int DomainSize,
        Kratos::Flags& rSlipFlag)
        : Scheme<TSparseSpace, TDenseSpace>(),
          mRotationTool(DomainSize,DomainSize+1,rSlipFlag), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs.
          mrPeriodicIdVar(Kratos::Variable<int>::StaticObject())
    {
        //default values for the Newmark Scheme
        mAlphaBossak = NewAlphaBossak;
        mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2);
        mGammaNewmark = 0.5 - mAlphaBossak;
        mMeshVelocity = MoveMeshStrategy;

        //Allocate auxiliary memory
        int NumThreads = OpenMPUtils::GetNumThreads();
        mMass.resize(NumThreads);
        mDamp.resize(NumThreads);
        mvel.resize(NumThreads);
        macc.resize(NumThreads);
        maccold.resize(NumThreads);
    }

    /** Constructor with a turbulence model.
     * @param NewAlphaBossak Bossak alpha parameter.
     * @param MoveMeshStrategy Mesh-motion mode stored in mMeshVelocity.
     * @param DomainSize Number of spatial dimensions (2 or 3).
     * @param pTurbulenceModel Process executed at the end of each non-linear
     *        iteration (see FinalizeNonLinIteration).
     */
    ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent(
        double NewAlphaBossak,
        double MoveMeshStrategy,
        unsigned int DomainSize,
        Process::Pointer pTurbulenceModel)
        : Scheme<TSparseSpace, TDenseSpace>(),
          mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs
          mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()),
          mpTurbulenceModel(pTurbulenceModel)
    {
        //default values for the Newmark Scheme
        mAlphaBossak = NewAlphaBossak;
        mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2);
        mGammaNewmark = 0.5 - mAlphaBossak;
        mMeshVelocity = MoveMeshStrategy;

        //Allocate auxiliary memory
        int NumThreads = OpenMPUtils::GetNumThreads();
        mMass.resize(NumThreads);
        mDamp.resize(NumThreads);
        mvel.resize(NumThreads);
        macc.resize(NumThreads);
        maccold.resize(NumThreads);
    }

    /** Constructor with a turbulence model and relaxation factor.
     * @param NewAlphaBossak Bossak alpha parameter.
     * @param MoveMeshStrategy Mesh-motion mode stored in mMeshVelocity.
     * @param DomainSize Number of spatial dimensions (2 or 3).
     * @param RelaxationFactor Factor applied to the solution increment Dv in Update.
     * @param pTurbulenceModel Process executed at the end of each non-linear iteration.
     */
    ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent(
        double NewAlphaBossak,
        double MoveMeshStrategy,
        unsigned int DomainSize,
        const double RelaxationFactor,
        Process::Pointer pTurbulenceModel)
        : Scheme<TSparseSpace, TDenseSpace>(),
          mRotationTool(DomainSize,DomainSize+1,SLIP), // Second argument is number of matrix rows per node: monolithic elements have velocity and pressure dofs
          mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()),
          mpTurbulenceModel(pTurbulenceModel)
    {
        //default values for the Newmark Scheme
        mAlphaBossak = NewAlphaBossak;
        mBetaNewmark = 0.25 * pow((1.00 - mAlphaBossak), 2);
        mGammaNewmark = 0.5 - mAlphaBossak;
        mMeshVelocity = MoveMeshStrategy;
        mRelaxationFactor = RelaxationFactor;

        //Allocate auxiliary memory
        int NumThreads = OpenMPUtils::GetNumThreads();
        mMass.resize(NumThreads);
        mDamp.resize(NumThreads);
        mvel.resize(NumThreads);
        macc.resize(NumThreads);
        maccold.resize(NumThreads);
    }

    /** Destructor.
     */
    ~ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent() override
    {
    }

    /*@} */
    /**@name Operators */
    /*@{ */

    /** Performing the update of the solution.
     * Rotates velocities to the slip-aligned local frame, applies the
     * relaxation factor to the increment Dv, updates the dofs, rotates back,
     * then updates nodal time derivatives via AdditionalUpdateOperations.
     */
    //***************************************************************************
    void Update(ModelPart& r_model_part,
                DofsArrayType& rDofSet,
                TSystemMatrixType& A,
                TSystemVectorType& Dv,
                TSystemVectorType& b) override
    {
        KRATOS_TRY;

        mRotationTool.RotateVelocities(r_model_part);

        // Relaxation: scale the computed increment before applying it.
        TSparseSpace::InplaceMult(Dv, mRelaxationFactor);

        mpDofUpdater->UpdateDofs(rDofSet,Dv);

        mRotationTool.RecoverVelocities(r_model_part);

        AdditionalUpdateOperations(r_model_part, rDofSet, A, Dv, b);

        KRATOS_CATCH("")
    }

    //***************************************************************************
    /** Updates nodal accelerations from the velocity increment (Bossak/Newmark),
     * and, in the Lagrangian case (mMeshVelocity == 2), also the mesh velocity
     * and displacement of non-inlet nodes. Runs in parallel over node partitions.
     */
    void AdditionalUpdateOperations(ModelPart& rModelPart,
                                    DofsArrayType& rDofSet,
                                    TSystemMatrixType& A,
                                    TSystemVectorType& Dv,
                                    TSystemVectorType& b)
    {
        KRATOS_TRY

        int NumThreads = OpenMPUtils::GetNumThreads();
        OpenMPUtils::PartitionVector NodePartition;
        OpenMPUtils::DivideInPartitions(rModelPart.Nodes().size(), NumThreads, NodePartition);

        //updating time derivatives (nodally for efficiency)
        #pragma omp parallel
        {
            array_1d<double, 3 > DeltaVel;

            int k = OpenMPUtils::ThisThread();

            ModelPart::NodeIterator NodesBegin = rModelPart.NodesBegin() + NodePartition[k];
            ModelPart::NodeIterator NodesEnd = rModelPart.NodesBegin() + NodePartition[k + 1];

            for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; itNode++)
            {
                // Velocity increment over the step drives the acceleration update.
                noalias(DeltaVel) = (itNode)->FastGetSolutionStepValue(VELOCITY) - (itNode)->FastGetSolutionStepValue(VELOCITY, 1);

                array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 0);
                array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1);

                UpdateAcceleration(CurrentAcceleration, DeltaVel, OldAcceleration);

                if (mMeshVelocity == 2)//Lagrangian
                {
                    // Nodes not marked as Lagrangian inlet follow the flow:
                    // mesh velocity equals fluid velocity and displacement is integrated.
                    if((itNode)->FastGetSolutionStepValue(IS_LAGRANGIAN_INLET) < 1e-15)
                    {
                        array_1d<double, 3 > & CurrentDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 0);
                        array_1d<double, 3 > & OldDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 1);
                        array_1d<double, 3 > & OldVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY, 1);

                        noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY)) = itNode->FastGetSolutionStepValue(VELOCITY);
                        UpdateDisplacement(CurrentDisplacement, OldDisplacement, OldVelocity, OldAcceleration, CurrentAcceleration);
                    }
                    else
                    {
                        // Inlet nodes stay fixed: zero mesh velocity and displacement.
                        noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY)) = ZeroVector(3);
                        noalias(itNode->FastGetSolutionStepValue(DISPLACEMENT)) = ZeroVector(3);
                    }
                }
            }
        }

        KRATOS_CATCH("")
    }

    //***************************************************************************
    //predicts the solution at the current step as
    // v = vold
    /** Copies the previous step's velocity/pressure onto free dofs and updates
     * the corresponding accelerations (and, in the Lagrangian case, mesh
     * velocity and displacement). Fixed dofs are left untouched.
     */
    void Predict(ModelPart& rModelPart,
                 DofsArrayType& rDofSet,
                 TSystemMatrixType& A,
                 TSystemVectorType& Dv,
                 TSystemVectorType& b) override
    {
        // if (rModelPart.GetCommunicator().MyPID() == 0)
        //     std::cout << "prediction" << std::endl;

        int NumThreads = OpenMPUtils::GetNumThreads();
        OpenMPUtils::PartitionVector NodePartition;
        OpenMPUtils::DivideInPartitions(rModelPart.Nodes().size(), NumThreads, NodePartition);

        #pragma omp parallel
        {
            //array_1d<double, 3 > DeltaDisp;

            int k = OpenMPUtils::ThisThread();

            ModelPart::NodeIterator NodesBegin = rModelPart.NodesBegin() + NodePartition[k];
            ModelPart::NodeIterator NodesEnd = rModelPart.NodesBegin() + NodePartition[k + 1];

            for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; itNode++)
            {
                array_1d<double, 3 > & OldVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY, 1);
                double& OldPressure = (itNode)->FastGetSolutionStepValue(PRESSURE, 1);

                //predicting velocity
                //ATTENTION::: the prediction is performed only on free nodes
                array_1d<double, 3 > & CurrentVelocity = (itNode)->FastGetSolutionStepValue(VELOCITY);
                double& CurrentPressure = (itNode)->FastGetSolutionStepValue(PRESSURE);

                if ((itNode->pGetDof(VELOCITY_X))->IsFree())
                    (CurrentVelocity[0]) = OldVelocity[0];
                if (itNode->pGetDof(VELOCITY_Y)->IsFree())
                    (CurrentVelocity[1]) = OldVelocity[1];
                // VELOCITY_Z only exists in 3D problems, so check for the dof first.
                if (itNode->HasDofFor(VELOCITY_Z))
                    if (itNode->pGetDof(VELOCITY_Z)->IsFree())
                        (CurrentVelocity[2]) = OldVelocity[2];

                if (itNode->pGetDof(PRESSURE)->IsFree())
                    CurrentPressure = OldPressure;

                // updating time derivatives ::: please note that displacements and
                // their time derivatives can not be consistently fixed separately
                array_1d<double, 3 > DeltaVel;
                noalias(DeltaVel) = CurrentVelocity - OldVelocity;
                array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1);
                array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION);

                UpdateAcceleration(CurrentAcceleration, DeltaVel, OldAcceleration);

                if (mMeshVelocity == 2) //Lagrangian
                {
                    array_1d<double, 3 > & OldDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 1);
                    array_1d<double, 3 > & CurrentDisplacement = (itNode)->FastGetSolutionStepValue(DISPLACEMENT, 0);

                    if((itNode)->FastGetSolutionStepValue(IS_LAGRANGIAN_INLET) < 1e-15)
                    {
                        noalias(itNode->FastGetSolutionStepValue(MESH_VELOCITY)) = itNode->FastGetSolutionStepValue(VELOCITY);
                        UpdateDisplacement(CurrentDisplacement, OldDisplacement, OldVelocity, OldAcceleration, CurrentAcceleration);
                    }
                    else
                    {
                        // NOTE(review): only X/Y components are zeroed here, unlike the
                        // ZeroVector(3) used in AdditionalUpdateOperations — presumably
                        // this branch targets 2D Lagrangian inlets; confirm for 3D use.
                        itNode->FastGetSolutionStepValue(MESH_VELOCITY_X) = 0.0;
                        itNode->FastGetSolutionStepValue(MESH_VELOCITY_Y) = 0.0;
                        itNode->FastGetSolutionStepValue(DISPLACEMENT_X) = 0.0;
                        itNode->FastGetSolutionStepValue(DISPLACEMENT_Y) = 0.0;
                    }
                }
            }
        }

        // if (rModelPart.GetCommunicator().MyPID() == 0)
        //     std::cout << "end of prediction" << std::endl;
    }
//*************************************************************************** /** this function is designed to be called in the builder and solver to introduce the selected time integration scheme. It "asks" the matrix needed to the element and performs the operations needed to introduce the seected time integration scheme. this function calculates at the same time the contribution to the LHS and to the RHS of the system */ void CalculateSystemContributions( Element& rCurrentElement, LocalSystemMatrixType& LHS_Contribution, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, const ProcessInfo& CurrentProcessInfo) override { KRATOS_TRY int k = OpenMPUtils::ThisThread(); //Initializing the non linear iteration for the current element rCurrentElement.InitializeNonLinearIteration(CurrentProcessInfo); //basic operations for the element considered rCurrentElement.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); rCurrentElement.CalculateMassMatrix(mMass[k], CurrentProcessInfo); rCurrentElement.CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo); rCurrentElement.EquationIdVector(EquationId, CurrentProcessInfo); //adding the dynamic contributions (statics is already included) AddDynamicsToLHS(LHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); AddDynamicsToRHS(rCurrentElement, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); // If there is a slip condition, apply it on a rotated system of coordinates mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentElement.GetGeometry()); mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentElement.GetGeometry()); KRATOS_CATCH("") } void CalculateRHSContribution( Element& rCurrentElement, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, const ProcessInfo& CurrentProcessInfo) override { int k = OpenMPUtils::ThisThread(); //Initializing the non linear iteration for 
the current element rCurrentElement.InitializeNonLinearIteration(CurrentProcessInfo); //basic operations for the element considered rCurrentElement.CalculateRightHandSide(RHS_Contribution, CurrentProcessInfo); rCurrentElement.CalculateMassMatrix(mMass[k], CurrentProcessInfo); rCurrentElement.CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo); rCurrentElement.EquationIdVector(EquationId, CurrentProcessInfo); //adding the dynamic contributions (static is already included) AddDynamicsToRHS(rCurrentElement, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); // If there is a slip condition, apply it on a rotated system of coordinates mRotationTool.Rotate(RHS_Contribution,rCurrentElement.GetGeometry()); mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentElement.GetGeometry()); } /** functions totally analogous to the precedent but applied to the "condition" objects */ void CalculateSystemContributions( Condition& rCurrentCondition, LocalSystemMatrixType& LHS_Contribution, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, const ProcessInfo& CurrentProcessInfo) override { KRATOS_TRY int k = OpenMPUtils::ThisThread(); rCurrentCondition.InitializeNonLinearIteration(CurrentProcessInfo); rCurrentCondition.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); rCurrentCondition.CalculateMassMatrix(mMass[k], CurrentProcessInfo); //rCurrentCondition.CalculateDampingMatrix(VelocityBossakAuxiliaries::mDamp,CurrentProcessInfo); rCurrentCondition.CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution, CurrentProcessInfo); rCurrentCondition.EquationIdVector(EquationId, CurrentProcessInfo); AddDynamicsToLHS(LHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); AddDynamicsToRHS(rCurrentCondition, RHS_Contribution, mDamp[k], mMass[k], CurrentProcessInfo); // Rotate contributions (to match coordinates for slip conditions) 
mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentCondition.GetGeometry()); mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentCondition.GetGeometry()); KRATOS_CATCH("") } void CalculateRHSContribution( Condition& rCurrentCondition, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; int k = OpenMPUtils::ThisThread(); //Initializing the non linear iteration for the current condition rCurrentCondition.InitializeNonLinearIteration(rCurrentProcessInfo); //basic operations for the element considered rCurrentCondition.CalculateRightHandSide(RHS_Contribution,rCurrentProcessInfo); rCurrentCondition.CalculateMassMatrix(mMass[k],rCurrentProcessInfo); //rCurrentCondition.CalculateDampingMatrix(VelocityBossakAuxiliaries::mDamp,CurrentProcessInfo); rCurrentCondition.CalculateLocalVelocityContribution(mDamp[k], RHS_Contribution,rCurrentProcessInfo); rCurrentCondition.EquationIdVector(EquationId,rCurrentProcessInfo); //adding the dynamic contributions (static is already included) AddDynamicsToRHS(rCurrentCondition, RHS_Contribution, mDamp[k], mMass[k],rCurrentProcessInfo); // Rotate contributions (to match coordinates for slip conditions) mRotationTool.Rotate(RHS_Contribution,rCurrentCondition.GetGeometry()); mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentCondition.GetGeometry()); KRATOS_CATCH(""); } //************************************************************************************* //************************************************************************************* void InitializeSolutionStep(ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); Scheme<TSparseSpace, TDenseSpace>::InitializeSolutionStep(r_model_part, A, Dx, b); double DeltaTime = CurrentProcessInfo[DELTA_TIME]; KRATOS_ERROR_IF(DeltaTime < 1.0e-12) << 
// ---- tail of the scheme's initialization: the string below is the second half
// of a KRATOS error-message stream expression that starts before this chunk
// (kept verbatim), followed by the Bossak/Newmark integration constants. ----
"Detected delta_time = 0 in the Bossak scheme. Check if the time step is created correctly for the current model part" << std::endl;

//initializing constants
// NOTE(review): standard Bossak-Newmark factors; each is derived from
// mGammaNewmark / mBetaNewmark / mAlphaBossak and the time step DeltaTime.
ma0 = 1.0 / (mGammaNewmark * DeltaTime);                    // acceleration from velocity increment: 1/(gamma*dt)
ma1 = DeltaTime * mBetaNewmark / mGammaNewmark;             // scaling of the velocity/pressure LHS: dt*beta/gamma
ma2 = (-1 + mGammaNewmark) / mGammaNewmark;                 // weight of the old acceleration
ma3 = DeltaTime;                                            // weight of the old velocity in the displacement update
ma4 = pow(DeltaTime, 2)*(-2.0 * mBetaNewmark + 1.0) / 2.0;  // dt^2*(1-2*beta)/2 displacement term
ma5 = pow(DeltaTime, 2) * mBetaNewmark;                     // dt^2*beta displacement term
mam = (1.0 - mAlphaBossak) / (mGammaNewmark * DeltaTime);   // mass-matrix factor, includes the Bossak alpha
}

//*************************************************************************************
//*************************************************************************************

/// Per-nonlinear-iteration hook.
/** Runs the optional turbulence model (if one was supplied) and, when
 *  OSS_SWITCH is active in the ProcessInfo, recomputes the orthogonal
 *  subscale projections: ADVPROJ/DIVPROJ/NODAL_AREA are zeroed, accumulated
 *  element by element, assembled across MPI partitions, corrected on periodic
 *  boundaries, and finally divided by the nodal area.
 *  @param rModelPart The fluid model part being solved.
 *  @param A,Dx,b System matrix/update/RHS (unused here, required by the Scheme interface).
 */
void FinalizeNonLinIteration(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override
{
    const auto& r_current_process_info = rModelPart.GetProcessInfo();

    if (mpTurbulenceModel) // If not null
        mpTurbulenceModel->Execute();

    //if orthogonal subscales are computed
    if (r_current_process_info[OSS_SWITCH] == 1.0) {

        KRATOS_INFO("Bossak Scheme") << "Computing OSS projections" << std::endl;

        // Reset projection variables on every node before accumulation.
        const int nnodes = static_cast<int>(rModelPart.Nodes().size());
        auto nbegin = rModelPart.NodesBegin();
        #pragma omp parallel for firstprivate(nbegin,nnodes)
        for(int i=0; i<nnodes; ++i)
        {
            auto ind = nbegin + i;
            noalias(ind->FastGetSolutionStepValue(ADVPROJ)) = ZeroVector(3);
            ind->FastGetSolutionStepValue(DIVPROJ) = 0.0;
            ind->FastGetSolutionStepValue(NODAL_AREA) = 0.0;
        }//end of loop over nodes

        //loop on nodes to compute ADVPROJ CONVPROJ NODALAREA
        // NOTE(review): each element's Calculate(ADVPROJ, ...) is expected to
        // accumulate its contribution into the nodal variables reset above;
        // 'output' is a scratch value required by the Calculate signature.
        array_1d<double, 3 > output = ZeroVector(3);

        const int nel = static_cast<int>(rModelPart.Elements().size());
        auto elbegin = rModelPart.ElementsBegin();
        #pragma omp parallel for firstprivate(elbegin,nel,output)
        for(int i=0; i<nel; ++i)
        {
            auto elem = elbegin + i;
            elem->Calculate(ADVPROJ, output, r_current_process_info);
        }

        // Sum nodal contributions across MPI partitions.
        rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA);
        rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ);
        rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ);

        // Correction for periodic conditions
        this->PeriodicConditionProjectionCorrection(rModelPart);

        // Normalize the projections by the nodal area. A zero area is patched
        // to 1.0 so the division is well defined (leaves the zero projections
        // unchanged on such nodes).
        #pragma omp parallel for firstprivate(nbegin,nnodes)
        for(int i=0; i<nnodes; ++i)
        {
            auto ind = nbegin + i;
            if (ind->FastGetSolutionStepValue(NODAL_AREA) == 0.0)
            {
                ind->FastGetSolutionStepValue(NODAL_AREA) = 1.0;
                //KRATOS_WATCH("*********ATTENTION: NODAL AREA IS ZERRROOOO************");
            }
            const double Area = ind->FastGetSolutionStepValue(NODAL_AREA);
            ind->FastGetSolutionStepValue(ADVPROJ) /= Area;
            ind->FastGetSolutionStepValue(DIVPROJ) /= Area;
        }
    }
}

/// End-of-step hook: computes nodal REACTION values and the Bossak relaxed
/// acceleration, then delegates to the base Scheme.
/** For every node, REACTION is cleared and RELAXED_ACCELERATION is set to
 *  (1-alpha)*acc(t) + alpha*acc(t-dt). For every element, the local system is
 *  rebuilt (including mass/damping and the dynamic terms) and the RHS is
 *  subtracted into the nodal REACTION components with omp atomic updates
 *  (the pressure dof of each node is skipped).
 */
void FinalizeSolutionStep(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override
{
    Element::EquationIdVectorType EquationId;
    LocalSystemVectorType RHS_Contribution;
    LocalSystemMatrixType LHS_Contribution;
    const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

    //for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); ++itNode)
    #pragma omp parallel for
    for(int k = 0; k<static_cast<int>(rModelPart.Nodes().size()); k++)
    {
        auto itNode = rModelPart.NodesBegin() + k;
        (itNode->FastGetSolutionStepValue(REACTION)).clear();

        // calculating relaxed acceleration
        const array_1d<double, 3 > & CurrentAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 0);
        const array_1d<double, 3 > & OldAcceleration = (itNode)->FastGetSolutionStepValue(ACCELERATION, 1);
        const array_1d<double, 3> relaxed_acceleration = (1 - mAlphaBossak) * CurrentAcceleration
                                                        + mAlphaBossak * OldAcceleration;
        (itNode)->SetValue(RELAXED_ACCELERATION, relaxed_acceleration);
    }

    //for (ModelPart::ElementsContainerType::ptr_iterator itElem = rModelPart.Elements().ptr_begin(); itElem != rModelPart.Elements().ptr_end(); ++itElem)
    #pragma omp parallel for firstprivate(EquationId,RHS_Contribution,LHS_Contribution)
    for(int k = 0; k<static_cast<int>(rModelPart.Elements().size()); k++)
    {
        auto itElem = rModelPart.Elements().ptr_begin()+k;
        int thread_id = OpenMPUtils::ThisThread();

        (*itElem)->InitializeNonLinearIteration(CurrentProcessInfo);
        //KRATOS_WATCH(LHS_Contribution);
        //basic operations for the element considered
        (*itElem)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);

        //std::cout << rCurrentElement->Id() << " RHS = " << RHS_Contribution << std::endl;
        (*itElem)->CalculateMassMatrix(mMass[thread_id], CurrentProcessInfo);
        (*itElem)->CalculateLocalVelocityContribution(mDamp[thread_id], RHS_Contribution, CurrentProcessInfo);

        // NOTE(review): EquationId is filled here but not read in this block —
        // presumably kept for interface symmetry; confirm before removing.
        (*itElem)->EquationIdVector(EquationId, CurrentProcessInfo);

        //adding the dynamic contributions (statics is already included)
        AddDynamicsToLHS(LHS_Contribution, mDamp[thread_id], mMass[thread_id], CurrentProcessInfo);
        AddDynamicsToRHS(**itElem, RHS_Contribution, mDamp[thread_id], mMass[thread_id], CurrentProcessInfo);

        GeometryType& rGeom = (*itElem)->GetGeometry();
        unsigned int NumNodes = rGeom.PointsNumber();
        unsigned int Dimension = rGeom.WorkingSpaceDimension();
        unsigned int index = 0;

        // Scatter -RHS into the nodal REACTION; atomics guard against several
        // elements updating the same node concurrently.
        for (unsigned int i = 0; i < NumNodes; i++)
        {
            auto& reaction = rGeom[i].FastGetSolutionStepValue(REACTION);

            double& target_value0 = reaction[0];
            const double& origin_value0 = RHS_Contribution[index++];
            #pragma omp atomic
            target_value0 -= origin_value0;

            double& target_value1 = reaction[1];
            const double& origin_value1 = RHS_Contribution[index++];
            #pragma omp atomic
            target_value1 -= origin_value1;

            if (Dimension == 3)
            {
                double& target_value2 = reaction[2];
                const double& origin_value2 = RHS_Contribution[index++];
                #pragma omp atomic
                target_value2 -= origin_value2;
            }

            // rGeom[i].FastGetSolutionStepValue(REACTION_X,0) -= RHS_Contribution[index++];
            // rGeom[i].FastGetSolutionStepValue(REACTION_Y,0) -= RHS_Contribution[index++];
            // if (Dimension == 3) rGeom[i].FastGetSolutionStepValue(REACTION_Z,0) -= RHS_Contribution[index++];
            index++; // skip pressure dof
        }
    }

    rModelPart.GetCommunicator().AssembleCurrentData(REACTION);

    // Base scheme calls FinalizeSolutionStep method of elements and conditions
    Scheme<TSparseSpace, TDenseSpace>::FinalizeSolutionStep(rModelPart, A, Dx, b);
}

//************************************************************************************************
//************************************************************************************************

/// Free memory allocated by this object.
void Clear() override
{
    this->mpDofUpdater->Clear();
}

/*@} */
/**@name Operations */
/*@{ */

/*@} */
/**@name Access */
/*@{ */

/*@} */
/**@name Inquiry */
/*@{ */

/*@} */
/**@name Friends */
/*@{ */

/*@} */

protected:
/**@name Protected static Member Variables */
/*@{ */

/*@} */
/**@name Protected member Variables */
/*@{ */

// Time-integration parameters (set on construction / Initialize).
double mAlphaBossak;
double mBetaNewmark;
double mGammaNewmark;
double mMeshVelocity;

double mRelaxationFactor = 1.0;

// Precomputed integration constants (see the initialization tail above).
double ma0;
double ma1;
double ma2;
double ma3;
double ma4;
double ma5;
double mam;

// Per-thread scratch storage, indexed by OpenMPUtils::ThisThread().
std::vector< Matrix > mMass;
std::vector< Matrix > mDamp;
std::vector< Vector > mvel;
std::vector< Vector > macc;
std::vector< Vector > maccold;

/*@} */
/**@name Protected Operators*/
/*@{ */

/** On periodic boundaries, the nodal area and the values to project need to take into account contributions from elements on
 * both sides of the boundary. This is done using the conditions and the non-historical nodal data containers as follows:\n
 * 1- The partition that owns the PeriodicCondition adds the values on both nodes to their non-historical containers.\n
 * 2- The non-historical containers are added across processes, communicating the right value from the condition owner to all partitions.\n
 * 3- The value on all periodic nodes is replaced by the one received in step 2.
 */
void PeriodicConditionProjectionCorrection(ModelPart& rModelPart)
{
    const int num_nodes = rModelPart.NumberOfNodes();
    const int num_conditions = rModelPart.NumberOfConditions();

    // Step 0: reset the non-historical containers used for the exchange.
    #pragma omp parallel for
    for (int i = 0; i < num_nodes; i++) {
        auto it_node = rModelPart.NodesBegin() + i;

        it_node->SetValue(NODAL_AREA,0.0);
        it_node->SetValue(ADVPROJ,ZeroVector(3));
        it_node->SetValue(DIVPROJ,0.0);
    }

    // Step 1: accumulate both sides of each periodic link on the owner.
    #pragma omp parallel for
    for (int i = 0; i < num_conditions; i++) {
        auto it_cond = rModelPart.ConditionsBegin() + i;

        if(it_cond->Is(PERIODIC)) {
            this->AssemblePeriodicContributionToProjections(it_cond->GetGeometry());
        }
    }

    // Step 2: communicate the accumulated values to all partitions.
    rModelPart.GetCommunicator().AssembleNonHistoricalData(NODAL_AREA);
    rModelPart.GetCommunicator().AssembleNonHistoricalData(ADVPROJ);
    rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ);

    // Step 3: overwrite the historical values on periodic nodes.
    #pragma omp parallel for
    for (int i = 0; i < num_nodes; i++) {
        auto it_node = rModelPart.NodesBegin() + i;
        this->CorrectContributionsOnPeriodicNode(*it_node);
    }
}

/// Sum NODAL_AREA/ADVPROJ/DIVPROJ over all nodes of a periodic condition and
/// store the totals in every node's non-historical container.
void AssemblePeriodicContributionToProjections(Geometry< Node<3> >& rGeometry)
{
    unsigned int nodes_in_cond = rGeometry.PointsNumber();

    double nodal_area = 0.0;
    array_1d<double,3> momentum_projection = ZeroVector(3);
    double mass_projection = 0.0;
    for ( unsigned int i = 0; i < nodes_in_cond; i++ )
    {
        auto& r_node = rGeometry[i];
        nodal_area += r_node.FastGetSolutionStepValue(NODAL_AREA);
        noalias(momentum_projection) += r_node.FastGetSolutionStepValue(ADVPROJ);
        mass_projection += r_node.FastGetSolutionStepValue(DIVPROJ);
    }

    for ( unsigned int i = 0; i < nodes_in_cond; i++ )
    {
        auto& r_node = rGeometry[i];
        /* Note that this loop is expected to be threadsafe in normal conditions,
         * since each node should belong to a single periodic link. However, I am
         * setting the locks for openmp in case that we try more complicated things
         * in the future (like having different periodic conditions for different
         * coordinate directions).
         */
        r_node.SetLock();
        r_node.GetValue(NODAL_AREA) = nodal_area;
        noalias(r_node.GetValue(ADVPROJ)) = momentum_projection;
        r_node.GetValue(DIVPROJ) = mass_projection;
        r_node.UnSetLock();
    }
}

/// Replace a periodic node's historical projection values with the assembled
/// non-historical ones (no-op for non-periodic nodes, whose non-historical
/// NODAL_AREA stays zero).
void CorrectContributionsOnPeriodicNode(Node<3>& rNode)
{
    if (rNode.GetValue(NODAL_AREA) != 0.0) // Only periodic nodes will have a non-historical NODAL_AREA set.
    {
        rNode.FastGetSolutionStepValue(NODAL_AREA) = rNode.GetValue(NODAL_AREA);
        noalias(rNode.FastGetSolutionStepValue(ADVPROJ)) = rNode.GetValue(ADVPROJ);
        rNode.FastGetSolutionStepValue(DIVPROJ) = rNode.GetValue(DIVPROJ);
    }
}

//*********************************************************************************
//Updating first time Derivative
//*********************************************************************************

/// Newmark displacement update:
/// d(t+dt) = d(t) + dt*v(t) + ma4*a(t) + ma5*a(t+dt).
void UpdateDisplacement(array_1d<double, 3 > & CurrentDisplacement,
                        const array_1d<double, 3 > & OldDisplacement,
                        const array_1d<double, 3 > & OldVelocity,
                        const array_1d<double, 3 > & OldAcceleration,
                        const array_1d<double, 3 > & CurrentAcceleration)
{
    noalias(CurrentDisplacement) = OldDisplacement + ma3 * OldVelocity + ma4 * OldAcceleration + ma5*CurrentAcceleration;
}

//**************************************************************************

/// Newmark acceleration update from the velocity increment:
/// a(t+dt) = ma0*(v(t+dt)-v(t)) + ma2*a(t).
void UpdateAcceleration(array_1d<double, 3 > & CurrentAcceleration,
                        const array_1d<double, 3 > & DeltaVel,
                        const array_1d<double, 3 > & OldAcceleration)
{
    noalias(CurrentAcceleration) = ma0 * DeltaVel + ma2 * OldAcceleration;
}

//****************************************************************************

/** Build the dynamic LHS: Kdyn = mam*M + D + ma1*K (empty M or D is skipped). */
void AddDynamicsToLHS(LocalSystemMatrixType& LHS_Contribution,
                      LocalSystemMatrixType& D,
                      LocalSystemMatrixType& M,
                      const ProcessInfo& CurrentProcessInfo)
{
    //multipling time scheme factor
    LHS_Contribution *= ma1;

    // adding mass contribution to the dynamic stiffness
    if (M.size1() != 0) // if M matrix declared
    {
        noalias(LHS_Contribution) += mam*M;
    }

    //adding damping contribution
    if (D.size1() != 0) // if D matrix declared
    {
        noalias(LHS_Contribution) += D;
    }
}

//****************************************************************************

/// Add Bossak contributions from the inertial term to the RHS vector.
/** This essentially performs bdyn = b - M*acc for the current element.
 * Note that viscous/pressure contributions to the RHS are expected to be added by the element itself.
 * @param[in] rCurrentElement The fluid element we are assembling.
 * @param[in/out] rRHS_Contribution The right hand side term where the contribution will be added.
 * @param[in] rD The elemental velocity/pressure LHS matrix.
 * @param[in] rM The elemental acceleration LHS matrix.
 * @param[in] rCurrentProcessInfo ProcessInfo instance for the containing ModelPart.
 */
void AddDynamicsToRHS(
    Element& rCurrentElement,
    LocalSystemVectorType& rRHS_Contribution,
    LocalSystemMatrixType& rD,
    LocalSystemMatrixType& rM,
    const ProcessInfo& rCurrentProcessInfo)
{
    //adding inertia contribution
    if (rM.size1() != 0) {
        const auto& r_const_elem_ref = rCurrentElement;
        int k = OpenMPUtils::ThisThread();
        // Bossak-weighted acceleration: (1-alpha)*acc(t) + alpha*acc(t-dt),
        // built in the per-thread scratch vectors macc/maccold.
        r_const_elem_ref.GetSecondDerivativesVector(macc[k], 0);
        (macc[k]) *= (1.00 - mAlphaBossak);
        r_const_elem_ref.GetSecondDerivativesVector(maccold[k], 1);
        noalias(macc[k]) += mAlphaBossak * maccold[k];

        noalias(rRHS_Contribution) -= prod(rM, macc[k]);
    }
}

/// Add Bossak contributions from the inertial term to the RHS vector.
/** This essentially performs bdyn = b - M*acc for the current condition.
 * Note that viscous/pressure contributions to the RHS are expected to be added by the element condition.
 * @param[in] rCurrentCondition The fluid condition we are assembling.
 * @param[in/out] rRHS_Contribution The right hand side term where the contribution will be added.
 * @param[in] rD The elemental velocity/pressure LHS matrix.
 * @param[in] rM The elemental acceleration LHS matrix.
 * @param[in] rCurrentProcessInfo ProcessInfo instance for the containing ModelPart.
 */
void AddDynamicsToRHS(
    Condition& rCurrentCondition,
    LocalSystemVectorType& rRHS_Contribution,
    LocalSystemMatrixType& D,
    LocalSystemMatrixType& rM,
    const ProcessInfo& rCurrentProcessInfo)
{
    //adding inertia contribution
    if (rM.size1() != 0)
    {
        const auto& r_const_cond_ref = rCurrentCondition;
        int k = OpenMPUtils::ThisThread();
        // Same Bossak-weighted acceleration as the element overload.
        r_const_cond_ref.GetSecondDerivativesVector(macc[k], 0);
        (macc[k]) *= (1.00 - mAlphaBossak);
        r_const_cond_ref.GetSecondDerivativesVector(maccold[k], 1);
        noalias(macc[k]) += mAlphaBossak * maccold[k];

        noalias(rRHS_Contribution) -= prod(rM, macc[k]);
    }
}

/*@} */
/**@name Protected Operations*/
/*@{ */

/*@} */
/**@name Protected  Access */
/*@{ */

/*@} */
/**@name Protected Inquiry */
/*@{ */

/*@} */
/**@name Protected LifeCycle */
/*@{ */

/*@} */

private:
/**@name Static Member Variables */
/*@{ */

/*@} */
/**@name Member Variables */
/*@{ */

// Utility for rotating local systems on slip boundaries.
CoordinateTransformationUtils<LocalSystemMatrixType,LocalSystemVectorType,double> mRotationTool;

// Variable identifying periodic node pairs (referenced, owned elsewhere).
const Variable<int>& mrPeriodicIdVar;

// Optional turbulence model executed once per nonlinear iteration (may be null).
Process::Pointer mpTurbulenceModel;

typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater();

/*@} */
/**@name Private Operators*/
/*@{ */

/*@} */
/**@name Private Operations*/
/*@{ */

/*@} */
/**@name Private  Access */
/*@{ */

/*@} */
/**@name Private Inquiry */
/*@{ */

/*@} */
/**@name Un accessible methods */
/*@{ */

/*@} */

}; /* Class Scheme */

/*@} */

/**@name Type Definitions */
/*@{ */

/*@} */

} /* namespace Kratos.*/

#endif /* KRATOS_RESIDUALBASED_PREDICTOR_CORRECTOR_BOSSAK_SCHEME  defined */
single1.c
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

/* Front-end test: "#pragma omp single" must reject the combination of the
   copyprivate and nowait clauses (in either order) while accepting each
   clause on its own.  The dg-error directives must remain on the same line
   as the pragma they check.  */
void
foo (int i)
{
  #pragma omp single copyprivate (i)
    ;
  #pragma omp single nowait
    ;
  #pragma omp single copyprivate (i) nowait	/* { dg-error "clause must not be used together with" } */
    ;
  #pragma omp single nowait copyprivate (i)	/* { dg-error "clause must not be used together with" } */
    ;
}
omp_dsymm_batch.c
/** * @file omp_dsymm_batch.c * * @brief BBLAS omp_dsymm_batch double routine. * * BBLAS is a software package provided by Univ. of Manchester, * Univ. of Tennessee. * * @version 1.0.0 * @author Samuel D. Relton * @author Pedro V. Lara * @author Mawussi Zounon * @date 2016-02-20 * **/ #ifndef DOXYGEN_SHOULD_SKIP_THIS /** * Code generation * @generated from ./bblas_omp/omp_zsymm_batch.c normal z -> d, Mon Jun 6 09:44:14 2016 **/ #endif #include<cblas.h> #include "bblas_omp.h" #include "bblas.h" #include <omp.h> #define REAL /** Purpose ------- <b>dsymm_batch</b> is an OpenMP version of dsymm_batch. It performs one of the matrix-matrix operations arrayC[i] = alpha[i]*arrayA[i]*arrayB[i] + beta[i]*arrayC[i], or arrayC[i] = alpha[i]*arrayB[i]*arrayA[i] + beta[i]*arrayC[i], where alpha[i] and beta[i] are scalars, arrayA[i] is a symmetric matrix and arrayB[i] and arrayC[i] are M[i] by N[i] matrices. Fixed and Variable Batch Operations ----------------------------------- Two types of batch operation are supported depending upon the value of batch_opts. When <tt>batch_opts = BBLAS_VARIABLE</tt> - all parameters that are arrays must have length at least batch_count. - all parameters that are arrays must have all values set. When <tt>batch_opts = BBLAS_FIXED</tt> - all parameters that are arrays (except for arrayA, arrayB, arrayC, and info) must have length at least one. - all parameters that are arrays (except for arrayA, arrayB, arrayC, and info) need only to have their first value set. This means that for a <tt>BBLAS_FIXED</tt> batch, the values of side[0], uplo[0], M[0], N[0], alpha[0], beta[0], lda[0], ldb[0], and ldc[0] are used for all computations. Parameters ---------- @param[in] side Array of <tt>enum BBLAS_SIDE</tt>. Each element side[i] specifies whether the symmetric matrix arrayA[i] appears on the left or right side of the operation as follows: - = 'BblasLeft' arrayC[i] = alpha[i]*arrayA[i]*arrayB[i] + beta[i]*arrayC[i]. 
- = 'BblasRight' arrayC[i] = alpha[i]*arrayB[i]*arrayA[i] + beta[i]*arrayC[i]. @param[in] uplo Array of <tt>enum BBLAS_UPLO</tt>. On entry, uplo[i] specifies whether the upper or lower triangular part of the symmetric matrix arrayA[i] is to be referenced as follows: - = 'BblasUpper' Only the upper triangular part of arrayA[i] is to be referenced. - = 'BblasLower' Only the lower triangular part of arrayA[i] is to be referenced. @param[in] M Array of <tt>int</tt>. Each element M[i] specifies the number of rows of the matrix arrayC[i]. M[i] must be greater than zero. @param[in] N Array of <tt>int</tt>. Each element N[i] specifies the number of columns of the matrix arrayC[i]. N[i] must be greater than zero. @param[in] alpha Array of <tt>real_16</tt>. @param[in] arrayA Array of pointers. Each element arrayA[i] is a pointer to a DOUBLE PRECISION matrix of dimension lda[i] by Ka[i], where Ka[i] = M[i] when side[i] = BblasLeft and is N[i] otherwise. When using side[i] = BblasLeft the M[i] by M[i] part of arrayA[i] must contain the symmetric matrix: when uplo[i] = BblasUpper, the upper triangular part of arrayA[i] must contain the upper triangular part of the symmetric matrix whilst the strictly lower triangular part is not used; similarly when uplo[i] = BblasLower, the lower triangular part of arrayA[i] must contain the lower triangular part of the symmetric matrix whilst the strictly upper triangular part is not used. When using side[i] = BblasRight the N[i] by N[i] part of arrayA[i] must contain the symmetric matrix: when uplo[i] = BblasUpper, the upper triangular part of arrayA[i] must contain the upper triangular part of the symmetric matrix whilst the strictly lower triangular part is not used; similarly when uplo[i] = BblasLower, the lower triangular part of arrayA[i] must contain the lower triangular part of the symmetric matrix whilst the strictly upper triangular part is not used. @param[in] lda Array of <tt>int</tt>. 
On entry, lda[i] specifies the first dimension of arrayA[i] as declared in the calling (sub) program. When side[i] = BblasLeft then lda[i] must be at least max( 1, M[i] ), otherwise lda[i] must be at least max( 1, N[i] ). @param[in] arrayB Array of pointers. Each element arrayB[i] is a pointer to a DOUBLE PRECISION matrix of dimension ldb[i] by N[i]. The leading M[i] by N[i] part of arrayB[i] must contain the matrix elements. @param[in] ldb Array of <tt>int</tt>. Each element ldb[i] specifies the first dimension of arrayB[i] as declared in the calling (sub) program. Each element ldb[i] must be at least max( 1, M[i] ). @param[in] beta Array of <tt>real_16</tt>. When beta[i] is set to zero arrayC[i] need not be set on input. @param[in,out] arrayC Array of pointers. Each element arrayC[i] is a pointer to a DOUBLE PRECISION matrix of dimension ldc[i] by N[i]. Before entry, the leading M[i] by N[i] part of the arrayC[i] must contain a matrix C, except when beta is zero, in which case C need not be set on entry. On exit, the matrix arrayC[i] is overwritten by the M[i] by N[i] matrix output. @param[in] ldc Array of <tt>int</tt>. Each element ldc[i] specifies the first dimension of arrayC[i] as declared in the calling (sub) program. The value ldc[i] must be at least max( 1, M[i] ). @param[in] batch_count <tt>int</tt> The number of matrices to operate on. @param[in] batch_opts <tt>enum BBLAS_OPTS</tt> One of BBLAS_FIXED or BBLAS_VARIABLE depending upon the type of batch operation required. @param[out] info Array of <tt>int</tt>. Each element info[i] is the error return code of the ith zymm in the batch, these need not be set on entry. The error codes can be found in bblas_macros.h. 
**/

/* Batched symmetric matrix-matrix multiply (double precision), parallelized
 * with one OpenMP task per batch entry.  For BBLAS_FIXED batches, arguments
 * are validated once using index 0 and any failure aborts the whole batch;
 * for BBLAS_VARIABLE batches each entry is validated independently and a
 * failing entry is skipped (its info[] is set) without affecting the rest.
 * See the doc comment above for the full parameter contract. */
void omp_dsymm_batch(
    const enum BBLAS_SIDE *side, const enum  BBLAS_UPLO *uplo,
    const int *M,  const int *N, const double *alpha,
    const double **arrayA, const int *lda,
    const double **arrayB, const int *ldb, const double *beta,
    double **arrayC, const int *ldc, const int batch_count,
    const enum BBLAS_OPTS batch_opts, int *info)
{
    /*Local variables */
    int first_index = 0;            /* all fixed-batch parameters are read from index 0 */
    int batch_iter;
    int LDA;                        /* minimum required leading dimension of A */
    char func_name[15] = "dsymm_batch";

    /* Check input arguments */
    /* NOTE(review): a negative batch_count is reported but does not return;
       the loops below simply execute zero iterations in that case. */
    if (batch_count < 0)
    {
        xerbla_batch(func_name, BBLAS_ERR_BATCH_COUNT, -1);
    }
    if (batch_opts == BBLAS_FIXED)
    {
        /* Validate the shared (index-0) parameters once for the whole batch. */
        if ((side[first_index] != BblasLeft) &&
            (side[first_index] != BblasRight))
        {
            xerbla_batch(func_name, BBLAS_ERR_SIDE, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter]  = BBLAS_ERR_SIDE;
            }
            return;
        }
        if ((uplo[first_index] != BblasUpper) &&
            (uplo[first_index] != BblasLower))
        {
            xerbla_batch(func_name, BBLAS_ERR_UPLO, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_UPLO;
            }
            return;
        }
        if (M[first_index] < 0)
        {
            xerbla_batch(func_name, BBLAS_ERR_M, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_M;
            }
            return;
        }
        if (N[first_index] < 0)
        {
            xerbla_batch(func_name, BBLAS_ERR_N, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_N;
            }
            return;
        }
        /* A is M-by-M when it multiplies from the left, N-by-N from the right. */
        if (side[first_index] == BblasLeft)
        {
            LDA = M[first_index];
        } else
        {
            LDA = N[first_index];
        }
        if (lda[first_index] < LDA)
        {
            xerbla_batch(func_name, BBLAS_ERR_LDA, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDA;
            }
            return;
        }
        if (ldb[first_index] < max(1, M[first_index]))
        {
            xerbla_batch(func_name, BBLAS_ERR_LDB, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDB;
            }
            return;
        }
        if (ldc[first_index] < max(1, M[first_index]))
        {
            xerbla_batch(func_name, BBLAS_ERR_LDC, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDC;
            }
            return;
        }
        /* particular case: empty matrices or an identity update — nothing to do */
        if (M[first_index] == 0 || N[first_index] == 0 ||
            (alpha[first_index] == (double)0.0 &&
             beta[first_index] == (double)1.0))
        {
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_SUCCESS;
            }
            return;
        }

        /* All entries share the index-0 sizes, so each iteration is independent. */
        #pragma omp parallel for private( batch_iter)
        for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
        {
            /*Call to cblas_dsymm */
            cblas_dsymm(
                BblasColMajor,
                side[first_index],
                uplo[first_index],
                M[first_index],
                N[first_index],
                (alpha[first_index]),
                arrayA[batch_iter],
                lda[first_index],
                arrayB[batch_iter],
                ldb[first_index],
                (beta[first_index]),
                arrayC[batch_iter],
                ldc[first_index]);
            /* Successful */
            info[batch_iter] = BBLAS_SUCCESS;
        } /*END FIXED SIZE FOR LOOP */
    }else if (batch_opts == BBLAS_VARIABLE)
    {
        /* Each entry carries its own sizes/scalars; validate per entry and
         * skip (with an error code) rather than aborting the batch. */
        #pragma omp parallel for private( batch_iter, LDA)
        for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
        {
            /* Check input arguments */
            if ((side[batch_iter] != BblasLeft) &&
                (side[batch_iter] != BblasRight))
            {
                xerbla_batch(func_name, BBLAS_ERR_SIDE, batch_iter);
                info[batch_iter] = BBLAS_ERR_SIDE;
                continue;
            }
            if ((uplo[batch_iter] != BblasUpper) &&
                (uplo[batch_iter] != BblasLower))
            {
                xerbla_batch(func_name, BBLAS_ERR_UPLO, batch_iter);
                info[batch_iter] = BBLAS_ERR_UPLO;
                continue;
            }
            if (M[batch_iter] < 0)
            {
                xerbla_batch(func_name, BBLAS_ERR_M, batch_iter);
                info[batch_iter] = BBLAS_ERR_M;
                continue;
            }
            if (N[batch_iter] < 0)
            {
                xerbla_batch(func_name, BBLAS_ERR_N, batch_iter);
                info[batch_iter] = BBLAS_ERR_N;
                continue;
            }
            if (side[batch_iter] == BblasLeft)
            {
                LDA = M[batch_iter];
            } else
            {
                LDA = N[batch_iter];
            }
            if (lda[batch_iter] < LDA)
            {
                xerbla_batch(func_name, BBLAS_ERR_LDA, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDA;
                continue;
            }
            if (ldb[batch_iter] < max(1, M[batch_iter]))
            {
                xerbla_batch(func_name, BBLAS_ERR_LDB, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDB;
                continue;
            }
            if (ldc[batch_iter] < max(1, M[batch_iter]))
            {
                xerbla_batch(func_name, BBLAS_ERR_LDC, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDC;
                continue;
            }
            /* particular case: empty matrices or an identity update */
            if (M[batch_iter] == 0 || N[batch_iter] == 0 ||
                (alpha[batch_iter] == (double)0.0 &&
                 beta[batch_iter] == (double)1.0))
            {
                info[batch_iter] =  BBLAS_SUCCESS;
                continue;
            }
            cblas_dsymm(
                BblasColMajor,
                side[batch_iter],
                uplo[batch_iter],
                M[batch_iter],
                N[batch_iter],
                (alpha[batch_iter]),
                arrayA[batch_iter],
                lda[batch_iter],
                arrayB[batch_iter],
                ldb[batch_iter],
                (beta[batch_iter]),
                arrayC[batch_iter],
                ldc[batch_iter]);
            /* Successful */
            info[batch_iter] = BBLAS_SUCCESS;
        }
    }else
    {
        /* Unrecognized batch_opts value. */
        xerbla_batch(func_name, BBLAS_ERR_BATCH_OPTS, -1);
    }
}
#undef REAL
GB_unop__bnot_uint64_uint64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__bnot_uint64_uint64)
// op(A') function:  GB (_unop_tran__bnot_uint64_uint64)

// C type:   uint64_t
// A type:   uint64_t
// cast:     uint64_t cij = aij
// unaryop:  cij = ~(aij)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = ~(x) ;

// casting
#define GB_CAST(z, aij) \
    uint64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint64_t aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = aij ;               \
    Cx [pC] = ~(z) ;        \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BNOT || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies bitwise-NOT elementwise over anz entries; Cx and Ax may alias.
// Ab (bitmap) selects which entries are present when A is a bitmap matrix.
GrB_Info GB (_unop_apply__bnot_uint64_uint64)
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            uint64_t z = aij ;
            Cx [p] = ~(z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint64_t aij = Ax [p] ;
            uint64_t z = aij ;
            Cx [p] = ~(z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared via GB_unop_transpose.c, which expands
// using the GB_* macros defined above.
GrB_Info GB (_unop_tran__bnot_uint64_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
blackscholes.c
// Copyright (c) 2007 Intel Corp. // Black-Scholes // Analytical method for calculating European Options // // // Reference Source: Options, Futures, and Other Derivatives, 3rd Edition, Prentice // Hall, John C. Hull, #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #ifdef ENABLE_PARSEC_HOOKS #include <hooks.h> #endif // Multi-threaded pthreads header #ifdef ENABLE_THREADS // Add the following line so that icc 9.0 is compatible with pthread lib. #define __thread __threadp MAIN_ENV #undef __thread #endif // Multi-threaded OpenMP header #ifdef ENABLE_OPENMP #include <omp.h> #endif #ifdef ENABLE_TBB #include "tbb/blocked_range.h" #include "tbb/parallel_for.h" #include "tbb/task_scheduler_init.h" #include "tbb/tick_count.h" using namespace std; using namespace tbb; #endif //ENABLE_TBB // Multi-threaded header for Windows #ifdef WIN32 #pragma warning(disable : 4305) #pragma warning(disable : 4244) #include <windows.h> #endif //Precision to use for calculations #define fptype float #define NUM_RUNS 100 typedef struct OptionData_ { fptype s; // spot price fptype strike; // strike price fptype r; // risk-free interest rate fptype divq; // dividend rate fptype v; // volatility fptype t; // time to maturity or option expiration in years // (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc) char OptionType; // Option type. 
"P"=PUT, "C"=CALL
        fptype divs;       // dividend vals (not used in this test)
        fptype DGrefval;   // DerivaGem Reference Value
} OptionData;

// Global problem data, filled in by the driver (outside this chunk).
OptionData *data;       // option descriptors read from the input file
fptype *prices;         // computed option prices (output)
int numOptions;

// Per-option inputs split into parallel arrays for the pricing loop.
int    * otype;
fptype * sptprice;
fptype * strike;
fptype * rate;
fptype * volatility;
fptype * otime;
int numError = 0;       // mismatches vs. DGrefval when ERR_CHK is enabled
int nThreads;

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Cumulative Normal Distribution Function
// See Hull, Section 11.8, P.243-244
#define inv_sqrt_2xPI 0.39894228040143270286

// Polynomial approximation of the standard normal CDF N(x) (see Hull ref.
// above).  Negative inputs are handled via the symmetry N(-x) = 1 - N(x).
fptype CNDF ( fptype InputX )
{
    int sign;

    fptype OutputX;
    fptype xInput;
    fptype xNPrimeofX;
    fptype expValues;
    fptype xK2;
    fptype xK2_2, xK2_3;
    fptype xK2_4, xK2_5;
    fptype xLocal, xLocal_1;
    fptype xLocal_2, xLocal_3;

    // Check for negative value of InputX
    if (InputX < 0.0) {
        InputX = -InputX;
        sign = 1;
    } else
        sign = 0;

    xInput = InputX;

    // Compute NPrimeX term common to both four & six decimal accuracy calcs
    // (the standard normal density at InputX).
    expValues = exp(-0.5f * InputX * InputX);
    xNPrimeofX = expValues;
    xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI;

    // Powers of k = 1/(1 + 0.2316419*x) for the 5-term polynomial.
    xK2 = 0.2316419 * xInput;
    xK2 = 1.0 + xK2;
    xK2 = 1.0 / xK2;
    xK2_2 = xK2 * xK2;
    xK2_3 = xK2_2 * xK2;
    xK2_4 = xK2_3 * xK2;
    xK2_5 = xK2_4 * xK2;

    // Accumulate the polynomial, then N(x) = 1 - density * poly.
    xLocal_1 = xK2 * 0.319381530;
    xLocal_2 = xK2_2 * (-0.356563782);
    xLocal_3 = xK2_3 * 1.781477937;
    xLocal_2 = xLocal_2 + xLocal_3;
    xLocal_3 = xK2_4 * (-1.821255978);
    xLocal_2 = xLocal_2 + xLocal_3;
    xLocal_3 = xK2_5 * 1.330274429;
    xLocal_2 = xLocal_2 + xLocal_3;

    xLocal_1 = xLocal_2 + xLocal_1;
    xLocal   = xLocal_1 * xNPrimeofX;
    xLocal   = 1.0 - xLocal;

    OutputX  = xLocal;

    // Mirror the result for originally-negative inputs.
    if (sign) {
        OutputX = 1.0 - OutputX;
    }

    return OutputX;
}

//////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// fptype BlkSchlsEqEuroNoDiv( fptype sptprice, fptype strike, fptype rate, fptype volatility, fptype time, int otype, float timet ) { fptype OptionPrice; // local private working variables for the calculation fptype xStockPrice; fptype xStrikePrice; fptype xRiskFreeRate; fptype xVolatility; fptype xTime; fptype xSqrtTime; fptype logValues; fptype xLogTerm; fptype xD1; fptype xD2; fptype xPowerTerm; fptype xDen; fptype d1; fptype d2; fptype FutureValueX; fptype NofXd1; fptype NofXd2; fptype NegNofXd1; fptype NegNofXd2; xStockPrice = sptprice; xStrikePrice = strike; xRiskFreeRate = rate; xVolatility = volatility; xTime = time; xSqrtTime = sqrt(xTime); logValues = log( sptprice / strike ); xLogTerm = logValues; xPowerTerm = xVolatility * xVolatility; xPowerTerm = xPowerTerm * 0.5; xD1 = xRiskFreeRate + xPowerTerm; xD1 = xD1 * xTime; xD1 = xD1 + xLogTerm; xDen = xVolatility * xSqrtTime; xD1 = xD1 / xDen; xD2 = xD1 - xDen; d1 = xD1; d2 = xD2; NofXd1 = CNDF( d1 ); NofXd2 = CNDF( d2 ); FutureValueX = strike * ( exp( -(rate)*(time) ) ); if (otype == 0) { OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2); } else { NegNofXd1 = (1.0 - NofXd1); NegNofXd2 = (1.0 - NofXd2); OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1); } return OptionPrice; } #ifdef ENABLE_TBB struct mainWork { mainWork() {} mainWork(mainWork &w, tbb::split) {} void operator()(const tbb::blocked_range<int> &range) const { fptype price; int begin = range.begin(); int end = range.end(); for (int i=begin; i!=end; i++) { /* Calling main function to calculate option value based on * Black & Scholes's equation. 
*/ price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i], rate[i], volatility[i], otime[i], otype[i], 0); prices[i] = price; #ifdef ERR_CHK fptype priceDelta = data[i].DGrefval - price; if( fabs(priceDelta) >= 1e-5 ){ fprintf(stderr,"Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price, data[i].DGrefval, priceDelta); numError ++; } #endif } } }; #endif // ENABLE_TBB ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// #ifdef ENABLE_TBB int bs_thread(void *tid_ptr) { int j; tbb::affinity_partitioner a; mainWork doall; for (j=0; j<NUM_RUNS; j++) { tbb::parallel_for(tbb::blocked_range<int>(0, numOptions), doall, a); } return 0; } #else // !ENABLE_TBB #ifdef WIN32 DWORD WINAPI bs_thread(LPVOID tid_ptr){ #else int bs_thread(void *tid_ptr) { #endif int i, j; fptype price; fptype priceDelta; int tid = *(int *)tid_ptr; int start = tid * (numOptions / nThreads); int end = start + (numOptions / nThreads); for (j=0; j<NUM_RUNS; j++) { #ifdef ENABLE_OPENMP #pragma omp parallel for private(i, price, priceDelta) for (i=0; i<numOptions; i++) { #else //ENABLE_OPENMP for (i=start; i<end; i++) { #endif //ENABLE_OPENMP /* Calling main function to calculate option value based on * Black & Scholes's equation. */ price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i], rate[i], volatility[i], otime[i], otype[i], 0); prices[i] = price; #ifdef ERR_CHK priceDelta = data[i].DGrefval - price; if( fabs(priceDelta) >= 1e-4 ){ printf("Error on %d. 
Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price, data[i].DGrefval, priceDelta); numError ++; } #endif } } return 0; } #endif //ENABLE_TBB int main (int argc, char **argv) { FILE *file; int i; int loopnum; fptype * buffer; int * buffer2; int rv; #ifdef PARSEC_VERSION #define __PARSEC_STRING(x) #x #define __PARSEC_XSTRING(x) __PARSEC_STRING(x) printf("PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION)"\n"); fflush(NULL); #else printf("PARSEC Benchmark Suite\n"); fflush(NULL); #endif //PARSEC_VERSION #ifdef ENABLE_PARSEC_HOOKS __parsec_bench_begin(__parsec_blackscholes); #endif if (argc != 4) { printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]); exit(1); } nThreads = atoi(argv[1]); char *inputFile = argv[2]; char *outputFile = argv[3]; //Read input data from file file = fopen(inputFile, "r"); if(file == NULL) { printf("ERROR: Unable to open file `%s'.\n", inputFile); exit(1); } rv = fscanf(file, "%i", &numOptions); if(rv != 1) { printf("ERROR: Unable to read from file `%s'.\n", inputFile); fclose(file); exit(1); } if(nThreads > numOptions) { printf("WARNING: Not enough work, reducing number of threads to match number of options.\n"); nThreads = numOptions; } #if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) && !defined(ENABLE_TBB) if(nThreads != 1) { printf("Error: <nthreads> must be 1 (serial version)\n"); exit(1); } #endif // alloc spaces for the option data data = (OptionData*)malloc(numOptions*sizeof(OptionData)); prices = (fptype*)malloc(numOptions*sizeof(fptype)); for ( loopnum = 0; loopnum < numOptions; ++ loopnum ) { rv = fscanf(file, "%f %f %f %f %f %f %c %f %f", &data[loopnum].s, &data[loopnum].strike, &data[loopnum].r, &data[loopnum].divq, &data[loopnum].v, &data[loopnum].t, &data[loopnum].OptionType, &data[loopnum].divs, &data[loopnum].DGrefval); if(rv != 9) { printf("ERROR: Unable to read from file `%s'.\n", inputFile); fclose(file); exit(1); } } rv = fclose(file); if(rv != 0) { printf("ERROR: Unable to close file 
`%s'.\n", inputFile); exit(1); } #ifdef ENABLE_THREADS MAIN_INITENV(,8000000,nThreads); #endif printf("Num of Options: %d\n", numOptions); printf("Num of Runs: %d\n", NUM_RUNS); #define PAD 256 #define LINESIZE 64 buffer = (fptype *) malloc(5 * numOptions * sizeof(fptype) + PAD); sptprice = (fptype *) (((unsigned long long)buffer + PAD) & ~(LINESIZE - 1)); strike = sptprice + numOptions; rate = strike + numOptions; volatility = rate + numOptions; otime = volatility + numOptions; buffer2 = (int *) malloc(numOptions * sizeof(fptype) + PAD); otype = (int *) (((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1)); for (i=0; i<numOptions; i++) { otype[i] = (data[i].OptionType == 'P') ? 1 : 0; sptprice[i] = data[i].s; strike[i] = data[i].strike; rate[i] = data[i].r; volatility[i] = data[i].v; otime[i] = data[i].t; } printf("Size of data: %lu\n", numOptions * (sizeof(OptionData) + sizeof(int))); #ifdef ENABLE_PARSEC_HOOKS __parsec_roi_begin(); #endif #ifdef ENABLE_THREADS #ifdef WIN32 HANDLE *threads; int *nums; threads = (HANDLE *) malloc (nThreads * sizeof(HANDLE)); nums = (int *) malloc (nThreads * sizeof(int)); for(i=0; i<nThreads; i++) { nums[i] = i; threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0); } WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE); free(threads); free(nums); #else int *tids; tids = (int *) malloc (nThreads * sizeof(int)); for(i=0; i<nThreads; i++) { tids[i]=i; CREATE_WITH_ARG(bs_thread, &tids[i]); } WAIT_FOR_END(nThreads); free(tids); #endif //WIN32 #else //ENABLE_THREADS #ifdef ENABLE_OPENMP { int tid=0; omp_set_num_threads(nThreads); bs_thread(&tid); } #else //ENABLE_OPENMP #ifdef ENABLE_TBB tbb::task_scheduler_init init(nThreads); int tid=0; bs_thread(&tid); #else //ENABLE_TBB //serial version int tid=0; bs_thread(&tid); #endif //ENABLE_TBB #endif //ENABLE_OPENMP #endif //ENABLE_THREADS #ifdef ENABLE_PARSEC_HOOKS __parsec_roi_end(); #endif //Write prices to output file file = fopen(outputFile, "w"); if(file == NULL) { 
printf("ERROR: Unable to open file `%s'.\n", outputFile); exit(1); } rv = fprintf(file, "%i\n", numOptions); if(rv < 0) { printf("ERROR: Unable to write to file `%s'.\n", outputFile); fclose(file); exit(1); } for(i=0; i<numOptions; i++) { rv = fprintf(file, "%.18f\n", prices[i]); if(rv < 0) { printf("ERROR: Unable to write to file `%s'.\n", outputFile); fclose(file); exit(1); } } rv = fclose(file); if(rv != 0) { printf("ERROR: Unable to close file `%s'.\n", outputFile); exit(1); } #ifdef ERR_CHK printf("Num Errors: %d\n", numError); #endif free(data); free(prices); #ifdef ENABLE_PARSEC_HOOKS __parsec_bench_end(); #endif return 0; }
omp_hello.c
#include <stdio.h> int main(int argc, char* argv[]) { printf("Obey your master!\n"); #pragma omp parallel { printf("Slave to the grind\n"); } printf("Back with master\n"); }
GB_binop__land_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__land_uint16) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__land_uint16) // A.*B function (eWiseMult): GB (_AemultB_03__land_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__land_uint16) // A*D function (colscale): GB (_AxD__land_uint16) // D*A function (rowscale): GB (_DxB__land_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__land_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__land_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_uint16) // C=scalar+B GB (_bind1st__land_uint16) // C=scalar+B' GB (_bind1st_tran__land_uint16) // C=A+scalar GB (_bind2nd__land_uint16) // C=A'+scalar GB (_bind2nd_tran__land_uint16) // C type: uint16_t // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = ((aij != 0) && (bij != 0)) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical 
#define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = ((x != 0) && (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LAND || GxB_NO_UINT16 || GxB_NO_LAND_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__land_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__land_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__land_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__land_uint16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const 
int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__land_uint16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__land_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__land_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t 
*restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__land_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__land_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__land_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__land_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = Bx [p] ; Cx [p] = ((x != 0) && (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__land_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = Ax [p] ; Cx [p] = ((aij != 0) && (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = ((x != 0) && (aij != 0)) ; \ } GrB_Info GB (_bind1st_tran__land_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = ((aij != 0) && (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__land_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
toimg.c
/* Copyright 2013-2018 The Regents of the University of California. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2013, 2015 Martin Uecker <uecker@eecs.berkeley.edu> * 2015, 2018 Jon Tamir <jtamir@eecs.berkeley.edu> */ #include <stdlib.h> #include <assert.h> #include <stdio.h> #include <stdint.h> #include <strings.h> #include <complex.h> #include <stdbool.h> #include <math.h> #include "num/multind.h" #include "num/init.h" #include "num/flpmath.h" #include "misc/misc.h" #include "misc/debug.h" #include "misc/mmio.h" #include "misc/png.h" #include "misc/dicom.h" #include "misc/opts.h" #ifndef DIMS #define DIMS 16 #endif #ifndef CFL_SIZE #define CFL_SIZE sizeof(complex float) #endif static const char usage_str[] = "[-h] <input> <output_prefix>"; static const char help_str[] = "Create magnitude images as png or proto-dicom.\n" "The first two non-singleton dimensions will\n" "be used for the image, and the other dimensions\n" "will be looped over.\n"; // from view:src/draw.c static double clamp(double a, double b, double x) { return (x < a) ? a : ((x > b) ? b : x); } static double windowing(double g, double a, double b, double x) { return pow(clamp(0., 1., (x - a) / (b - a)), g); } static void toimg(bool dicom, bool use_windowing, const char* name, long inum, float gamma, float contrast, float window, float scale, long h, long w, const complex float* data) { int len = strlen(name); assert(len >= 1); int nr_bytes = dicom ? 2 : 3; unsigned char (*buf)[h][w][nr_bytes] = TYPE_ALLOC(unsigned char[h][w][nr_bytes]); float max_val = dicom ? 65535. : 255.; for (int i = 0; i < h; i++) { for (int j = 0; j < w; j++) { double val = cabsf(data[j * h + i]) / scale; unsigned int value = (unsigned int)(max_val * (use_windowing ? 
windowing(gamma, contrast, window, val) : val)); if (!dicom) { (*buf)[i][j][0] = value; (*buf)[i][j][1] = value; (*buf)[i][j][2] = value; } else { (*buf)[i][j][0] = (value >> 0) & 0xFF; (*buf)[i][j][2] = (value >> 8) & 0xFF; } } } (dicom ? dicom_write : png_write_rgb24)(name, w, h, inum, &(*buf)[0][0][0]); free(buf); } static void toimg_stack(const char* name, bool dicom, bool single_scale, bool use_windowing, float gamma, float contrast, float window, const long dims[DIMS], const complex float* data) { long data_size = md_calc_size(DIMS, dims); long sq_dims[DIMS] = { [0 ... DIMS - 1] = 1 }; int l = 0; for (int i = 0; i < DIMS; i++) if (1 != dims[i]) sq_dims[l++] = dims[i]; float max = 0.; for (long i = 0; i < data_size; i++) max = MAX(cabsf(data[i]), max); int len = strlen(name); assert(len >= 1); long num_imgs = md_calc_size(DIMS - 2, sq_dims + 2); long img_size = md_calc_size(2, sq_dims); debug_printf(DP_INFO, "Writing %d image(s)...", num_imgs); #pragma omp parallel for for (long i = 0; i < num_imgs; i++) { char name_i[len + 10]; // extra space for ".0000.png" if (num_imgs > 1) sprintf(name_i, "%s-%04ld.%s", name, i, dicom ? "dcm" : "png"); else sprintf(name_i, "%s.%s", name, dicom ? "dcm" : "png"); float scale = 0.; if (use_windowing) scale = md_znorm(2, sq_dims, data + i * img_size) / md_calc_size(2, sq_dims); else if (single_scale) scale = max; else for (long j = 0; j < md_calc_size(2, sq_dims); j++) scale = MAX(cabsf(data[i * img_size + j]), scale); if (0. 
== scale) scale = 1.; toimg(dicom, use_windowing, name_i, i, gamma, contrast, window, scale, sq_dims[0], sq_dims[1], data + i * img_size); } debug_printf(DP_INFO, "done.\n", num_imgs); } int main_toimg(int argc, char* argv[]) { float gamma = 1.; float contrast = 0.; float window = 750.; bool use_windowing = false; bool single_scale = true; bool dicom = false; const struct opt_s opts[] = { OPT_FLOAT('g', &gamma, "gamma", "gamma level"), OPT_FLOAT('c', &contrast, "contrast", "contrast level"), OPT_FLOAT('w', &window, "window", "window level"), OPT_SET('d', &dicom, "write to dicom format (deprecated, use extension .dcm)"), OPT_CLEAR('m', &single_scale, "re-scale each image"), OPT_SET('W', &use_windowing, "use dynamic windowing"), }; cmdline(&argc, argv, 2, 2, usage_str, help_str, ARRAY_SIZE(opts), opts); num_init(); char* ext = rindex(argv[2], '.'); if (NULL != ext) { assert(!dicom); if (0 == strcmp(ext, ".dcm")) dicom = true; else if (0 != strcmp(ext, ".png")) error("Unknown file extension."); *ext = '\0'; } long dims[DIMS]; complex float* data = load_cfl(argv[1], DIMS, dims); toimg_stack(argv[2], dicom, single_scale, use_windowing, gamma, contrast, window, dims, data); unmap_cfl(DIMS, dims, data); return 0; }
omp_parallel_sections_private.c
<ompts:test> <ompts:testdescription>Test which checks the omp parallel sections private directive.</ompts:testdescription> <ompts:ompversion>2.0</ompts:ompversion> <ompts:directive>omp parallel sections private</ompts:directive> <ompts:dependences>omp critical</ompts:dependences> <ompts:testcode> #include <stdio.h> #include "omp_testsuite.h" int <ompts:testcode:functionname>omp_parallel_sections_private</ompts:testcode:functionname>(FILE * logFile){ int sum=7; int sum0=0; int known_sum; int i; #pragma omp parallel sections private(<ompts:check>sum0,</ompts:check><ompts:crosscheck></ompts:crosscheck>i) { #pragma omp section { sum0=0; for (i=1;i<400;i++) sum0=sum0+i; #pragma omp critical { sum= sum+sum0; } /*end of critical */ } #pragma omp section { sum0=0; for(i=400;i<700;i++) sum0=sum0+i; #pragma omp critical { sum= sum+sum0; } /*end of critical */ } #pragma omp section { sum0=0; for(i=700;i<1000;i++) sum0=sum0+i; #pragma omp critical { sum= sum+sum0; } /*end of critical */ } } /*end of paralell sections*/ known_sum=(999*1000)/2+7; return (known_sum==sum); } /* end of check_section_private*/ </ompts:testcode> </ompts:test>
tov_interp.h
// This C header file reads a TOV solution from data file and performs // 1D interpolation of the solution to a desired radius. // Author: Zachariah B. Etienne // zachetie **at** gmail **dot* com #include "stdio.h" #include "stdlib.h" #include "math.h" #include "string.h" #define REAL double //#define STANDALONE_UNIT_TEST int count_num_lines_in_file(FILE *in1Dpolytrope) { int numlines_in_file = 0; char * line = NULL; size_t len = 0; ssize_t read; while ((read = getline(&line, &len, in1Dpolytrope)) != -1) { numlines_in_file++; } rewind(in1Dpolytrope); free(line); return numlines_in_file; } int read_datafile__set_arrays(FILE *in1Dpolytrope, REAL *r_Schw_arr,REAL *rho_arr,REAL *rho_baryon_arr,REAL *P_arr,REAL *M_arr,REAL *expnu_arr,REAL *exp4phi_arr,REAL *rbar_arr) { char * line = NULL; size_t len = 0; ssize_t read; int which_line = 0; while ((read = getline(&line, &len, in1Dpolytrope)) != -1) { // Define the line delimiters (i.e., the stuff that goes between the data on a given // line of data. Here, we define both spaces " " and tabs "\t" as data delimiters. const char delimiters[] = " \t"; //Now we define "token", a pointer to the first column of data char *token; //Each successive time we call strtok(NULL,blah), we read in a new column of data from // the originally defined character array, as pointed to by token. 
token=strtok(line, delimiters); if(token==NULL) { printf("BADDDD\n"); return 1; } r_Schw_arr[which_line] = strtod(token, NULL); token = strtok( NULL, delimiters ); rho_arr[which_line] = strtod(token, NULL); token = strtok( NULL, delimiters ); rho_baryon_arr[which_line] = strtod(token, NULL); token = strtok( NULL, delimiters ); P_arr[which_line] = strtod(token, NULL); token = strtok( NULL, delimiters ); M_arr[which_line] = strtod(token, NULL); token = strtok( NULL, delimiters ); expnu_arr[which_line] = strtod(token, NULL); token = strtok( NULL, delimiters ); exp4phi_arr[which_line] = strtod(token, NULL); token = strtok( NULL, delimiters ); rbar_arr[which_line] = strtod(token, NULL); which_line++; } free(line); return 0; } void TOV_interpolate_1D(REAL rrbar,const REAL Rbar,const int Rbar_idx,const int interp_stencil_size, const int numlines_in_file,const REAL *r_Schw_arr,const REAL *rho_arr,const REAL *rho_baryon_arr,const REAL *P_arr,const REAL *M_arr,const REAL *expnu_arr,const REAL *exp4phi_arr,const REAL *rbar_arr, REAL *rho,REAL *rho_baryon,REAL *P,REAL *M,REAL *expnu,REAL *exp4phi) { // Find interpolation index using Bisection root-finding algorithm: int bisection_idx_finder(const REAL rr, const int numlines_in_file, const REAL *rbar_arr) { int x1 = 0; int x2 = numlines_in_file-1; REAL y1 = rrbar-rbar_arr[x1]; REAL y2 = rrbar-rbar_arr[x2]; if(y1*y2 >= 0) { fprintf(stderr,"INTERPOLATION BRACKETING ERROR %e | %e %e\n",rr,y1,y2); exit(1); } for(int i=0;i<numlines_in_file;i++) { int x_midpoint = (x1+x2)/2; REAL y_midpoint = rrbar-rbar_arr[x_midpoint]; if(y_midpoint*y1 < 0) { x2 = x_midpoint; y2 = y_midpoint; } else { x1 = x_midpoint; y1 = y_midpoint; } if( abs(x2-x1) == 1 ) { // If rbar_arr[x1] is closer to rrbar than rbar_arr[x2] then return x1: if(fabs(rrbar-rbar_arr[x1]) < fabs(rrbar-rbar_arr[x2])) return x1; // Otherwiser return x2: return x2; } } fprintf(stderr,"INTERPOLATION BRACKETING ERROR: DID NOT CONVERGE.\n"); exit(1); } // For this case, we know that 
for all functions, f(r) = f(-r) if(rrbar < 0) rrbar = -rrbar; // First find the central interpolation stencil index: int idx = bisection_idx_finder(rrbar,numlines_in_file,rbar_arr); #ifdef MAX #undef MAX #endif #define MAX(A, B) ( ((A) > (B)) ? (A) : (B) ) int idxmin = MAX(0,idx-interp_stencil_size/2-1); #ifdef MIN #undef MIN #endif #define MIN(A, B) ( ((A) < (B)) ? (A) : (B) ) // -= Do not allow the interpolation stencil to cross the star's surface =- // max index is when idxmin + (interp_stencil_size-1) = Rbar_idx // -> idxmin at most can be Rbar_idx - interp_stencil_size + 1 if(rrbar < Rbar) { idxmin = MIN(idxmin,Rbar_idx - interp_stencil_size + 1); } else { idxmin = MAX(idxmin,Rbar_idx+1); idxmin = MIN(idxmin,numlines_in_file - interp_stencil_size + 1); } // Now perform the Lagrange polynomial interpolation: // First set the interpolation coefficients: REAL rbar_sample[interp_stencil_size]; for(int i=idxmin;i<idxmin+interp_stencil_size;i++) { rbar_sample[i-idxmin] = rbar_arr[i]; } REAL l_i_of_r[interp_stencil_size]; for(int i=0;i<interp_stencil_size;i++) { REAL numer = 1.0; REAL denom = 1.0; for(int j=0;j<i;j++) { numer *= rrbar - rbar_sample[j]; denom *= rbar_sample[i] - rbar_sample[j]; } for(int j=i+1;j<interp_stencil_size;j++) { numer *= rrbar - rbar_sample[j]; denom *= rbar_sample[i] - rbar_sample[j]; } l_i_of_r[i] = numer/denom; } // Then perform the interpolation: *rho = 0.0; *rho_baryon = 0.0; *P = 0.0; *M = 0.0; *expnu = 0.0; *exp4phi = 0.0; REAL r_Schw = 0.0; for(int i=idxmin;i<idxmin+interp_stencil_size;i++) { r_Schw += l_i_of_r[i-idxmin] * r_Schw_arr[i]; *rho += l_i_of_r[i-idxmin] * rho_arr[i]; *rho_baryon += l_i_of_r[i-idxmin] * rho_baryon_arr[i]; *P += l_i_of_r[i-idxmin] * P_arr[i]; *M += l_i_of_r[i-idxmin] * M_arr[i]; *expnu += l_i_of_r[i-idxmin] * expnu_arr[i]; *exp4phi += l_i_of_r[i-idxmin] * exp4phi_arr[i]; } if(rrbar > Rbar) { *rho = 0; *rho_baryon = 0; *P = 0; *M = M_arr[Rbar_idx+1]; *expnu = 1. 
- 2.*(*M) / r_Schw; *exp4phi = pow(r_Schw / rrbar,2.0); } } // To compile, copy this file to tov_interp.c, and then run: // gcc -Ofast tov_interp.c -o tov_interp -DSTANDALONE_UNIT_TEST #ifdef STANDALONE_UNIT_TEST int main() { // Open the data file: char filename[100]; sprintf(filename,"../outputTOVpolytrope.txt"); FILE *in1Dpolytrope = fopen(filename, "r"); if (in1Dpolytrope == NULL) { fprintf(stderr,"ERROR: could not open file %s\n",filename); exit(1); } // Count the number of lines in the data file: int numlines_in_file = count_num_lines_in_file(in1Dpolytrope); // Allocate space for all data arrays: REAL *r_Schw_arr = (REAL *)malloc(sizeof(REAL)*numlines_in_file); REAL *rho_arr = (REAL *)malloc(sizeof(REAL)*numlines_in_file); REAL *rho_baryon_arr = (REAL *)malloc(sizeof(REAL)*numlines_in_file); REAL *P_arr = (REAL *)malloc(sizeof(REAL)*numlines_in_file); REAL *M_arr = (REAL *)malloc(sizeof(REAL)*numlines_in_file); REAL *expnu_arr = (REAL *)malloc(sizeof(REAL)*numlines_in_file); REAL *exp4phi_arr = (REAL *)malloc(sizeof(REAL)*numlines_in_file); REAL *rbar_arr = (REAL *)malloc(sizeof(REAL)*numlines_in_file); // Read from the data file, filling in arrays if(read_datafile__set_arrays(in1Dpolytrope, r_Schw_arr,rho_arr,rho_baryon_arr,P_arr,M_arr,expnu_arr,exp4phi_arr,rbar_arr) == 1) { fprintf(stderr,"ERROR WHEN READING FILE %s!\n",filename); exit(1); } fclose(in1Dpolytrope); REAL Rbar = -100; int Rbar_idx = -100; for(int i=1;i<numlines_in_file;i++) { if(rho_arr[i-1]>0 && rho_arr[i]==0) { Rbar = rbar_arr[i-1]; Rbar_idx = i-1; } } if(Rbar<0) { fprintf(stderr,"Error: could not find r=R from data file.\n"); exit(1); } // Next, interpolate! 
// Create trial radius array: int num_r_pts = 100000; //REAL *r_out_arr = (REAL *)malloc(sizeof(REAL)*num_r_pts); struct drand48_data randBuffer; srand48_r(1313, &randBuffer); #pragma omp parallel for for(int i=0;i<num_r_pts;i++) { REAL rrbar; drand48_r(&randBuffer,&rrbar); //rrbar *= 10.; //rbar_arr[numlines_in_file-1]; rrbar = rrbar*0.1 + 0.8; //rbar_arr[numlines_in_file-1]; REAL rho,rho_baryon,P,M,expnu,exp4phi; TOV_interpolate_1D(rrbar,Rbar,Rbar_idx,4, numlines_in_file,r_Schw_arr,rho_arr,rho_baryon_arr,P_arr,M_arr,expnu_arr,exp4phi_arr,rbar_arr, &rho,&rho_baryon,&P,&M,&expnu,&exp4phi); printf("%e %e %e %e %e %e %e\n",rrbar,rho,rho_baryon,P,M,expnu,exp4phi); } // Free the malloc()'s! free(r_Schw_arr); free(rho_arr); free(rho_baryon_arr); free(P_arr); free(M_arr); free(expnu_arr); free(exp4phi_arr); free(rbar_arr); return 0; } #endif
GB_unaryop__abs_fp32_uint8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_fp32_uint8 // op(A') function: GB_tran__abs_fp32_uint8 // C type: float // A type: uint8_t // cast: float cij = (float) aij // unaryop: cij = fabsf (aij) #define GB_ATYPE \ uint8_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = fabsf (x) ; // casting #define GB_CASTING(z, x) \ float z = (float) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_FP32 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_fp32_uint8 ( float *restrict Cx, const uint8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_fp32_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
episerver_fmt_plug.c
/* *New* EPiServer cracker patch for JtR. Hacked together during Summer of * 2012 by Dhiru Kholia <dhiru.kholia at gmail.com> for GSoC. Based on sample * code by hashcat's atom. * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, * are permitted. * * Obtaining hashes from EPiServer 6.x: * * sqlcmd -L * sqlcmd -S <server> -U sa -P <password> * * 1> SELECT name from sys.databases * 2> go * 1> use <database name> * 2> select Email, PasswordFormat, PasswordSalt, Password from aspnet_Membership * 3> go * * JtR Input Format: * * user:$episerver$*version*base64(salt)*base64(hash) * * Where, * * version == 0, for EPiServer 6.x standard config / .NET <= 3.5 SHA1 hash/salt format. * hash = sha1(salt | utf16bytes(password)), PasswordFormat == 1 * * * version == 1, EPiServer 6.x + .NET >= 4.x SHA256 hash/salt format, * PasswordFormat == ? * * Improved performance, JimF, July 2012. * Full Unicode support, magnum, August 2012. 
*/ #if FMT_EXTERNS_H extern struct fmt_main fmt_episerver; #elif FMT_REGISTERS_H john_register_one(&fmt_episerver); #else #include <string.h> #include <assert.h> #include <errno.h> #include "sha.h" #include "sha2.h" #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "base64_convert.h" #include "unicode.h" #include "memdbg.h" #if !FAST_FORMATS_OMP #undef _OPENMP #endif #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 2048 // core i7 no HT #endif #endif #define FORMAT_LABEL "EPiServer" #define FORMAT_NAME "" #define FORMAT_TAG "$episerver$*" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define BINARY_SIZE 32 /* larger of the two */ #define BINARY_ALIGN 4 #define SALT_SIZE sizeof(struct custom_salt) #define EFFECTIVE_SALT_SIZE 16 #define SALT_ALIGN 4 #ifdef SIMD_COEF_32 #include "simd-intrinsics.h" #include "johnswap.h" #define NBKEYS_SHA1 (SIMD_COEF_32 * SIMD_PARA_SHA1) #define NBKEYS_SHA256 (SIMD_COEF_32 * SIMD_PARA_SHA256) #define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA1 * SIMD_PARA_SHA256) #define HASH_IDX_IN (((unsigned int)index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32) #define HASH_IDX_SHA1 (((unsigned int)index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*5*SIMD_COEF_32) #define HASH_IDX_SHA256 (((unsigned int)index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*8*SIMD_COEF_32) #define HASH_IDX_OUT (cur_salt->version == 0 ? 
HASH_IDX_SHA1 : HASH_IDX_SHA256) #if ARCH_LITTLE_ENDIAN==1 #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*4*SIMD_COEF_32 ) //for endianness conversion #else #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*4*SIMD_COEF_32 ) //for endianness conversion #endif #define ALGORITHM_NAME "SHA1/SHA256 " SHA256_ALGORITHM_NAME #define PLAINTEXT_LENGTH 19 // (64 - 9 - 16)/2 #define MIN_KEYS_PER_CRYPT NBKEYS #define MAX_KEYS_PER_CRYPT NBKEYS #else #define ALGORITHM_NAME "SHA1/SHA256 32/" ARCH_BITS_STR " " SHA2_LIB #define PLAINTEXT_LENGTH 32 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 16 #endif static struct fmt_tests episerver_tests[] = { {"$episerver$*0*fGJ2wn/5WlzqQoDeCA2kXA==*UQgnz/vPWap9UeD8Dhaw3h/fgFA=", "testPassword"}, {"$episerver$*0*fGJ2wn/5WlzqQoDeCA2kXA==*uiP1YrZlVcHESbfsRt/wljwNeYU=", "sss"}, {"$episerver$*0*fGJ2wn/5WlzqQoDeCA2kXA==*dxTlKqnxaVHs0210VcX+48QDonA=", "notused"}, // hashes from pass_gen.pl, including some V1 data {"$episerver$*0*OHdOb002Z1J6ZFhlRHRzbw==*74l+VCC9xkGP27sNLPLZLRI/O5A", "test1"}, {"$episerver$*0*THk5ZHhYNFdQUDV1Y0hScg==*ik+FVrPkEs6LfJU88xl5oBRoZjY", ""}, {"$episerver$*1*aHIza2pUY0ZkR2dqQnJrNQ==*1KPAZriqakiNvE6ML6xkUzS11QPREziCvYkJc4UtjWs","test1"}, {"$episerver$*1*RUZzRmNja0c5NkN0aDlMVw==*nh46rc4vkFIL0qGUrKTPuPWO6wqoESSeAxUNccEOe28","thatsworking"}, {"$episerver$*1*cW9DdnVVUnFwM2FobFc4dg==*Zr/nekpDxU5gjt+fzTSqm0j/twZySBBW44Csoai2Fug","test3"}, {"$episerver$*0*b0lvUnlWbkVlSFJQTFBMeg==*K7NAoB/wZfZjsG4DuMkNqKYwfTs", "123456789"}, {NULL} }; #ifdef SIMD_COEF_32 static uint32_t *saved_key; static uint32_t *crypt_out; #else static char (*saved_key)[3 * PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; #endif static struct custom_salt { int version; unsigned char esalt[18 + 1]; /* base64 decoding, 24 / 4 
* 3 = 18 */ } *cur_salt; #if defined(_OPENMP) || defined(SIMD_COEF_32) static int omp_t = 1; #endif #ifdef SIMD_COEF_32 static void episerver_set_key_utf8(char *_key, int index); static void episerver_set_key_CP(char *_key, int index); #endif static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif #ifdef SIMD_COEF_32 saved_key = mem_calloc_align(self->params.max_keys_per_crypt*SHA_BUF_SIZ, sizeof(*saved_key), MEM_ALIGN_SIMD); crypt_out = mem_calloc_align(self->params.max_keys_per_crypt*BINARY_SIZE/4, sizeof(*crypt_out), MEM_ALIGN_SIMD); #else saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); #endif #ifdef SIMD_COEF_32 if (options.target_enc == UTF_8) { self->methods.set_key = episerver_set_key_utf8; self->params.plaintext_length = PLAINTEXT_LENGTH * 3; } else if (options.target_enc != ISO_8859_1 && options.target_enc != ASCII) self->methods.set_key = episerver_set_key_CP; #else if (options.target_enc == UTF_8) self->params.plaintext_length = PLAINTEXT_LENGTH * 3; #endif } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ptr, *ctcopy, *keeptr; size_t res; char tmp[128]; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) return 0; if (!(ctcopy = strdup(ciphertext))) return 0; keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; /* skip leading '$episerver$*' */ if (strlen(ciphertext) > 255) goto error; if (!(ptr = strtokm(ctcopy, "*"))) goto error; /* check version, must be '0' or '1' */ if (*ptr != '0' && *ptr != '1') goto error; if (!(ptr = strtokm(NULL, "*"))) /* salt */ goto error; if (strlen(ptr) > 24) goto error; res = base64_valid_length(ptr, e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT, 0); if (res < strlen(ptr)) goto error; res = 
base64_convert(ptr, e_b64_mime, strlen(ptr), tmp, e_b64_raw, sizeof(tmp), flg_Base64_MIME_TRAIL_EQ, 0); if (res != 16) /* decoded salt size should be 16 bytes */ goto error; if (!(ptr = strtokm(NULL, "*"))) /* hash */ goto error; if (strlen(ptr) > 44) goto error; res = base64_valid_length(ptr, e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT, 0); if (res < strlen(ptr)) goto error; res = base64_convert(ptr, e_b64_mime, strlen(ptr), tmp, e_b64_raw, sizeof(tmp), flg_Base64_MIME_TRAIL_EQ, 0); if (res != 20 && res != 32) /* SHA1 or SHA256 output size */ goto error; if ((ptr = strtokm(NULL, "*"))) /* end */ goto error; MEM_FREE(keeptr); return 1; error: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { static struct custom_salt cs; char _ctcopy[256], *ctcopy=_ctcopy; char *p; memset(&cs, 0, sizeof(cs)); strncpy(ctcopy, ciphertext, 255); ctcopy[255] = 0; ctcopy += FORMAT_TAG_LEN; /* skip over "$episerver$*" */ p = strtokm(ctcopy, "*"); cs.version = atoi(p); p = strtokm(NULL, "*"); base64_convert(p, e_b64_mime, strlen(p), (char*)cs.esalt, e_b64_raw, sizeof(cs.esalt), flg_Base64_NO_FLAGS, 0); return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; memset(buf.c, 0, sizeof(buf.c)); p = strrchr(ciphertext, '*') + 1; base64_convert(p, e_b64_mime, strlen(p), (char*)out, e_b64_raw, sizeof(buf.c), flg_Base64_DONOT_NULL_TERMINATE, 0); #if defined(SIMD_COEF_32) && ARCH_LITTLE_ENDIAN==1 alter_endianity(out, BINARY_SIZE); #endif return out; } // can not use common-get-hash.h since the HASH_IDX_OUT will vary between 5 and // 8 limbs based upon the current salt. No way for pre-processor to handle that. // the only way to use common-get-hash.h would be to make a format for sha1, and // a 2nd for sha256. But since we already have a singlular format, we will simply // not use the common code here. 
#ifdef SIMD_COEF_32 static int get_hash_0 (int index) { return crypt_out[HASH_IDX_OUT] & PH_MASK_0; } static int get_hash_1 (int index) { return crypt_out[HASH_IDX_OUT] & PH_MASK_1; } static int get_hash_2 (int index) { return crypt_out[HASH_IDX_OUT] & PH_MASK_2; } static int get_hash_3 (int index) { return crypt_out[HASH_IDX_OUT] & PH_MASK_3; } static int get_hash_4 (int index) { return crypt_out[HASH_IDX_OUT] & PH_MASK_4; } static int get_hash_5 (int index) { return crypt_out[HASH_IDX_OUT] & PH_MASK_5; } static int get_hash_6 (int index) { return crypt_out[HASH_IDX_OUT] & PH_MASK_6; } #else static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } #endif static void set_salt(void *salt) { #ifdef SIMD_COEF_32 int index, j; cur_salt = (struct custom_salt *)salt; for (index = 0; index < MAX_KEYS_PER_CRYPT*omp_t; ++index) for (j = 0; j < EFFECTIVE_SALT_SIZE; ++j) // copy the salt to vector buffer ((unsigned char*)saved_key)[GETPOS(j, index)] = ((unsigned char*)cur_salt->esalt)[j]; #else cur_salt = (struct custom_salt *)salt; #endif } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif #ifdef SIMD_COEF_32 for (index = 0; index < count; index += (cur_salt->version == 0 ? 
NBKEYS_SHA1 : NBKEYS_SHA256)) { uint32_t *in = &saved_key[HASH_IDX_IN]; uint32_t *out = &crypt_out[HASH_IDX_OUT]; if (cur_salt->version == 0) SIMDSHA1body(in, out, NULL, SSEi_MIXED_IN); else if (cur_salt->version == 1) SIMDSHA256body(in, out, NULL, SSEi_MIXED_IN); } #else for (index = 0; index < count; index++) { unsigned char passwordBuf[PLAINTEXT_LENGTH*2+2]; int len; len = enc_to_utf16((UTF16*)passwordBuf, PLAINTEXT_LENGTH, (UTF8*)saved_key[index], strlen(saved_key[index])); if (len < 0) len = strlen16((UTF16*)passwordBuf); len <<= 1; if (cur_salt->version == 0) { SHA_CTX ctx; SHA1_Init(&ctx); SHA1_Update(&ctx, cur_salt->esalt, EFFECTIVE_SALT_SIZE); SHA1_Update(&ctx, passwordBuf, len); SHA1_Final((unsigned char*)crypt_out[index], &ctx); } else if (cur_salt->version == 1) { SHA256_CTX ctx; SHA256_Init(&ctx); SHA256_Update(&ctx, cur_salt->esalt, EFFECTIVE_SALT_SIZE); SHA256_Update(&ctx, passwordBuf, len); SHA256_Final((unsigned char*)crypt_out[index], &ctx); } } #endif return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) { #ifdef SIMD_COEF_32 if (*((uint32_t*)binary) == crypt_out[HASH_IDX_OUT]) #else if (*((uint32_t*)binary) == crypt_out[index][0]) #endif return 1; } return 0; } static int cmp_one(void *binary, int index) { #if SIMD_COEF_32 return *((uint32_t*)binary) == crypt_out[HASH_IDX_OUT]; #else return (*((uint32_t*)binary) == crypt_out[index][0]); #endif } static int cmp_exact(char *source, int index) { void *binary = get_binary(source); #if SIMD_COEF_32 uint32_t out[BINARY_SIZE/4]; int i; for (i = 0; i < BINARY_SIZE/4; ++i) out[i] = crypt_out[HASH_IDX_OUT + i*SIMD_COEF_32]; if (cur_salt->version == 0) return !memcmp(binary, out, 20); else return !memcmp(binary, out, BINARY_SIZE); #else if (cur_salt->version == 0) return !memcmp(binary, crypt_out[index], 20); else return !memcmp(binary, crypt_out[index], BINARY_SIZE); #endif } static void episerver_set_key(char *_key, int index) { #ifdef SIMD_COEF_32 
unsigned char *key = (unsigned char*)_key; uint32_t *keybuf = &saved_key[HASH_IDX_IN]; uint32_t *keybuf_word = keybuf + 4*SIMD_COEF_32; // skip over the salt unsigned int len, temp2; len = EFFECTIVE_SALT_SIZE >> 1; while((temp2 = *key++)) { unsigned int temp; if ((temp = *key++)) { #if ARCH_LITTLE_ENDIAN==1 *keybuf_word = JOHNSWAP((temp << 16) | temp2); #else *keybuf_word = (temp2 << 24) | (temp<<8); #endif } else { #if ARCH_LITTLE_ENDIAN==1 *keybuf_word = JOHNSWAP((0x80 << 16) | temp2); #else *keybuf_word = (temp2 << 24) | 0x8000; #endif len++; goto key_cleaning; } len += 2; keybuf_word += SIMD_COEF_32; } *keybuf_word = (0x80U << 24); key_cleaning: keybuf_word += SIMD_COEF_32; while(*keybuf_word) { *keybuf_word = 0; keybuf_word += SIMD_COEF_32; } keybuf[15*SIMD_COEF_32] = len << 4; #else strcpy(saved_key[index], _key); #endif } #ifdef SIMD_COEF_32 static void episerver_set_key_CP(char *_key, int index) { unsigned char *key = (unsigned char*)_key; uint32_t *keybuf = &saved_key[HASH_IDX_IN]; uint32_t *keybuf_word = keybuf + 4*SIMD_COEF_32; // skip over the salt unsigned int len, temp2; len = EFFECTIVE_SALT_SIZE >> 1; while((temp2 = *key++)) { unsigned int temp; temp2 = CP_to_Unicode[temp2]; if ((temp = *key++)) { temp = CP_to_Unicode[temp]; *keybuf_word = JOHNSWAP((temp << 16) | temp2); } else { *keybuf_word = JOHNSWAP((0x80 << 16) | temp2); len++; goto key_cleaning; } len += 2; keybuf_word += SIMD_COEF_32; } *keybuf_word = (0x80U << 24); key_cleaning: keybuf_word += SIMD_COEF_32; while(*keybuf_word) { *keybuf_word = 0; keybuf_word += SIMD_COEF_32; } keybuf[15*SIMD_COEF_32] = len << 4; } #endif #ifdef SIMD_COEF_32 static void episerver_set_key_utf8(char *_key, int index) { const UTF8 *source = (UTF8*)_key; uint32_t *keybuf = &saved_key[HASH_IDX_IN]; uint32_t *keybuf_word = keybuf + 4*SIMD_COEF_32; // skip over the salt UTF32 chl, chh = 0x80; unsigned int len; len = EFFECTIVE_SALT_SIZE >> 1; while (*source) { chl = *source; if (chl >= 0xC0) { unsigned int 
extraBytesToRead = opt_trailingBytesUTF8[chl & 0x3f]; switch (extraBytesToRead) { case 3: ++source; if (*source) { chl <<= 6; chl += *source; } else goto bailout; case 2: ++source; if (*source) { chl <<= 6; chl += *source; } else goto bailout; case 1: ++source; if (*source) { chl <<= 6; chl += *source; } else goto bailout; case 0: break; default: goto bailout; } chl -= offsetsFromUTF8[extraBytesToRead]; } source++; len++; if (chl > UNI_MAX_BMP) { if (len == PLAINTEXT_LENGTH + (EFFECTIVE_SALT_SIZE>>1)) { chh = 0x80; *keybuf_word = JOHNSWAP((chh << 16) | chl); keybuf_word += SIMD_COEF_32; break; } #define halfBase 0x0010000UL #define halfShift 10 #define halfMask 0x3FFUL #define UNI_SUR_HIGH_START (UTF32)0xD800 #define UNI_SUR_LOW_START (UTF32)0xDC00 chl -= halfBase; chh = (UTF16)((chl & halfMask) + UNI_SUR_LOW_START);; chl = (UTF16)((chl >> halfShift) + UNI_SUR_HIGH_START); len++; } else if (*source && len < PLAINTEXT_LENGTH + (EFFECTIVE_SALT_SIZE>>1)) { chh = *source; if (chh >= 0xC0) { unsigned int extraBytesToRead = opt_trailingBytesUTF8[chh & 0x3f]; switch (extraBytesToRead) { case 3: ++source; if (*source) { chl <<= 6; chl += *source; } else goto bailout; case 2: ++source; if (*source) { chh <<= 6; chh += *source; } else goto bailout; case 1: ++source; if (*source) { chh <<= 6; chh += *source; } else goto bailout; case 0: break; default: goto bailout; } chh -= offsetsFromUTF8[extraBytesToRead]; } source++; len++; } else { chh = 0x80; *keybuf_word = JOHNSWAP((chh << 16) | chl); keybuf_word += SIMD_COEF_32; break; } *keybuf_word = JOHNSWAP((chh << 16) | chl); keybuf_word += SIMD_COEF_32; } if (chh != 0x80 || len == (EFFECTIVE_SALT_SIZE>>1)) { *keybuf_word = (0x80U << 24); keybuf_word += SIMD_COEF_32; } bailout: while(*keybuf_word) { *keybuf_word = 0; keybuf_word += SIMD_COEF_32; } keybuf[15*SIMD_COEF_32] = len << 4; } #endif static char *get_key(int index) { #ifdef SIMD_COEF_32 static UTF16 out[PLAINTEXT_LENGTH + 1]; unsigned int i,s; s = ((saved_key[HASH_IDX_IN 
+ 15*SIMD_COEF_32] >> 3) - 16) >> 1; for (i = 0; i < s; i++) out[i] = ((unsigned char*)saved_key)[GETPOS(16 + (i<<1), index)] | (((unsigned char*)saved_key)[GETPOS(16 + (i<<1) + 1, index)] << 8); out[i] = 0; #if defined (SIMD_COEF_32) && !ARCH_LITTLE_ENDIAN alter_endianity_w16(out, s<<1); #endif return (char*)utf16_to_enc(out); #else return saved_key[index]; #endif } /* report hash type: 1 SHA1, 2 SHA256 */ static unsigned int hash_type(void *salt) { struct custom_salt *my_salt = salt; return (unsigned int) (1 + my_salt->version); } struct fmt_main fmt_episerver = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #ifdef _OPENMP FMT_OMP | FMT_OMP_BAD | #endif FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8, { "hash type [1:SHA1 2:SHA256]", }, { FORMAT_TAG }, episerver_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { hash_type, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, episerver_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
ddot_kahan_omp_sse_intrin.c
#ifdef __SSE__
#include <stdio.h>
#include <stdlib.h>   /* BUGFIX: malloc/free, exit, EXIT_FAILURE were used without this header */
#include <math.h>
#include <omp.h>
#include "immintrin.h"

extern double ddot_kahan_sse_intrin(int, const double*, const double*, double*);

/*
 * ddot_kahan_omp_sse_intrin - OpenMP-parallel, Kahan-compensated dot product.
 *
 * Splits a[0..N) and b[0..N) into per-thread chunks rounded to a 64-byte
 * boundary, runs the SSE Kahan kernel on each chunk, then combines the
 * per-thread (sum, compensation) pairs with a scalar Kahan reduction.
 *
 * N: number of elements; a, b: input vectors; r: out-param for the result.
 */
void ddot_kahan_omp_sse_intrin(int N, const double *a, const double *b, double *r)
{
  double *sum;   /* per-thread partial sums */
  double *c;     /* per-thread compensation terms returned by the kernel */
  int nthreads;

  /* Discover the actual team size and size the scratch buffers to it. */
  #pragma omp parallel
  {
    #pragma omp single
    {
      nthreads = omp_get_num_threads();
      if ((sum = (double *)malloc(nthreads * sizeof(double))) == NULL) {
        perror("malloc");
        exit(EXIT_FAILURE);
      }
      if ((c = (double *)malloc(nthreads * sizeof(double))) == NULL) {
        perror("malloc");
        exit(EXIT_FAILURE);
      }
    }
  }

  if (N < nthreads) {
    /* Too little work to split: fall back to the serial kernel.
     * BUGFIX: the original leaked sum/c on this early-return path. */
    free(sum);
    free(c);
    ddot_kahan_sse_intrin(N, a, b, r);
    return;
  }

  #pragma omp parallel
  {
    int id = omp_get_thread_num();

    /* Per-thread chunk, rounded UP to a 64-byte (8-double) boundary so each
     * thread's slice starts 64-byte aligned relative to a/b.
     * BUGFIX: the original computed ceil(floor(N/alignment)/nthreads)*alignment,
     * which silently drops the last N % alignment elements when N is not a
     * multiple of the alignment; round N itself up instead so the last
     * thread picks up the tail. */
    int alignment = 64 / sizeof(double);
    int gchunk = ((N + nthreads*alignment - 1) / (nthreads*alignment)) * alignment;
    int chunk = gchunk;
    if ((id+1)*gchunk > N) chunk = N - (id*gchunk);  /* last thread: remainder */
    if (chunk < 0) chunk = 0;                        /* threads past the end do nothing */

    /* Each thread sums its own slice. */
    c[id] = ddot_kahan_sse_intrin(chunk, a+id*gchunk, b+id*gchunk, &sum[id]);
  }

  /* Combine the per-thread results with a scalar Kahan sum, folding each
   * thread's residual compensation into the running compensation term. */
  double scalar_c = 0.0;
  double scalar_sum = 0.0;
#pragma novector
  for (int i=0; i<nthreads; ++i) {
    scalar_c = scalar_c + c[i];
    double y = sum[i] - scalar_c;
    double t = scalar_sum + y;
    scalar_c = (t - scalar_sum) - y;
    scalar_sum = t;
  }
  *r = scalar_sum;

  /* BUGFIX: the original never freed the scratch buffers. */
  free(sum);
  free(c);
}
#endif
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 4; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; 
t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
fig4.39-firstprivate-alternative.c
/* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. Copyright 2009 Sun Microsystems, Inc. All rights reserved. The contents of this file are subject to the terms of the BSD License("BSD")(the "License"). You can obtain a copy of the License at: http://www.opensparc.net/pubs/t1/licenses/BSD+_License.txt The BSD License Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistribution of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistribution in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Sun Microsystems, Inc. or the names of contributors may be used to endorse or promote products derived from this software without specific prior written permission. This software is provided "AS IS," without a warranty of any kind. ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY, ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. You acknowledge that this software is not designed, licensed or intended for use in the design, construction, operation or maintenance of any nuclear facility. 
*/

/*
 * Demonstrates the OpenMP firstprivate clause: 'indx' enters the first
 * parallel region carrying its serial value ('offset'), so each thread can
 * read it without re-deriving it. Appears to be the fig. 4.39 example from
 * the book "Using OpenMP" -- comments only; code left byte-identical.
 */
#include <stdio.h>
#include <stdlib.h>

#define TRUE  1
#define FALSE 0

#ifdef _OPENMP
#include <omp.h>
#else
/* Serial fallbacks so the program also builds without OpenMP. */
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#endif

int main()
{
   int *a;
   int n = 2, nthreads, vlen, indx, offset = 4, i, TID;
   int failed;   /* set inside the first parallel region (single thread) */

#ifdef _OPENMP
   /* Pin the team to exactly 3 threads (dynamic adjustment off) so the
      diagnostics and final array contents are reproducible. */
   (void) omp_set_dynamic(FALSE);
   if (omp_get_dynamic()) {printf("Warning: dynamic adjustment of threads has been set\n");}
   (void) omp_set_num_threads(3);
#endif

   indx = offset;

/* ------------------------------------------------------------------------
   Set up parameters for computation and allocate memory
   ------------------------------------------------------------------------ */
   /* 'indx' is firstprivate: each thread starts with the serial value.
      'vlen' is not listed, so it is implicitly shared; only the one thread
      executing the single block writes it, then the implicit barrier at the
      end of the region makes it (and 'a'/'failed') visible to all. */
   #pragma omp parallel firstprivate(indx) shared(a,n,nthreads,failed)
   {
      #pragma omp single
      {
         nthreads = omp_get_num_threads();
         vlen = indx + n*nthreads;   /* offset + one n-element segment per thread */
         if ( (a = (int *) malloc(vlen*sizeof(int))) == NULL )
            failed = TRUE;
         else
            failed = FALSE;
      }
   } /*-- End of parallel region --*/

   if ( failed == TRUE ) {
      printf("Fatal error: memory allocation for a failed vlen = %d\n",vlen);
      return(-1);
   }
   else {
      printf("Diagnostics:\n");
      printf("nthreads = %d\n",nthreads);
      printf("indx = %d\n",indx);
      printf("n = %d\n",n);
      printf("vlen = %d\n",vlen);
   }

   /* Sentinel values; note the first 'offset' entries are never overwritten
      by the worker threads below, so they stay negative in the output. */
   for(i=0; i<vlen; i++)
      a[i] = -i-1;

/* ------------------------------------------------------------------------
   Each thread starts access to array a through variable offset
   ------------------------------------------------------------------------ */
   printf("Length of segment per thread is %d\n",n);
   printf("Offset for vector a is %d\n",indx);

   /* Here 'indx' is plain private and recomputed per thread from the shared
      'offset', so each thread fills its own n-element segment with TID+1. */
   #pragma omp parallel default(none) private(i,TID,indx) \
           shared(n,offset,a)
   {
      TID = omp_get_thread_num();
      indx = offset + n*TID;
      for(i=indx; i<indx+n; i++)
         a[i] = TID + 1;
   } /*-- End of parallel region --*/

   printf("After the parallel region:\n");
   for (i=0; i<vlen; i++)
      printf("a[%d] = %d\n",i,a[i]);

   free(a);
   return(0);
}
simplifyPredicate.c
/*
 * Compiler test input: all threads of the team execute the same
 * sequence of three barriers inside a while loop whose predicate
 * (!x, with x set to 0 and never modified in the body) is
 * loop-invariant, so barrier matching can be checked statically.
 *
 * NOTE(review): because the loop body never changes x, the loop does
 * not terminate at run time -- this file appears to be a
 * static-analysis input only; confirm before executing it.
 */
int main() {
    int s = 0;
#pragma omp parallel
    {
        int x;
        x = 0;
        while (!x) {
            s = 1;
#pragma omp barrier
            s = 2;
#pragma omp barrier
            s = 3;
#pragma omp barrier
        }
        /* reached only if the predicate ever becomes false */
        x++;
    }
    /* BUG FIX: the original source incremented x again here, but x is
     * declared inside the parallel block above and is not in scope at
     * function level, so the file failed to compile.  That stray
     * statement has been removed. */
    return 0;
}
gates.h
/* * This file is part of Quantum++. * * MIT License * * Copyright (c) 2013 - 2019 Vlad Gheorghiu (vgheorgh@gmail.com) * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/

/**
 * \file classes/gates.h
 * \brief Quantum gates
 */

#ifndef CLASSES_GATES_H_
#define CLASSES_GATES_H_

namespace qpp {

/**
 * \class qpp::Gates
 * \brief const Singleton class that implements most commonly used gates
 */
class Gates final : public internal::Singleton<const Gates> // const Singleton
{
    friend class internal::Singleton<const Gates>;

  public:
    // One qubit gates
    cmat Id2{cmat::Identity(2, 2)}; ///< Identity gate
    cmat H{cmat::Zero(2, 2)};       ///< Hadamard gate
    cmat X{cmat::Zero(2, 2)};       ///< Pauli Sigma-X gate
    cmat Y{cmat::Zero(2, 2)};       ///< Pauli Sigma-Y gate
    cmat Z{cmat::Zero(2, 2)};       ///< Pauli Sigma-Z gate
    cmat S{cmat::Zero(2, 2)};       ///< S gate
    cmat T{cmat::Zero(2, 2)};       ///< T gate

    // two qubit gates
    cmat CNOT{cmat::Identity(4, 4)}; ///< Controlled-NOT control target gate
    cmat CZ{cmat::Identity(4, 4)};   ///< Controlled-Phase gate
    cmat CNOTba{cmat::Zero(4, 4)};   ///< Controlled-NOT target->control gate
    cmat SWAP{cmat::Identity(4, 4)}; ///< SWAP gate

    // three qubit gates
    cmat TOF{cmat::Identity(8, 8)};  ///< Toffoli gate
    cmat FRED{cmat::Identity(8, 8)}; ///< Fredkin gate

  private:
    /**
     * \brief Initializes the gates
     *
     * Fills in the matrix entries via Eigen's comma initializer; the
     * multi-qubit gates start from identity/zero (see the member
     * initializers above) and only the non-trivial blocks are written.
     */
    Gates() {
        H << 1 / std::sqrt(2.), 1 / std::sqrt(2.), 1 / std::sqrt(2.),
            -1 / std::sqrt(2.);
        X << 0, 1, 1, 0;
        Z << 1, 0, 0, -1;
        Y << 0, -1_i, 1_i, 0;
        S << 1, 0, 0, 1_i;
        T << 1, 0, 0, std::exp(1_i * pi / 4.0);
        // CNOT flips the target when the control (first) qubit is 1
        CNOT.block(2, 2, 2, 2) = X;
        CNOTba(0, 0) = 1;
        CNOTba(1, 3) = 1;
        CNOTba(2, 2) = 1;
        CNOTba(3, 1) = 1;
        CZ(3, 3) = -1;
        SWAP.block(1, 1, 2, 2) = X;
        TOF.block(6, 6, 2, 2) = X;
        FRED.block(4, 4, 4, 4) = SWAP;
    }

    /**
     * \brief Default destructor
     */
    ~Gates() = default;

  public:
    // variable gates

    // one qubit gates

    /**
     * \brief Qubit rotation of \a theta about the
     * 3-dimensional real (unit) vector \a n
     *
     * \param theta Rotation angle
     * \param n 3-dimensional real (unit) vector
     * \return Rotation gate
     */
    cmat Rn(double theta, const std::vector<double>& n) const {
        // EXCEPTION CHECKS

        // check 3-dimensional vector
        if (n.size() != 3)
            throw exception::CustomException(
                "qpp::Gates::Rn()", "n is not a 3-dimensional vector!");
        // END EXCEPTION CHECKS

        cmat result(2, 2);
        // R_n(theta) = cos(theta/2) I - i sin(theta/2) (n . sigma)
        result = std::cos(theta / 2) * Id2 -
                 1_i * std::sin(theta / 2) * (n[0] * X + n[1] * Y + n[2] * Z);

        return result;
    }

    /**
     * \brief Qubit rotation of \a theta about the X axis
     *
     * \param theta Rotation angle
     * \return Rotation gate
     */
    cmat RX(double theta) const {
        // EXCEPTION CHECKS

        // END EXCEPTION CHECKS

        return Rn(theta, {1, 0, 0});
    }

    /**
     * \brief Qubit rotation of \a theta about the Y axis
     *
     * \param theta Rotation angle
     * \return Rotation gate
     */
    cmat RY(double theta) const {
        // EXCEPTION CHECKS

        // END EXCEPTION CHECKS

        return Rn(theta, {0, 1, 0});
    }

    /**
     * \brief Qubit rotation of \a theta about the Z axis
     *
     * \param theta Rotation angle
     * \return Rotation gate
     */
    cmat RZ(double theta) const {
        // EXCEPTION CHECKS

        // END EXCEPTION CHECKS

        return Rn(theta, {0, 0, 1});
    }

    // one quDit gates

    /**
     * \brief Generalized Z gate for qudits
     *
     * \note Defined as \f$ Z = \sum_{j=0}^{D-1} \exp(2\pi \mathrm{i} j/D)
     * |j\rangle\langle j| \f$
     *
     * \param D Dimension of the Hilbert space
     * \return Generalized Z gate for qudits
     */
    cmat Zd(idx D = 2) const {
        // EXCEPTION CHECKS

        // check valid dimension
        if (D == 0)
            throw exception::DimsInvalid("qpp::Gates::Zd()");
        // END EXCEPTION CHECKS

        cmat result = cmat::Zero(D, D);
        // diagonal of powers of the primitive D-th root of unity omega(D)
        for (idx i = 0; i < D; ++i)
            result(i, i) = std::pow(omega(D), static_cast<double>(i));

        return result;
    }

    /**
     * \brief SWAP gate for qudits
     *
     * \param D Dimension of the Hilbert space
     * \return SWAP gate for qudits
     */
    cmat SWAPd(idx D = 2) const {
        // EXCEPTION CHECKS

        // check valid dimension
        if (D == 0)
            throw exception::DimsInvalid("qpp::Gates::SWAPd()");
        // END EXCEPTION CHECKS

        cmat result = cmat::Zero(D * D, D * D);

#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(2)
#endif // WITH_OPENMP_
        // column major order for speed
        for (idx j = 0; j < D; ++j)
            for (idx i = 0; i < D; ++i)
                result(D * i + j, i + D * j) = 1;

        return result;
    }

    /**
     * \brief Quantum Fourier transform gate for qudits
     *
     * \note Defined as
     * \f$ F = \sum_{j,k=0}^{D-1} \exp(2\pi \mathrm{i} jk/D) |j\rangle\langle k|
     * \f$
     *
     * \param D Dimension of the Hilbert space
     * \return Fourier transform gate for qudits
     */
    cmat Fd(idx D = 2) const {
        // EXCEPTION CHECKS

        // check valid dimension
        if (D == 0)
            throw exception::DimsInvalid("qpp::Gates::Fd()");
        // END EXCEPTION CHECKS

        cmat result(D, D);

#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(2)
#endif // WITH_OPENMP_
        // column major order for speed
        for (idx j = 0; j < D; ++j)
            for (idx i = 0; i < D; ++i)
                result(i, j) = 1 / std::sqrt(D) *
                               std::pow(omega(D), static_cast<double>(i * j));

        return result;
    }

    /**
     * \brief Modular multiplication gate for qubits
     * Implements \f$ |x\rangle \longrightarrow |ax \mathrm{ mod } N\rangle \f$
     *
     * \note For the gate to be unitary, \a a and \a N should be co-prime. The
     * function does not check co-primality in release versions!
     *
     * \note The number of qubits required to implement the gate should satisfy
     * \f$ n \geq \lceil\log_2(N)\rceil \f$
     *
     * \param a Positive integer less than \a N
     * \param N Positive integer
     * \param n Number of qubits required for implementing the gate
     * \return Modular multiplication gate
     */
    cmat MODMUL(idx a, idx N, idx n) const {
        // check co-primality (unitarity) only in DEBUG version
#ifndef NDEBUG
        assert(gcd(a, N) == 1);
#endif
        // EXCEPTION CHECKS

        // check valid arguments
        if (N < 3 || a >= N) {
            throw exception::OutOfRange("qpp::Gates::MODMUL()");
        }
        // check enough qubits
        if (n < static_cast<idx>(std::ceil(std::log2(N)))) {
            throw exception::OutOfRange("qpp::Gates::MODMUL()");
        }
        // END EXCEPTION CHECKS

        // D = 2^n, the dimension of the full n-qubit space
        idx D = static_cast<idx>(std::llround(std::pow(2, n)));

        cmat result = cmat::Zero(D, D);

#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(2)
#endif // WITH_OPENMP_
        // column major order for speed
        for (idx j = 0; j < N; ++j)
            for (idx i = 0; i < N; ++i)
                if (static_cast<idx>(modmul(j, a, N)) == i)
                    result(i, j) = 1;

#ifdef WITH_OPENMP_
#pragma omp parallel for
#endif // WITH_OPENMP_
        // complete the matrix (identity on the basis states >= N)
        for (idx i = N; i < D; ++i)
            result(i, i) = 1;

        return result;
    }

    /**
     * \brief Generalized X gate for qudits
     *
     * \note Defined as \f$ X = \sum_{j=0}^{D-1} |j\oplus 1\rangle\langle j|
     * \f$, i.e. raising operator \f$ X|j\rangle = |j\oplus 1\rangle\f$
     *
     * \param D Dimension of the Hilbert space
     * \return Generalized X gate for qudits
     */
    cmat Xd(idx D = 2) const {
        // EXCEPTION CHECKS

        // check valid dimension
        if (D == 0)
            throw exception::DimsInvalid("qpp::Gates::Xd()");
        // END EXCEPTION CHECKS

        // X is the Fourier conjugate of Z
        return Fd(D).inverse() * Zd(D) * Fd(D);
    }

    /**
     * \brief Identity gate
     *
     * \note Can change the return type from complex matrix (default)
     * by explicitly specifying the template parameter
     *
     * \param D Dimension of the Hilbert space
     * \return Identity gate on a Hilbert space of dimension \a D
     */
    template <typename Derived = Eigen::MatrixXcd>
    Derived Id(idx D = 2) const {
        // EXCEPTION CHECKS

        // check valid dimension
        if (D == 0)
            throw exception::DimsInvalid("qpp::Gates::Id()");
        // END EXCEPTION CHECKS

        return Derived::Identity(D, D);
    }

    /**
     * \brief Generates the multi-partite multiple-controlled-\a A gate
     * in matrix form
     * \see qpp::applyCTRL()
     *
     * \note The dimension of the gate \a A must match
     * the dimension of \a target
     *
     * \param A Eigen expression
     * \param ctrl Control subsystem indexes
     * \param target Subsystem indexes where the gate \a A is applied
     * \param n Total number of subsystems
     * \param d Subsystem dimensions
     * \return CTRL-A gate, as a matrix over the same scalar field as \a A
     */
    template <typename Derived>
    dyn_mat<typename Derived::Scalar> CTRL(const Eigen::MatrixBase<Derived>& A,
                                           const std::vector<idx>& ctrl,
                                           const std::vector<idx>& target,
                                           idx n, idx d = 2) const {
        const dyn_mat<typename Derived::Scalar>& rA = A.derived();

        // EXCEPTION CHECKS

        // check matrix zero-size
        if (!internal::check_nonzero_size(rA))
            throw exception::ZeroSize("qpp::Gates::CTRL()");

        // check square matrix
        if (!internal::check_square_mat(rA))
            throw exception::MatrixNotSquare("qpp::Gates::CTRL()");

        // check lists zero-size
        if (ctrl.size() == 0)
            throw exception::ZeroSize("qpp::Gates::CTRL()");
        if (target.size() == 0)
            throw exception::ZeroSize("qpp::Gates::CTRL()");

        // check out of range
        if (n == 0)
            throw exception::OutOfRange("qpp::Gates::CTRL()");

        // check valid local dimension
        if (d == 0)
            throw exception::DimsInvalid("qpp::Gates::CTRL()");

        // ctrl + gate subsystem vector
        std::vector<idx> ctrlgate = ctrl;
        ctrlgate.insert(std::end(ctrlgate), std::begin(target),
                        std::end(target));
        std::sort(std::begin(ctrlgate), std::end(ctrlgate));

        std::vector<idx> dims(n, d); // local dimensions vector

        // check that ctrl + gate subsystem is valid
        // with respect to local dimensions
        if (!internal::check_subsys_match_dims(ctrlgate, dims))
            throw exception::SubsysMismatchDims("qpp::Gates::CTRL()");

        // check that target list match the dimension of the matrix
        using Index = typename dyn_mat<typename Derived::Scalar>::Index;
        if (rA.rows() !=
            static_cast<Index>(std::llround(std::pow(d, target.size()))))
            throw exception::DimsMismatchMatrix("qpp::Gates::CTRL()");
        // END EXCEPTION CHECKS

        // Use static allocation for speed!
        idx Cdims[maxn];
        idx midx_row[maxn];
        idx midx_col[maxn];

        idx CdimsA[maxn];
        idx midxA_row[maxn];
        idx midxA_col[maxn];

        idx Cdims_bar[maxn];
        idx Csubsys_bar[maxn];
        idx midx_bar[maxn];

        idx n_gate = target.size();
        idx n_ctrl = ctrl.size();
        idx n_subsys_bar = n - ctrlgate.size();
        idx D = static_cast<idx>(std::llround(std::pow(d, n)));
        idx DA = static_cast<idx>(rA.rows());
        idx Dsubsys_bar =
            static_cast<idx>(std::llround(std::pow(d, n_subsys_bar)));

        // compute the complementary subsystem of ctrlgate w.r.t. dims
        std::vector<idx> subsys_bar = complement(ctrlgate, n);
        std::copy(std::begin(subsys_bar), std::end(subsys_bar),
                  std::begin(Csubsys_bar));

        for (idx k = 0; k < n; ++k) {
            midx_row[k] = midx_col[k] = 0;
            Cdims[k] = d;
        }

        for (idx k = 0; k < n_subsys_bar; ++k) {
            Cdims_bar[k] = d;
            midx_bar[k] = 0;
        }

        for (idx k = 0; k < n_gate; ++k) {
            midxA_row[k] = midxA_col[k] = 0;
            CdimsA[k] = d;
        }

        dyn_mat<typename Derived::Scalar> result =
            dyn_mat<typename Derived::Scalar>::Identity(D, D);
        dyn_mat<typename Derived::Scalar> Ak;

        // run over the complement indexes
        for (idx i = 0; i < Dsubsys_bar; ++i) {
            // get the complement row multi-index
            internal::n2multiidx(i, n_subsys_bar, Cdims_bar, midx_bar);
            // when every control is in state |k>, apply A^k to the target
            for (idx k = 0; k < d; ++k) {
                Ak = powm(rA, k); // compute rA^k
                // run over the target row multi-index
                for (idx a = 0; a < DA; ++a) {
                    // get the target row multi-index
                    internal::n2multiidx(a, n_gate, CdimsA, midxA_row);

                    // construct the result row multi-index

                    // first the ctrl part (equal for both row and column)
                    for (idx c = 0; c < n_ctrl; ++c)
                        midx_row[ctrl[c]] = midx_col[ctrl[c]] = k;

                    // then the complement part (equal for column)
                    for (idx c = 0; c < n_subsys_bar; ++c)
                        midx_row[Csubsys_bar[c]] = midx_col[Csubsys_bar[c]] =
                            midx_bar[c];

                    // then the target part
                    for (idx c = 0; c < n_gate; ++c)
                        midx_row[target[c]] = midxA_row[c];

                    // run over the target column multi-index
                    for (idx b = 0; b < DA; ++b) {
                        // get the target column multi-index
                        internal::n2multiidx(b, n_gate, CdimsA, midxA_col);

                        // construct the result column multi-index
                        for (idx c = 0; c < n_gate; ++c)
                            midx_col[target[c]] = midxA_col[c];

                        // finally write the values
                        result(internal::multiidx2n(midx_row, n, Cdims),
                               internal::multiidx2n(midx_col, n, Cdims)) =
                            Ak(a, b);
                    }
                }
            }
        }

        return result;
    }

    /**
     * \brief Expands out
     * \see qpp::kron()
     *
     * Expands out \a A as a matrix in a multi-partite system.
     * Faster than using qpp::kron(I, I, ..., I, A, I, ..., I).
     *
     * \param A Eigen expression
     * \param pos Position
     * \param dims Dimensions of the multi-partite system
     * \return Tensor product
     * \f$ I\otimes\cdots\otimes I\otimes A \otimes I \otimes\cdots\otimes I\f$,
     * with \a A on position \a pos, as a dynamic matrix
     * over the same scalar field as \a A
     */
    template <typename Derived>
    dyn_mat<typename Derived::Scalar>
    expandout(const Eigen::MatrixBase<Derived>& A, idx pos,
              const std::vector<idx>& dims) const {
        const dyn_mat<typename Derived::Scalar>& rA = A.derived();

        // EXCEPTION CHECKS

        // check zero-size
        if (!internal::check_nonzero_size(rA))
            throw exception::ZeroSize("qpp::Gates::expandout()");

        // check that dims is a valid dimension vector
        if (!internal::check_dims(dims))
            throw exception::DimsInvalid("qpp::Gates::expandout()");

        // check square matrix
        if (!internal::check_square_mat(rA))
            throw exception::MatrixNotSquare("qpp::Gates::expandout()");

        // check that position is valid
        if (pos + 1 > dims.size())
            throw exception::OutOfRange("qpp::Gates::expandout()");

        // check that dims[pos] match the dimension of A
        if (static_cast<idx>(rA.rows()) != dims[pos])
            throw exception::DimsMismatchMatrix("qpp::Gates::expandout()");
        // END EXCEPTION CHECKS

        // D = total dimension = product of all local dimensions
        idx D = std::accumulate(std::begin(dims), std::end(dims),
                                static_cast<idx>(1), std::multiplies<idx>());
        dyn_mat<typename Derived::Scalar> result =
            dyn_mat<typename Derived::Scalar>::Identity(D, D);

        idx Cdims[maxn];
        idx midx_row[maxn];
        idx midx_col[maxn];

        for (idx k = 0; k < dims.size(); ++k) {
            midx_row[k] = midx_col[k] = 0;
            Cdims[k] = dims[k];
        }

        // run over the main diagonal multi-indexes
        for (idx i = 0; i < D; ++i) {
            // get row multi_index
            internal::n2multiidx(i, dims.size(), Cdims, midx_row);
            // get column multi_index (same as row)
            internal::n2multiidx(i, dims.size(), Cdims, midx_col);
            // run over the gate row multi-index
            for (idx a = 0; a < static_cast<idx>(rA.rows()); ++a) {
                // construct the total row multi-index
                midx_row[pos] = a;

                // run over the gate column multi-index
                for (idx b = 0; b < static_cast<idx>(rA.cols()); ++b) {
                    // construct the total column multi-index
                    midx_col[pos] = b;

                    // finally write the values
                    result(internal::multiidx2n(midx_row, dims.size(), Cdims),
                           internal::multiidx2n(midx_col, dims.size(),
                                                Cdims)) = rA(a, b);
                }
            }
        }

        return result;
    }

    /**
     * \brief Expands out
     * \see qpp::kron()
     *
     * Expands out \a A as a matrix in a multi-partite system.
     * Faster than using qpp::kron(I, I, ..., I, A, I, ..., I).
     *
     * \note The std::initializer_list overload exists because otherwise, in the
     * degenerate case when \a dims has only one element, the one element list
     * is implicitly converted to the element's underlying type, i.e. qpp::idx,
     * which has the net effect of picking the wrong (non-vector)
     * qpp::expandout() overload
     *
     * \param A Eigen expression
     * \param pos Position
     * \param dims Dimensions of the multi-partite system
     * \return Tensor product
     * \f$ I\otimes\cdots\otimes I\otimes A \otimes I \otimes\cdots\otimes I\f$,
     * with \a A on position \a pos, as a dynamic matrix
     * over the same scalar field as \a A
     */
    template <typename Derived>
    dyn_mat<typename Derived::Scalar>
    expandout(const Eigen::MatrixBase<Derived>& A, idx pos,
              const std::initializer_list<idx>& dims) const {
        return this->expandout(A, pos, std::vector<idx>(dims));
    }

    /**
     * \brief Expands out
     * \see qpp::kron()
     *
     * Expands out \a A as a matrix in a multi-partite system.
     * Faster than using qpp::kron(I, I, ..., I, A, I, ..., I).
     *
     * \param A Eigen expression
     * \param pos Position
     * \param n Number of subsystems
     * \param d Subsystem dimensions
     * \return Tensor product
     * \f$ I\otimes\cdots\otimes I\otimes A \otimes I \otimes\cdots\otimes I\f$,
     * with \a A on position \a pos, as a dynamic matrix
     * over the same scalar field as \a A
     */
    template <typename Derived>
    dyn_mat<typename Derived::Scalar>
    expandout(const Eigen::MatrixBase<Derived>& A, idx pos, idx n,
              idx d = 2) const {
        // EXCEPTION CHECKS

        // check zero size
        if (!internal::check_nonzero_size(A))
            throw exception::ZeroSize("qpp::Gates::expandout()");

        // check valid dims
        if (d == 0)
            throw exception::DimsInvalid("qpp::Gates::expandout()");
        // END EXCEPTION CHECKS

        std::vector<idx> dims(n, d); // local dimensions vector

        return this->expandout(A, pos, dims);
    }

    // getters

    /**
     * \brief Get the name of the most common qubit gates
     * \note Assumes that the gate \a U is represented by a square matrix. If
     * not, returns the empty string
     *
     * \param U Complex matrix representing the quantum gate
     * \return The name of the gate (if any), otherwise the empty string
     */
    std::string get_name(const cmat& U) const {
        // EXCEPTION CHECKS

        // check zero size
        if (!internal::check_nonzero_size(U))
            throw exception::ZeroSize("qpp::Gates::get_name()");

        // check square matrix
        if (!internal::check_square_mat(U))
            return "";
        // END EXCEPTION CHECKS

        const idx D = static_cast<idx>(U.rows());

        // dispatch on the dimension, then compare against the known gates
        switch (D) {
            // 1 qubit gates
            case 2:
                if (U == this->Id2)
                    return "Id2";
                else if (U == this->H)
                    return "H";
                else if (U == this->X)
                    return "X";
                else if (U == this->Y)
                    return "Y";
                else if (U == this->Z)
                    return "Z";
                else if (U == this->S)
                    return "S";
                else if (U == this->T)
                    return "T";
                else
                    return "";
                break;
            // 2 qubit gates
            case 4:
                if (U == this->CNOT)
                    return "CNOT";
                else if (U == this->CZ)
                    return "CZ";
                else if (U == this->CNOTba)
                    return "CNOTba";
                else if (U == this->SWAP)
                    return "SWAP";
                else
                    return "";
                break;
            // 3 qubit gates
            case 8:
                if (U == this->TOF)
                    return "TOF";
                else if (U == this->FRED)
                    return "FRED";
                else
                    return "";
                break;
            default:
                return "";
        }
    }
    // end getters
}; /* class Gates */

} /* namespace qpp */

#endif /* CLASSES_GATES_H_ */
GB_unaryop__lnot_int64_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__lnot_int64_uint32
// op(A') function: GB_tran__lnot_int64_uint32

// C type: int64_t
// A type: uint32_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = !(aij != 0)

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: logical NOT of the (already cast) value
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, aij) \
    int64_t z = (int64_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT64 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = !(aij != 0) to each of the anz entries of Ax, writing the
// int64_t results into Cx.  Cx and Ax may be aliased (the operation is
// elementwise, one output per input, so in-place is safe).  The loop is
// statically scheduled across nthreads OpenMP threads.
GrB_Info GB_unop__lnot_int64_uint32
(
    int64_t *Cx,        // Cx and Ax may be aliased
    uint32_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop body lives in GB_unaryop_transpose.c, which is textually
// included below and expands in terms of the GB_* macros defined above.
GrB_Info GB_tran__lnot_int64_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
tarjan_scc.h
/* * tarjan_scc.h * LLAMA Graph Analytics * * Copyright 2014 * The President and Fellows of Harvard College. * * Copyright 2014 * Oracle Labs. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/
#ifndef LL_GENERATED_CPP_TARJAN_SCC_H
#define LL_GENERATED_CPP_TARJAN_SCC_H

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <float.h>
#include <limits.h>
#include <cmath>
#include <algorithm>
#include <omp.h>
#include "llama/ll_dfs_template.h"
#include "llama/ll_writable_graph.h"
#include "llama/ll_seq.h"
#include "benchmarks/benchmark.h"


// BFS/DFS definitions for the procedure

// DFS visitor implementing the per-node work of Tarjan's SCC algorithm:
// visit_pre pushes a node and seeds its low-link with its own id,
// visit_post propagates low-links from out-neighbors and, when a node is
// its own SCC root, pops the component off the stack and labels it.
// NOTE(review): low-link values here are node ids seeded in visit_pre, not
// DFS discovery times as in the textbook formulation -- presumably the
// ll_dfs_template visit order makes this equivalent; confirm against the
// template's contract.
template <class Graph>
class Tarjan_dfs : public ll_dfs_template <Graph, true, true, true, false>
{
public:
    Tarjan_dfs(Graph& _G, node_t*& _G_SCC, bool*& _G_InStack,
               node_t*& _G_LowLink, ll_node_seq_vec& _Stack, node_t& _n)
        : ll_dfs_template<Graph, true, true, true, false>(_G),
          G(_G), G_SCC(_G_SCC), G_InStack(_G_InStack), G_LowLink(_G_LowLink),
          Stack(_Stack), n(_n){}

private:  // list of variables
    Graph& G;                 // the graph being traversed
    node_t*& G_SCC;           // per-node SCC representative (output)
    bool*& G_InStack;         // per-node "currently on Stack" flag
    node_t*& G_LowLink;       // per-node low-link value
    ll_node_seq_vec& Stack;   // Tarjan's node stack
    node_t& n;                // root node of this DFS

protected:
    // Called when a node is first reached: push it and seed its low-link.
    virtual void visit_pre(node_t t) {
        G.set_node_prop(G_InStack, t, true);
        Stack.push_back(t);
        G.set_node_prop(G_LowLink, t, t);
    }

    // Called after all descendants are done: fold in neighbors' low-links
    // and emit the component if t is its own root.
    virtual void visit_post(node_t t) {
        ll_edge_iterator iter;
        G.out_iter_begin(iter, t);
        FOREACH_OUTEDGE_ITER(k_idx, G, iter) {
            node_t k = iter.last_node;
            if (G_LowLink[k] < G_LowLink[t])
                G.set_node_prop(G_LowLink, t, G_LowLink[k]);
        }
        if (G_LowLink[t] == t) {
            // t is the root of an SCC: pop every node up to and including t
            // and label each with t as the component representative
            node_t w;
            w = Stack.pop_back() ;
            while (w != t) {
                G.set_node_prop(G_InStack, w, false);
                G.set_node_prop(G_SCC, w, t);
                w = Stack.pop_back() ;
            }
            G.set_node_prop(G_InStack, w, false);
            G.set_node_prop(G_SCC, w, t);
        }
    }

    // Only descend into nodes that are not already on the stack.
    virtual bool check_navigator(node_t t, edge_t t_idx) {
        return ( !G_InStack[t]);
    }
};


/**
 * Tarjan's SCC
 */
template <class Graph>
class ll_b_tarjan_scc : public ll_benchmark<Graph>
{
    node_t* G_SCC;   // per-node SCC representative, filled in by run()

public:

    /**
     * Create the benchmark
     */
    ll_b_tarjan_scc() : ll_benchmark<Graph>("Tarjan's SCC") {
        this->create_auto_array_for_nodes(G_SCC);
    }

    /**
     * Destroy the benchmark
     */
    virtual ~ll_b_tarjan_scc(void) {
    }

    /**
     * Run the benchmark
     *
     * @return the numerical result, if applicable
     */
    virtual double run(void) {

        //Initializations
        Graph& G = *this->_graph;
        ll_memory_helper m;

        ll_node_seq_vec Stack(omp_get_max_threads());
        bool* G_InStack = m.allocate<bool>(G.max_nodes());
        // NOTE(review): G_LowLink is not pre-initialized here -- visit_pre
        // seeds it for every node the DFS reaches before it is read.
        node_t* G_LowLink = m.allocate<node_t>(G.max_nodes());

        // parallel reset of the outputs; the DFS phase below is sequential
        #pragma omp parallel for
        for (node_t t0 = 0; t0 < G.max_nodes(); t0 ++) {
            G.set_node_prop(G_SCC, t0, LL_NIL_NODE);
            G.set_node_prop(G_InStack, t0, false);
        }

        // one DFS per still-unlabeled node covers every component
        for (node_t n = 0; n < G.max_nodes(); n ++) {
            if (G_SCC[n] == LL_NIL_NODE) {
                Tarjan_dfs<Graph> _DFS(G, G_SCC, G_InStack, G_LowLink,
                                       Stack, n);
                _DFS.prepare(n);
                _DFS.do_dfs();
            }
        }

        return 0;
    }

    /**
     * Finalize the benchmark
     *
     * @return the updated numerical result, if applicable
     */
    virtual double finalize(void) {
        // report the largest representative id seen (a cheap checksum)
        node_t max = 0;
        for (node_t n = 0; n < this->_graph->max_nodes(); n++) {
            if (G_SCC[n] > max) max = G_SCC[n];
        }
        return max;
    }

    /**
     * Print the results
     *
     * @param f the output file
     */
    virtual void print_results(FILE* f) {
        print_results_part(f, this->_graph, G_SCC);
    }
};

#endif
cg.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - CG This benchmark is an OpenMP C version of the NPB CG code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: M. Yarrow C. Kuszmaul OpenMP C version: S. Satoh --------------------------------------------------------------------*/ /* c--------------------------------------------------------------------- c Note: please observe that in the routine conj_grad three c implementations of the sparse matrix-vector multiply have c been supplied. The default matrix-vector multiply is not c loop unrolled. The alternate implementations are unrolled c to a depth of 2 and unrolled to a depth of 8. Please c experiment with these to find the fastest for your particular c architecture. If reporting timing results, any of these three may c be used without penalty. 
c---------------------------------------------------------------------
*/

#include "npb-C.h"
#include "npbparams.h"

/* upper bound on the number of nonzeros in the sparse matrix */
#define NZ NA*(NONZER+1)*(NONZER+1)+NA*(NONZER+2)
#ifdef _OPENARC_
/* re-state the NZ macro for the OpenARC source-to-source translator */
#pragma openarc #define NZ \NA*(\NONZER+1)*(\NONZER+1)+\NA*(\NONZER+2)
#endif

/* global variables */

/* common /partit_size/ */
/* problem size and the (1-based, inclusive) row/column partition bounds */
static int naa;
static int nzz;
static int firstrow;
static int lastrow;
static int firstcol;
static int lastcol;

/* common /main_int_mem/ */
/* sparse-matrix index arrays; declared one slot larger because the
   Fortran-derived code indexes from 1 */
static int colidx[NZ+1];	/* colidx[1:NZ] */
static int rowstr[NA+1+1];	/* rowstr[1:NA+1] */
static int iv[2*NA+1+1];	/* iv[1:2*NA+1] */
static int arow[NZ+1];		/* arow[1:NZ] */
static int acol[NZ+1];		/* acol[1:NZ] */

/* common /main_flt_mem/ */
/* matrix values and the CG work vectors (also 1-based) */
static double v[NA+1+1];	/* v[1:NA+1] */
static double aelt[NZ+1];	/* aelt[1:NZ] */
static double a[NZ+1];		/* a[1:NZ] */
static double x[NA+2+1];	/* x[1:NA+2] */
static double z[NA+2+1];	/* z[1:NA+2] */
static double p[NA+2+1];	/* p[1:NA+2] */
static double q[NA+2+1];	/* q[1:NA+2] */
static double r[NA+2+1];	/* r[1:NA+2] */
static double w[NA+2+1];	/* w[1:NA+2] */

/* common /urando/ */
/* state of the randlc() pseudo-random number generator */
static double amult;
static double tran;

// Static variables used in conj_grad().
static double d, sum, rho, rho0, alpha, beta; /* function declarations */ static void conj_grad (int colidx[NZ+1], int rowstr[NA+1+1], double x[NA+2+1], double z[NA+2+1], double a[NZ+1], double p[NA+2+1], double q[NA+2+1], double r[NA+2+1], double w[NA+2+1], double *rnorm); static void makea(int n, int nz, double a[NZ+1], int colidx[NZ+1], int rowstr[NA+1+1], int nonzer, int firstrow, int lastrow, int firstcol, int lastcol, double rcond, int arow[NZ+1], int acol[NZ+1], double aelt[NZ+1], double v[NA+1+1], int iv[2*NA+1+1], double shift ); static void sparse(double a[NZ+1], int colidx[NZ+1], int rowstr[NA+1+1], int n, int arow[NZ+1], int acol[NZ+1], double aelt[NZ+1], int firstrow, int lastrow, double x[NA+1+1], boolean mark[NA+1], int nzloc[NA+1], int nnza); static void sprnvc(int n, int nz, double v[], int iv[], int nzloc[], int mark[]); static int icnvrt(double x, int ipwr2); static void vecset(int n, double v[], int iv[], int *nzv, int i, double val); /*-------------------------------------------------------------------- program cg --------------------------------------------------------------------*/ int main(int argc, char **argv) { int i_main, j_main, k_main, it; int nthreads = 1; double zeta; double rnorm; double norm_temp11; double norm_temp12; double t, mflops; char classT = 'U'; boolean verified; double zeta_verify_value, epsilon; //////////////////////////////////// // Used for inlining conj_grad(). 
// //////////////////////////////////// int i, j, k; int cgit, cgitmax = 25; firstrow = 1; lastrow = NA; firstcol = 1; lastcol = NA; if (NA == 1400 && NONZER == 7 && NITER == 15 && SHIFT == 10.0) { classT = 'S'; zeta_verify_value = 8.5971775078648; } else if (NA == 7000 && NONZER == 8 && NITER == 15 && SHIFT == 12.0) { classT = 'W'; zeta_verify_value = 10.362595087124; } else if (NA == 14000 && NONZER == 11 && NITER == 15 && SHIFT == 20.0) { classT = 'A'; zeta_verify_value = 17.130235054029; } else if (NA == 75000 && NONZER == 13 && NITER == 75 && SHIFT == 60.0) { classT = 'B'; zeta_verify_value = 22.712745482631; } else if (NA == 150000 && NONZER == 15 && NITER == 75 && SHIFT == 110.0) { classT = 'C'; zeta_verify_value = 28.973605592845; } else { classT = 'U'; } printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version" " - CG Benchmark\n"); printf(" Size: %10d\n", NA); printf(" Iterations: %5d\n", NITER); naa = NA; nzz = NZ; timer_clear(2); timer_clear(3); timer_clear(4); timer_start(2); /*-------------------------------------------------------------------- c Initialize random number generator c-------------------------------------------------------------------*/ tran = 314159265.0; amult = 1220703125.0; zeta = randlc( &tran, amult ); /*-------------------------------------------------------------------- c c-------------------------------------------------------------------*/ timer_start(4); makea(naa, nzz, a, colidx, rowstr, NONZER, firstrow, lastrow, firstcol, lastcol, RCOND, arow, acol, aelt, v, iv, SHIFT); timer_stop(4); timer_start(3); /*--------------------------------------------------------------------- c Note: as a result of the above call to makea: c values of j used in indexing rowstr go from 1 --> lastrow-firstrow+1 c values of colidx which are col indexes go from firstcol --> lastcol c So: c Shift the col index vals from actual (firstcol --> lastcol ) c to local, i.e., (1 --> lastcol-firstcol+1) 
c---------------------------------------------------------------------*/ #pragma acc data \ create(x[0:NA+3]) \ create(z[0:NA+3]) \ create(p[0:NA+3]) \ create(q[0:NA+3]) \ create(r[0:NA+3]) \ create(w[0:NA+3]) \ copyin(a[0:NZ+1]) \ copyin(colidx[0:NZ+1]) \ copyin(rowstr[0:NA+2]) { timer_stop(3); // R/O Shared scalar: lastrow, firstrow, firstcol // R/O Shared arrays: rowstr[NA+1+1] // R/W Shared arrays: colidx[NZ+1] // R/W Private scalar: j_main, k_main #pragma acc kernels loop gang worker for (j_main = 1; j_main <= lastrow - firstrow + 1; j_main++) { for (k_main = rowstr[j_main]; k_main < rowstr[j_main+1]; k_main++) { colidx[k_main] = colidx[k_main] - firstcol + 1; } } /*-------------------------------------------------------------------- c set starting vector to (1, 1, .... 1) c-------------------------------------------------------------------*/ // R/W Shared arrays: x[NA+2+1] // R/W Private scalar: i_main #pragma acc kernels loop gang worker for (i_main = 1; i_main <= NA+1; i_main++) { x[i_main] = 1.0; } // R/W Shared scalar: zeta zeta = 0.0; /*------------------------------------------------------------------- c----> c Do one iteration untimed to init all code and data page tables c----> (then reinit, start timing, to niter its) c-------------------------------------------------------------------*/ for (it = 1; it <= 1; it++) { /*-------------------------------------------------------------------- c The call to the conjugate gradient routine: c-------------------------------------------------------------------*/ //conj_grad (colidx, rowstr, x, z, a, p, q, r, w, &rnorm); cgitmax = 25; // R/W Shared scalars: rho (function-static) rho = 0.0; /*-------------------------------------------------------------------- c Initialize the CG algorithm: c-------------------------------------------------------------------*/ // R/W Shared arrays: x[NA+2+1], r[NA+2+1] // R/W Shared arrays: q[NA+2+1], z[NA+2+1], r[NA+2+1], p[NA+2+1], w[NA+2+1] // R/W Private scalars: j #pragma 
acc kernels loop gang worker for (j = 1; j <= NA+1; j++) { q[j] = 0.0; z[j] = 0.0; r[j] = x[j]; p[j] = r[j]; w[j] = 0.0; } /*-------------------------------------------------------------------- c rho = r.r c Now, obtain the norm of r: First, sum squares of r elements locally... c-------------------------------------------------------------------*/ // R/O Shared scalars: lastcol, firstcol // R/O Shared arrays: x[NA+2+1] // R/W Shared scalars: rho (function-static) // R/W Private scalars: j #pragma acc kernels loop gang worker for (j = 1; j <= lastcol-firstcol+1; j++) { rho = rho + x[j]*x[j]; } /*-------------------------------------------------------------------- c----> c The conj grad iteration loop c----> c-------------------------------------------------------------------*/ for (cgit = 1; cgit <= cgitmax; cgit++) { // R/W Shared scalars: d, rho, rho0 (function-static) { rho0 = rho; d = 0.0; rho = 0.0; } /* end single */ /*-------------------------------------------------------------------- c q = A.p c The partition submatrix-vector multiply: use workspace w c--------------------------------------------------------------------- C C NOTE: this version of the multiply is actually (slightly: maybe %5) C faster on the sp2 on 16 nodes than is the unrolled-by-2 version C below. On the Cray t3d, the reverse is true, i.e., the C unrolled-by-two version is some 10% faster. C The unrolled-by-8 version below is significantly faster C on the Cray t3d - overall speed of code is 1.5 times faster. 
*/ /* rolled version */ // R/O Shared scalars: lastrow, firstrow // R/O Shared arrays: rowstr[NA+1+1], a[NZ+1], p[NA+2+1], colidx[NZ+1], // R/W Shared arrays: w[NA+2+1] // R/W Private scalars: j, k, sum #pragma acc kernels loop gang worker independent private(sum) for (j = 1; j <= lastrow-firstrow+1; j++) { sum = 0.0; for (k = rowstr[j]; k < rowstr[j+1]; k++) { sum = sum + a[k]*p[colidx[k]]; } w[j] = sum; } // R/O Shared scalars: lastcol, firstcol // R/O Shared arrays: w[NA+2+1] // R/W Shared arrays: q[NA+2+1] // R/W Private scalars: j #pragma acc kernels loop gang worker for (j = 1; j <= lastcol-firstcol+1; j++) { q[j] = w[j]; } /*-------------------------------------------------------------------- c Clear w for reuse... c-------------------------------------------------------------------*/ // R/O Shared scalars: lastcol, firstcol // R/W Shared arrays: w[NA+2+1] // R/W Private scalars: j /*-------------------------------------------------------------------- c Obtain p.q c-------------------------------------------------------------------*/ // R/O Shared scalars: lastcol, firstcol // R/O Shared arrays: p[NA+2+1], q[NA+2+1] // R/W Shared scalars: d (function-static) // R/W Private scalars: j #pragma acc kernels loop gang worker for (j = 1; j <= lastcol-firstcol+1; j++) { w[j] = 0.0; d = d + p[j]*q[j]; } /*-------------------------------------------------------------------- c Obtain alpha = rho / (p.q) c-------------------------------------------------------------------*/ // R/O Shared scalars: rho0, d (function-static) // R/W Shared scalars: alpha (function-static) alpha = rho0 / d; /*-------------------------------------------------------------------- c Save a temporary of rho c-------------------------------------------------------------------*/ /* rho0 = rho;*/ /*--------------------------------------------------------------------- c Obtain z = z + alpha*p c and r = r - alpha*q c---------------------------------------------------------------------*/ // R/O Shared 
scalars: lastcol, firstcol // R/O Shared scalars: alpha (function-static) // R/O Shared arrays: p[NA+2+1], q[NA+2+1] // R/W Shared arrays: z[NA+2+1], r[NA+2+1] // R/W Private scalars: j #pragma acc kernels loop gang worker for (j = 1; j <= lastcol-firstcol+1; j++) { z[j] = z[j] + alpha*p[j]; r[j] = r[j] - alpha*q[j]; } /*--------------------------------------------------------------------- c rho = r.r c Now, obtain the norm of r: First, sum squares of r elements locally... c---------------------------------------------------------------------*/ // R/O Shared scalars: lastcol, firstcol // R/O Shared arrays: r[NA+2+1] // R/W Shared scalars: rho (function-static) // R/W Private scalars: j #pragma acc kernels loop gang worker for (j = 1; j <= lastcol-firstcol+1; j++) { rho = rho + r[j]*r[j]; } /*-------------------------------------------------------------------- c Obtain beta: c-------------------------------------------------------------------*/ // R/O Shared scalars: rho0, rho (function-static) // R/W Shared scalars: beta (function-static) beta = rho / rho0; /*-------------------------------------------------------------------- c p = r + beta*p c-------------------------------------------------------------------*/ // R/O Shared scalars: lastcol, firstcol // R/O Shared scalars: beta (function-static) // R/O Shared arrays: r[NA+2+1] // R/W Shared arrays: p[NA+2+1] // R/W Private scalars: j #pragma acc kernels loop gang worker for (j = 1; j <= lastcol-firstcol+1; j++) { p[j] = r[j] + beta*p[j]; } } /* end of do cgit=1,cgitmax */ /*--------------------------------------------------------------------- c Compute residual norm explicitly: ||r|| = ||x - A.z|| c First, form A.z c The partition submatrix-vector multiply c---------------------------------------------------------------------*/ // R/W Shared scalars: sum (function-static) sum = 0.0; // R/O Shared scalars: lastcol, firstcol // R/O Shared arrays: rowstr[NA+1+1], a[NZ+1], colidx[NZ+1], z[NA+2+1] // R/W Shared 
arrays: w[NA+2+1] // R/W Private scalars: j,d,k #pragma acc kernels loop gang worker independent private(d) for (j = 1; j <= lastrow-firstrow+1; j++) { d = 0.0; for (k = rowstr[j]; k <= rowstr[j+1]-1; k++) { d = d + a[k]*z[colidx[k]]; } w[j] = d; } // R/O Shared scalars: lastcol, firstcol // R/O Shared arrays: w[NA+2+1] // R/W Shared arrays: r[NA+2+1] // R/W Private scalars: j #pragma acc kernels loop gang worker for (j = 1; j <= lastcol-firstcol+1; j++) { r[j] = w[j]; } /*-------------------------------------------------------------------- c At this point, r contains A.z c-------------------------------------------------------------------*/ // R/O Shared scalars: lastcol, firstcol // R/O Shared arrays: r[NA+2+1], x[NA+2+1] // R/W Shared scalars: d, sum (function-static) // R/W Private scalars: j #pragma acc kernels loop gang worker independent private(d) for (j = 1; j <= lastcol-firstcol+1; j++) { d = x[j] - r[j]; sum = sum + d*d; } // R/O Shared scalars: sum (function-static) // R/W Shared scalars: rnorm { //(*rnorm) = sqrtf(sum); rnorm = sqrtf(sum); } /* end single */ /*-------------------------------------------------------------------- c zeta = shift + 1/(x.z) c So, first: (x.z) c Also, find norm of z c So, first: (z.z) c-------------------------------------------------------------------*/ // R/W Shared scalars: norm_temp11, norm_temp12 { norm_temp11 = 0.0; norm_temp12 = 0.0; } /* end single */ // R/O Shared scalars: lastcol, firstcol // R/O Shared arrays: x[NA+2+1], z[NA+2+1] // R/W Shared scalars: norm_temp11, norm_temp12 // R/W Private scalars: j #pragma acc kernels loop gang worker for (j_main = 1; j_main <= lastcol-firstcol+1; j_main++) { norm_temp11 = norm_temp11 + x[j_main]*z[j_main]; norm_temp12 = norm_temp12 + z[j_main]*z[j_main]; } // R/w Shared scalars: norm_temp12 norm_temp12 = 1.0 / sqrt( norm_temp12 ); /*-------------------------------------------------------------------- c Normalize z to obtain x 
c-------------------------------------------------------------------*/ // R/O Shared scalars: lastcol, firstcol, norm_temp12 // R/O Shared arrays: z[NA+2+1] // R/W Shared arrays: x[NA+2+1] // R/W Private scalars: j #pragma acc kernels loop gang worker for (j_main = 1; j_main <= lastcol-firstcol+1; j_main++) { x[j_main] = norm_temp12*z[j_main]; } } /* end of do one iteration untimed */ /*-------------------------------------------------------------------- c set starting vector to (1, 1, .... 1) c-------------------------------------------------------------------*/ // R/W Shared arrays: x[NA+2+1] // R/W Private scalars: i_main #pragma acc kernels loop gang worker for (i_main = 1; i_main <= NA+1; i_main++) { x[i_main] = 1.0; } // R/W Shared scalars: zeta zeta = 0.0; // } /* end parallel */ timer_clear( 1 ); timer_start( 1 ); /*-------------------------------------------------------------------- c----> c Main Iteration for inverse power method c----> c-------------------------------------------------------------------*/ //#pragma omp parallel private(it,i_main,j_main,k_main) // { for (it = 1; it <= NITER; it++) { /*-------------------------------------------------------------------- c The call to the conjugate gradient routine: c-------------------------------------------------------------------*/ //conj_grad(colidx, rowstr, x, z, a, p, q, r, w, &rnorm); cgitmax = 25; // R/W Shared scalars: rho (function-static) rho = 0.0; /*-------------------------------------------------------------------- c Initialize the CG algorithm: c-------------------------------------------------------------------*/ // R/W Shared arrays: x[NA+2+1], r[NA+2+1] // R/W Shared arrays: q[NA+2+1], z[NA+2+1], r[NA+2+1], p[NA+2+1], w[NA+2+1] // R/W Private scalars: j #pragma acc kernels loop gang worker for (j = 1; j <= NA+1; j++) { q[j] = 0.0; z[j] = 0.0; r[j] = x[j]; p[j] = r[j]; w[j] = 0.0; } /*-------------------------------------------------------------------- c rho = r.r c Now, obtain the norm 
of r: First, sum squares of r elements locally... c-------------------------------------------------------------------*/ // R/O Shared scalars: lastcol, firstcol // R/O Shared arrays: x[NA+2+1] // R/W Shared scalars: rho (function-static) // R/W Private scalars: j #pragma acc kernels loop gang worker for (j = 1; j <= lastcol-firstcol+1; j++) { rho = rho + x[j]*x[j]; } /*-------------------------------------------------------------------- c----> c The conj grad iteration loop c----> c-------------------------------------------------------------------*/ for (cgit = 1; cgit <= cgitmax; cgit++) { // R/W Shared scalars: d, rho, rho0 (function-static) { rho0 = rho; d = 0.0; rho = 0.0; } /* end single */ /*-------------------------------------------------------------------- c q = A.p c The partition submatrix-vector multiply: use workspace w c--------------------------------------------------------------------- C C NOTE: this version of the multiply is actually (slightly: maybe %5) C faster on the sp2 on 16 nodes than is the unrolled-by-2 version C below. On the Cray t3d, the reverse is true, i.e., the C unrolled-by-two version is some 10% faster. C The unrolled-by-8 version below is significantly faster C on the Cray t3d - overall speed of code is 1.5 times faster. 
*/ /* rolled version */ // R/O Shared scalars: lastrow, firstrow // R/O Shared arrays: rowstr[NA+1+1], a[NZ+1], p[NA+2+1], colidx[NZ+1], // R/W Shared arrays: w[NA+2+1] // R/W Private scalars: j, k, sum #pragma acc kernels loop gang worker independent private(sum) for (j = 1; j <= lastrow-firstrow+1; j++) { sum = 0.0; for (k = rowstr[j]; k < rowstr[j+1]; k++) { sum = sum + a[k]*p[colidx[k]]; } w[j] = sum; } // R/O Shared scalars: lastcol, firstcol // R/O Shared arrays: w[NA+2+1] // R/W Shared arrays: q[NA+2+1] // R/W Private scalars: j #pragma acc kernels loop gang worker for (j = 1; j <= lastcol-firstcol+1; j++) { q[j] = w[j]; } /*-------------------------------------------------------------------- c Clear w for reuse... c-------------------------------------------------------------------*/ // R/O Shared scalars: lastcol, firstcol // R/W Shared arrays: w[NA+2+1] // R/W Private scalars: j /*-------------------------------------------------------------------- c Obtain p.q c-------------------------------------------------------------------*/ // R/O Shared scalars: lastcol, firstcol // R/O Shared arrays: p[NA+2+1], q[NA+2+1] // R/W Shared scalars: d (function-static) // R/W Private scalars: j #pragma acc kernels loop gang worker for (j = 1; j <= lastcol-firstcol+1; j++) { w[j] = 0.0; d = d + p[j]*q[j]; } /*-------------------------------------------------------------------- c Obtain alpha = rho / (p.q) c-------------------------------------------------------------------*/ // R/O Shared scalars: rho0, d (function-static) // R/W Shared scalars: alpha (function-static) alpha = rho0 / d; /*-------------------------------------------------------------------- c Save a temporary of rho c-------------------------------------------------------------------*/ /* rho0 = rho;*/ /*--------------------------------------------------------------------- c Obtain z = z + alpha*p c and r = r - alpha*q c---------------------------------------------------------------------*/ // R/O Shared 
scalars: lastcol, firstcol // R/O Shared scalars: alpha (function-static) // R/O Shared arrays: p[NA+2+1], q[NA+2+1] // R/W Shared arrays: z[NA+2+1], r[NA+2+1] // R/W Private scalars: j #pragma acc kernels loop gang worker for (j = 1; j <= lastcol-firstcol+1; j++) { z[j] = z[j] + alpha*p[j]; r[j] = r[j] - alpha*q[j]; } /*--------------------------------------------------------------------- c rho = r.r c Now, obtain the norm of r: First, sum squares of r elements locally... c---------------------------------------------------------------------*/ // R/O Shared scalars: lastcol, firstcol // R/O Shared arrays: r[NA+2+1] // R/W Shared scalars: rho (function-static) // R/W Private scalars: j #pragma acc kernels loop gang worker for (j = 1; j <= lastcol-firstcol+1; j++) { rho = rho + r[j]*r[j]; } /*-------------------------------------------------------------------- c Obtain beta: c-------------------------------------------------------------------*/ // R/O Shared scalars: rho0, rho (function-static) // R/W Shared scalars: beta (function-static) beta = rho / rho0; /*-------------------------------------------------------------------- c p = r + beta*p c-------------------------------------------------------------------*/ // R/O Shared scalars: lastcol, firstcol // R/O Shared scalars: beta (function-static) // R/O Shared arrays: r[NA+2+1] // R/W Shared arrays: p[NA+2+1] // R/W Private scalars: j #pragma acc kernels loop gang worker for (j = 1; j <= lastcol-firstcol+1; j++) { p[j] = r[j] + beta*p[j]; } } /* end of do cgit=1,cgitmax */ /*--------------------------------------------------------------------- c Compute residual norm explicitly: ||r|| = ||x - A.z|| c First, form A.z c The partition submatrix-vector multiply c---------------------------------------------------------------------*/ // R/W Shared scalars: sum (function-static) sum = 0.0; // R/O Shared scalars: lastcol, firstcol // R/O Shared arrays: rowstr[NA+1+1], a[NZ+1], colidx[NZ+1], z[NA+2+1] // R/W Shared 
arrays: w[NA+2+1] // R/W Private scalars: j,d,k #pragma acc kernels loop gang worker independent private(d) for (j = 1; j <= lastrow-firstrow+1; j++) { d = 0.0; for (k = rowstr[j]; k <= rowstr[j+1]-1; k++) { d = d + a[k]*z[colidx[k]]; } w[j] = d; } // R/O Shared scalars: lastcol, firstcol // R/O Shared arrays: w[NA+2+1] // R/W Shared arrays: r[NA+2+1] // R/W Private scalars: j #pragma acc kernels loop gang worker for (j = 1; j <= lastcol-firstcol+1; j++) { r[j] = w[j]; } /*-------------------------------------------------------------------- c At this point, r contains A.z c-------------------------------------------------------------------*/ // R/O Shared scalars: lastcol, firstcol // R/O Shared arrays: r[NA+2+1], x[NA+2+1] // R/W Shared scalars: d, sum (function-static) // R/W Private scalars: j #pragma acc kernels loop gang worker independent private(d) for (j = 1; j <= lastcol-firstcol+1; j++) { d = x[j] - r[j]; sum = sum + d*d; } // R/O Shared scalars: sum (function-static) // R/W Shared scalars: rnorm { //(*rnorm) = sqrt(sum); rnorm = sqrt(sum); } /* end single */ /*-------------------------------------------------------------------- c zeta = shift + 1/(x.z) c So, first: (x.z) c Also, find norm of z c So, first: (z.z) c-------------------------------------------------------------------*/ // R/W Shared scalars: norm_temp11, norm_temp12 { norm_temp11 = 0.0; norm_temp12 = 0.0; } /* end single */ // R/O Shared scalars: lastcol, firstcol // R/O Shared arrays: x[NA+2+1], z[NA+2+1] // R/W Shared scalars: norm_temp11, norm_temp12 // R/W Private scalars: j_main #pragma acc kernels loop gang worker for (j_main = 1; j_main <= lastcol-firstcol+1; j_main++) { norm_temp11 = norm_temp11 + x[j_main]*z[j_main]; norm_temp12 = norm_temp12 + z[j_main]*z[j_main]; } // R/O Shared scalars: norm_temp11 // R/W Shared scalars: norm_temp12, zeta { norm_temp12 = 1.0 / sqrt( norm_temp12 ); zeta = SHIFT + 1.0 / norm_temp11; } /* end single */ { if( it == 1 ) { printf(" iteration ||r|| 
zeta\n"); } printf(" %5d %20.14e%20.13e\n", it, rnorm, zeta); } /* end master */ /*-------------------------------------------------------------------- c Normalize z to obtain x c-------------------------------------------------------------------*/ // R/O Shared scalars: lastcol, firstcol, norm_temp12 // R/O Shared arrays: z[NA+2+1] // R/W Shared arrays: x[NA+2+1] // R/W Private scalars: j #pragma acc kernels loop gang worker for (j_main = 1; j_main <= lastcol-firstcol+1; j_main++) { x[j_main] = norm_temp12*z[j_main]; } } /* end of main iter inv pow meth */ #if defined(_OPENMP) nthreads = omp_get_num_threads(); #endif /* _OPENMP */ } /* end parallel */ timer_stop( 1 ); timer_stop( 2 ); /*-------------------------------------------------------------------- c End of timed section c-------------------------------------------------------------------*/ t = timer_read( 1 ); printf(" Benchmark completed\n"); epsilon = 1.0e-10; if (classT != 'U') { if (fabs(zeta - zeta_verify_value) <= epsilon) { verified = TRUE; printf(" VERIFICATION SUCCESSFUL\n"); printf(" Zeta is %20.12e\n", zeta); printf(" Error is %20.12e\n", zeta - zeta_verify_value); } else { verified = FALSE; printf(" VERIFICATION FAILED\n"); printf(" Zeta %20.12e\n", zeta); printf(" The correct zeta is %20.12e\n", zeta_verify_value); } } else { verified = FALSE; printf(" Problem size unknown\n"); printf(" NO VERIFICATION PERFORMED\n"); } if ( t != 0.0 ) { mflops = (2.0*NITER*NA) * (3.0+(NONZER*(NONZER+1)) + 25.0*(5.0+(NONZER*(NONZER+1))) + 3.0 ) / t / 1000000.0; } else { mflops = 0.0; } c_print_results("CG", classT, NA, 0, 0, NITER, nthreads, t, mflops, " floating point", verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, CS7); printf("makea() execution time = %12.4f\n", timer_read(4)); printf("CUDA Initialization time = %12.4f\n", timer_read(3)); printf("Total execution time = %12.4f\n", timer_read(2)); return 0; } /*--------------------------------------------------------------------- c generate 
the test problem for benchmark 6
c       makea generates a sparse matrix with a
c       prescribed sparsity distribution
c
c       parameter    type        usage
c
c       input
c
c       n            i           number of cols/rows of matrix
c       nz           i           nonzeros as declared array size
c       rcond        r*8         condition number
c       shift        r*8         main diagonal shift
c
c       output
c
c       a            r*8         array for nonzeros
c       colidx       i           col indices
c       rowstr       i           row pointers
c
c       workspace
c
c       iv, arow, acol i
c       v, aelt        r*8
c---------------------------------------------------------------------*/
static void makea(
    int n,
    int nz,
    double a[NZ+1],        /* a[1:nz] */
    int colidx[NZ+1],      /* colidx[1:nz] */
    int rowstr[NA+1+1],    /* rowstr[1:n+1] */
    int nonzer,
    int firstrow,
    int lastrow,
    int firstcol,
    int lastcol,
    double rcond,
    int arow[NZ+1],        /* arow[1:nz] */
    int acol[NZ+1],        /* acol[1:nz] */
    double aelt[NZ+1],     /* aelt[1:nz] */
    double v[NA+1+1],      /* v[1:n+1] */
    int iv[2*NA+1+1],      /* iv[1:2*n+1] */
    double shift )
{
    int i, nnza, iouter, ivelt, ivelt1, irow, nzv;

    /*--------------------------------------------------------------------
    c      nonzer is approximately  (int(sqrt(nnza /n)));
    c-------------------------------------------------------------------*/
    double size, ratio, scale;
    int jcol;

    /* size decays geometrically from 1.0 so that the outer-product terms
       shrink row by row, giving the prescribed condition number rcond. */
    size = 1.0;
    ratio = pow(rcond, (1.0 / (double)n));
    nnza = 0;

    /*---------------------------------------------------------------------
    c  Initialize colidx(n+1 .. 2n) to zero.
    c  Used by sprnvc to mark nonzero positions
    c---------------------------------------------------------------------*/
#pragma acc kernels loop gang worker pcopyout(colidx)
    for (i = 1; i <= n; i++) {
        colidx[n+i] = 0;
    }

    /* Build the raw triple list (arow, acol, aelt): for each outer index,
       generate a sparse random vector and accumulate its outer product. */
    for (iouter = 1; iouter <= n; iouter++) {
        nzv = nonzer;
        /* colidx[0..n] serves as nzloc workspace, colidx[n..2n] as the
           mark workspace for sprnvc. */
        sprnvc(n, nzv, v, iv, &(colidx[0]), &(colidx[n]));
        /* Force a 0.5 entry at position iouter (the diagonal direction). */
        vecset(n, v, iv, &nzv, iouter, 0.5);
        for (ivelt = 1; ivelt <= nzv; ivelt++) {
            jcol = iv[ivelt];
            if (jcol >= firstcol && jcol <= lastcol) {
                scale = size * v[ivelt];
                for (ivelt1 = 1; ivelt1 <= nzv; ivelt1++) {
                    irow = iv[ivelt1];
                    if (irow >= firstrow && irow <= lastrow) {
                        nnza = nnza + 1;
                        if (nnza > nz) {
                            printf("Space for matrix elements exceeded in"
                                   " makea\n");
                            printf("nnza, nzmax = %d, %d\n", nnza, nz);
                            printf("iouter = %d\n", iouter);
                            exit(1);
                        }
                        acol[nnza] = jcol;
                        arow[nnza] = irow;
                        aelt[nnza] = v[ivelt1] * scale;
                    }
                }
            }
        }
        size = size * ratio;
    }

    /*---------------------------------------------------------------------
    c       ... add the identity * rcond to the generated matrix to bound
    c           the smallest eigenvalue from below by rcond
    c---------------------------------------------------------------------*/
    for (i = firstrow; i <= lastrow; i++) {
        if (i >= firstcol && i <= lastcol) {
            iouter = n + i;
            nnza = nnza + 1;
            if (nnza > nz) {
                printf("Space for matrix elements exceeded in makea\n");
                printf("nnza, nzmax = %d, %d\n", nnza, nz);
                printf("iouter = %d\n", iouter);
                exit(1);
            }
            acol[nnza] = i;
            arow[nnza] = i;
            aelt[nnza] = rcond - shift;
        }
    }

    /*---------------------------------------------------------------------
    c       ... make the sparse matrix from list of elements with duplicates
    c           (v and iv are used as workspace)
    c---------------------------------------------------------------------*/
    sparse(a, colidx, rowstr, n, arow, acol, aelt,
           firstrow, lastrow, v, &(iv[0]), &(iv[n]), nnza);
}

/*---------------------------------------------------
c       generate a sparse matrix from a list of
c       [col, row, element] tri
c---------------------------------------------------*/
static void sparse(
    double a[NZ+1],        /* a[1:*] */
    int colidx[NZ+1],      /* colidx[1:*] */
    int rowstr[NA+1+1],    /* rowstr[1:*] */
    int n,
    int arow[NZ+1],        /* arow[1:*] */
    int acol[NZ+1],        /* acol[1:*] */
    double aelt[NZ+1],     /* aelt[1:*] */
    int firstrow,
    int lastrow,
    double x[NA+1+1],      /* x[1:n] */
    boolean mark[NA+1],    /* mark[1:n] */
    int nzloc[NA+1],       /* nzloc[1:n] */
    int nnza)
/*---------------------------------------------------------------------
c       rows range from firstrow to lastrow
c       the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
c---------------------------------------------------------------------*/
{
    int nrows;
    int i, j, jajp1, nza, k, nzrow;
    double xi;

    /*--------------------------------------------------------------------
    c    how many rows of result
    c-------------------------------------------------------------------*/
    nrows = lastrow - firstrow + 1;

    /*--------------------------------------------------------------------
    c     ...count the number of triples in each row
    c-------------------------------------------------------------------*/
#pragma acc kernels loop gang worker independent \
 pcopyout(rowstr[0:NA+1+1]) create(mark[0:NA+1])
    for (j = 1; j <= n; j++) {
        rowstr[j] = 0;
        mark[j] = FALSE;
    }
    rowstr[n+1] = 0;

    /* Histogram the triples by row (offset by one so the prefix sum below
       yields row start pointers directly). */
    for (nza = 1; nza <= nnza; nza++) {
        j = (arow[nza] - firstrow + 1) + 1;
        rowstr[j] = rowstr[j] + 1;
    }

    /* Exclusive prefix sum: rowstr[j] becomes the start of row j. */
    rowstr[1] = 1;
    for (j = 2; j <= nrows+1; j++) {
        rowstr[j] = rowstr[j] + rowstr[j-1];
    }

    /*---------------------------------------------------------------------
    c     ... rowstr(j) now is the location of the first nonzero
    c           of row j of a
    c---------------------------------------------------------------------*/

    /*--------------------------------------------------------------------
    c     ... do a bucket sort of the triples on the row index
    c-------------------------------------------------------------------*/
    /* NOTE: rowstr[j] is advanced as row j fills; it is restored below. */
    for (nza = 1; nza <= nnza; nza++) {
        j = arow[nza] - firstrow + 1;
        k = rowstr[j];
        a[k] = aelt[nza];
        colidx[k] = acol[nza];
        rowstr[j] = rowstr[j] + 1;
    }

    /*--------------------------------------------------------------------
    c       ... rowstr(j) now points to the first element of row j+1
    c-------------------------------------------------------------------*/
    /* Shift pointers back down by one row to restore row-start semantics. */
    for (j = nrows; j >= 1; j--) {
        rowstr[j+1] = rowstr[j];
    }
    rowstr[1] = 1;

    /*--------------------------------------------------------------------
    c       ... generate the actual output rows by adding elements
    c-------------------------------------------------------------------*/
    nza = 0;
#pragma acc kernels loop gang worker pcopyout(x, mark)
    for (i = 1; i <= n; i++) {
        x[i] = 0.0;
        mark[i] = FALSE;
    }

    /* Merge duplicate (row, col) triples: scatter each row into the dense
       accumulator x, track touched columns in nzloc, then gather the
       nonzeros back out compactly. */
    jajp1 = rowstr[1];
    for (j = 1; j <= nrows; j++) {
        nzrow = 0;

        /*--------------------------------------------------------------------
        c          ...loop over the jth row of a
        c-------------------------------------------------------------------*/
        for (k = jajp1; k < rowstr[j+1]; k++) {
            i = colidx[k];
            x[i] = x[i] + a[k];
            if ( mark[i] == FALSE && x[i] != 0.0) {
                mark[i] = TRUE;
                nzrow = nzrow + 1;
                nzloc[nzrow] = i;
            }
        }

        /*--------------------------------------------------------------------
        c          ... extract the nonzeros of this row
        c-------------------------------------------------------------------*/
        for (k = 1; k <= nzrow; k++) {
            i = nzloc[k];
            mark[i] = FALSE;
            xi = x[i];
            x[i] = 0.0;
            if (xi != 0.0) {
                nza = nza + 1;
                a[nza] = xi;
                colidx[nza] = i;
            }
        }
        jajp1 = rowstr[j+1];
        rowstr[j+1] = nza + rowstr[1];
    }
}

/*---------------------------------------------------------------------
c       generate a sparse n-vector (v, iv)
c       having nzv nonzeros
c
c       mark(i) is set to 1 if position i is nonzero.
c       mark is all zero on entry and is reset to all zero before exit
c       this corrects a performance bug found by John G. Lewis, caused by
c       reinitialization of mark on every one of the n calls to sprnvc
---------------------------------------------------------------------*/
static void sprnvc(
    int n,
    int nz,
    double v[],     /* v[1:*] */
    int iv[],       /* iv[1:*] */
    int nzloc[],    /* nzloc[1:n] */
    int mark[] )    /* mark[1:n] */
{
    int nn1;
    int nzrow, nzv, ii, i;
    double vecelt, vecloc;

    nzv = 0;
    nzrow = 0;
    nn1 = 1;
    do {
        nn1 = 2 * nn1;
    } while (nn1 < n);

    /*--------------------------------------------------------------------
    c    nn1 is the smallest power of two not less than n
    c-------------------------------------------------------------------*/
    while (nzv < nz) {
        vecelt = randlc(&tran, amult);

        /*--------------------------------------------------------------------
        c   generate an integer between 1 and n in a portable manner
        c-------------------------------------------------------------------*/
        vecloc = randlc(&tran, amult);
        i = icnvrt(vecloc, nn1) + 1;
        if (i > n) continue;

        /*--------------------------------------------------------------------
        c  was this integer generated already?
        c-------------------------------------------------------------------*/
        if (mark[i] == 0) {
            mark[i] = 1;
            nzrow = nzrow + 1;
            nzloc[nzrow] = i;
            nzv = nzv + 1;
            v[nzv] = vecelt;
            iv[nzv] = i;
        }
    }

    /* Reset only the marks we set (see header note about the Lewis fix). */
    for (ii = 1; ii <= nzrow; ii++) {
        i = nzloc[ii];
        mark[i] = 0;
    }
}

/*---------------------------------------------------------------------
* scale a double precision number x in (0,1) by a power of 2 and chop it
*---------------------------------------------------------------------*/
static int icnvrt(double x, int ipwr2) {
    return ((int)(ipwr2 * x));
}

/*--------------------------------------------------------------------
c       set ith element of sparse vector (v, iv) with
c       nzv nonzeros to val
c-------------------------------------------------------------------*/
static void vecset(
    int n,
    double v[],     /* v[1:*] */
    int iv[],       /* iv[1:*] */
    int *nzv,
    int i,
    double val)
{
    int k;
    boolean set;

    /* Overwrite in place if index i is already present... */
    set = FALSE;
    for (k = 1; k <= *nzv; k++) {
        if (iv[k] == i) {
            v[k] = val;
            set = TRUE;
        }
    }
    /* ...otherwise append it as a new nonzero. */
    if (set == FALSE) {
        *nzv = *nzv + 1;
        v[*nzv] = val;
        iv[*nzv] = i;
    }
}
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Frontend/OpenMP/OMPContext.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; struct OMPTraitProperty; struct OMPTraitSelector; struct OMPTraitSet; class OMPTraitInfo; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. 
/// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class ParsingOpenMPDirectiveRAII; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; mutable IdentifierInfo *Ident_abstract; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. 
IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool, Ident_Bool - cached IdentifierInfos for "vector" /// and "bool" fast comparison. Only present if AltiVec or ZVector are /// enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; IdentifierInfo *Ident_Bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. 
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> FPEvalMethodHandler; std::unique_ptr<PragmaHandler> FloatControlHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSFunction; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> MSFenvAccess; std::unique_ptr<PragmaHandler> MSAllocText; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFenvAccessHandler; std::unique_ptr<PragmaHandler> 
STDCFenvRoundHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// Parsing OpenMP directive mode. bool OpenMPDirectiveParsing = false; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp, it is for a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// Current kind of OpenMP clause OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown; /// RAII class that manages the template parameter depth. 
class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } void setAddedDepth(unsigned D) { Depth = Depth - AddedLevels + D; AddedLevels = D; } unsigned getDepth() const { return Depth; } unsigned getOriginalDepth() const { return Depth - AddedLevels; } }; /// Factory object for creating ParsedAttr objects. AttributeFactory AttrFactory; /// Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; void MaybeDestroyTemplateIds() { if (!TemplateIds.empty() && (Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens())) DestroyTemplateIds(); } void DestroyTemplateIds(); /// RAII object to destroy TemplateIdAnnotations where possible, from a /// likely-good position during parsing. struct DestroyTemplateIdAnnotationsRAIIObj { Parser &Self; DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {} ~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); } }; /// Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; /// Tracker for '<' tokens that might have been intended to be treated as an /// angle bracket instead of a less-than comparison. /// /// This happens when the user intends to form a template-id, but typoes the /// template-name or forgets a 'template' keyword for a dependent template /// name. /// /// We track these locations from the point where we see a '<' with a /// name-like expression on its left until we see a '>' or '>>' that might /// match it. struct AngleBracketTracker { /// Flags used to rank candidate template names when there is more than one /// '<' in a scope. 
enum Priority : unsigned short { /// A non-dependent name that is a potential typo for a template name. PotentialTypo = 0x0, /// A dependent name that might instantiate to a template-name. DependentName = 0x2, /// A space appears before the '<' token. SpaceBeforeLess = 0x0, /// No space before the '<' token NoSpaceBeforeLess = 0x1, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName) }; struct Loc { Expr *TemplateName; SourceLocation LessLoc; AngleBracketTracker::Priority Priority; unsigned short ParenCount, BracketCount, BraceCount; bool isActive(Parser &P) const { return P.ParenCount == ParenCount && P.BracketCount == BracketCount && P.BraceCount == BraceCount; } bool isActiveOrNested(Parser &P) const { return isActive(P) || P.ParenCount > ParenCount || P.BracketCount > BracketCount || P.BraceCount > BraceCount; } }; SmallVector<Loc, 8> Locs; /// Add an expression that might have been intended to be a template name. /// In the case of ambiguity, we arbitrarily select the innermost such /// expression, for example in 'foo < bar < baz', 'bar' is the current /// candidate. No attempt is made to track that 'foo' is also a candidate /// for the case where we see a second suspicious '>' token. void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc, Priority Prio) { if (!Locs.empty() && Locs.back().isActive(P)) { if (Locs.back().Priority <= Prio) { Locs.back().TemplateName = TemplateName; Locs.back().LessLoc = LessLoc; Locs.back().Priority = Prio; } } else { Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount, P.BracketCount, P.BraceCount}); } } /// Mark the current potential missing template location as having been /// handled (this happens if we pass a "corresponding" '>' or '>>' token /// or leave a bracket scope). void clear(Parser &P) { while (!Locs.empty() && Locs.back().isActiveOrNested(P)) Locs.pop_back(); } /// Get the current enclosing expression that might hve been intended to be /// a template name. 
Loc *getCurrent(Parser &P) { if (!Locs.empty() && Locs.back().isActive(P)) return &Locs.back(); return nullptr; } }; AngleBracketTracker AngleBrackets; IdentifierInfo *getSEHExceptKeyword(); /// True if we are within an Objective-C container while parsing C-like decls. /// /// This is necessary because Sema thinks we have left the container /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will /// be NULL. bool ParsingInObjCContainer; /// Whether to skip parsing of function bodies. /// /// This option can be used, for example, to speed up searches for /// declarations/definitions when indexing. bool SkipFunctionBodies; /// The location of the expression statement that is being parsed right now. /// Used to determine if an expression that is being parsed is a statement or /// just a regular sub-expression. SourceLocation ExprStatementTokLoc; /// Flags describing a context in which we're parsing a statement. enum class ParsedStmtContext { /// This context permits declarations in language modes where declarations /// are not statements. AllowDeclarationsInC = 0x1, /// This context permits standalone OpenMP directives. AllowStandaloneOpenMPDirectives = 0x2, /// This context is at the top level of a GNU statement expression. InStmtExpr = 0x4, /// The context of a regular substatement. SubStmt = 0, /// The context of a compound-statement. Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives, LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr) }; /// Act on an expression statement that might be the last statement in a /// GNU statement expression. Checks whether we are actually at the end of /// a statement expression and builds a suitable expression statement. 
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx); public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } ObjCContainerDecl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding. All of these are statically 'void*', but they may all be // different actual classes based on the actions in place. typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods. /// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result, Sema::ModuleImportState &ImportState); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. bool ParseTopLevelDecl(DeclGroupPtrTy &Result, Sema::ModuleImportState &ImportState); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; Sema::ModuleImportState IS = Sema::ModuleImportState::NotACXX20Module; return ParseTopLevelDecl(Result, IS); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. 
  SourceLocation ConsumeToken() {
    assert(!isTokenSpecial() &&
           "Should consume special tokens with Consume*Token");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// Consume the current token if and only if it has the expected kind;
  /// ordinary (non-special) tokens only. Returns true if a token was
  /// consumed.
  bool TryConsumeToken(tok::TokenKind Expected) {
    if (Tok.isNot(Expected))
      return false;
    assert(!isTokenSpecial() &&
           "Should consume special tokens with Consume*Token");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return true;
  }

  /// As above, but additionally reports the location of the consumed token
  /// through \p Loc. \p Loc is left untouched on failure.
  bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
    if (!TryConsumeToken(Expected))
      return false;
    Loc = PrevTokLocation;
    return true;
  }

  /// ConsumeAnyToken - Dispatch to the right Consume* method based on the
  /// current token type.  This should only be used in cases where the type of
  /// the token really isn't known, e.g. in error recovery.
  SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
    if (isTokenParen())
      return ConsumeParen();
    if (isTokenBracket())
      return ConsumeBracket();
    if (isTokenBrace())
      return ConsumeBrace();
    if (isTokenStringLiteral())
      return ConsumeStringToken();
    if (Tok.is(tok::code_completion))
      return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
                                      : handleUnexpectedCodeCompletionToken();
    if (Tok.isAnnotation())
      return ConsumeAnnotationToken();
    return ConsumeToken();
  }

  /// Return the location immediately past the end of the previously
  /// consumed token.
  SourceLocation getEndOfPreviousToken() {
    return PP.getLocForEndOfToken(PrevTokLocation);
  }

  /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
  /// to the given nullability kind.
  IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
    return Actions.getNullabilityKeyword(nullability);
  }

private:
  //===--------------------------------------------------------------------===//
  // Low-Level token peeking and consumption methods.
  //

  /// isTokenParen - Return true if the cur token is '(' or ')'.
  bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); }
  /// isTokenBracket - Return true if the cur token is '[' or ']'.
  bool isTokenBracket() const {
    return Tok.isOneOf(tok::l_square, tok::r_square);
  }
  /// isTokenBrace - Return true if the cur token is '{' or '}'.
  bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); }
  /// isTokenStringLiteral - True if this token is a string-literal.
  bool isTokenStringLiteral() const {
    return tok::isStringLiteral(Tok.getKind());
  }
  /// isTokenSpecial - True if this token requires special consumption methods.
  bool isTokenSpecial() const {
    return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
           isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
  }

  /// Returns true if the current token is '=' or is a type of '='.
  /// For typos, give a fixit to '='
  bool isTokenEqualOrEqualTypo();

  /// Return the current token to the token stream and make the given
  /// token the current token.
  void UnconsumeToken(Token &Consumed) {
    // Push back the lookahead token and then the token to be restored, so
    // that after two lexes Tok is \p Consumed and the old Tok follows it.
    Token Next = Tok;
    PP.EnterToken(Consumed, /*IsReinject*/true);
    PP.Lex(Tok);
    PP.EnterToken(Next, /*IsReinject*/true);
  }

  /// Consume the current annotation token. Unlike the other consume methods,
  /// this returns the annotation's *start* location; PrevTokLocation is set
  /// to its end location so diagnostics anchor after the whole annotation.
  SourceLocation ConsumeAnnotationToken() {
    assert(Tok.isAnnotation() && "wrong consume method");
    SourceLocation Loc = Tok.getLocation();
    PrevTokLocation = Tok.getAnnotationEndLoc();
    PP.Lex(Tok);
    return Loc;
  }

  /// ConsumeParen - This consume method keeps the paren count up-to-date.
  ///
  SourceLocation ConsumeParen() {
    assert(isTokenParen() && "wrong consume method");
    if (Tok.getKind() == tok::l_paren)
      ++ParenCount;
    else if (ParenCount) {
      // Leaving a bracket scope invalidates pending template-angle-bracket
      // candidates tracked at this nesting level.
      AngleBrackets.clear(*this);
      --ParenCount;       // Don't let unbalanced )'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeBracket - This consume method keeps the bracket count up-to-date.
  ///
  SourceLocation ConsumeBracket() {
    assert(isTokenBracket() && "wrong consume method");
    if (Tok.getKind() == tok::l_square)
      ++BracketCount;
    else if (BracketCount) {
      AngleBrackets.clear(*this);
      --BracketCount;     // Don't let unbalanced ]'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeBrace - This consume method keeps the brace count up-to-date.
  ///
  SourceLocation ConsumeBrace() {
    assert(isTokenBrace() && "wrong consume method");
    if (Tok.getKind() == tok::l_brace)
      ++BraceCount;
    else if (BraceCount) {
      AngleBrackets.clear(*this);
      --BraceCount;     // Don't let unbalanced }'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeStringToken - Consume the current 'peek token', lexing a new one
  /// and returning the location of the consumed string token.  This method is
  /// specific to strings, as it handles string literal concatenation, as per
  /// C99 5.1.1.2, translation phase #6.
  SourceLocation ConsumeStringToken() {
    assert(isTokenStringLiteral() &&
           "Should only consume string literals with this method");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// Consume the current code-completion token.
  ///
  /// This routine can be called to consume the code-completion token and
  /// continue processing in special cases where \c cutOffParsing() isn't
  /// desired, such as token caching or completion with lookahead.
  SourceLocation ConsumeCodeCompletionToken() {
    assert(Tok.is(tok::code_completion));
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// \brief When we are consuming a code-completion token without having
  /// matched specific position in the grammar, provide code-completion results
  /// based on context.
  ///
  /// \returns the source location of the code-completion token.
  SourceLocation handleUnexpectedCodeCompletionToken();

  /// Abruptly cut off parsing; mainly used when we have reached the
  /// code-completion point.
  void cutOffParsing() {
    if (PP.isCodeCompletionEnabled())
      PP.setCodeCompletionReached();
    // Cut off parsing by acting as if we reached the end-of-file.
    Tok.setKind(tok::eof);
  }

  /// Determine if we're at the end of the file or at a transition
  /// between modules.
bool isEofOrEom() { tok::TokenKind Kind = Tok.getKind(); return Kind == tok::eof || Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include; } /// Checks if the \p Level is valid for use in a fold expression. bool isFoldOperator(prec::Level Level) const; /// Checks if the \p Kind is a valid operator for fold expressions. bool isFoldOperator(tok::TokenKind Kind) const; /// Initialize all pragma handlers. void initializePragmaHandlers(); /// Destroy and reset all pragma handlers. void resetPragmaHandlers(); /// Handle the annotation token produced for #pragma unused(...) void HandlePragmaUnused(); /// Handle the annotation token produced for /// #pragma GCC visibility... void HandlePragmaVisibility(); /// Handle the annotation token produced for /// #pragma pack... void HandlePragmaPack(); /// Handle the annotation token produced for /// #pragma ms_struct... void HandlePragmaMSStruct(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSFunction(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSAllocText(StringRef PragmaName, SourceLocation PragmaLocation); /// Handle the annotation token produced for /// #pragma align... void HandlePragmaAlign(); /// Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// Handle the annotation token produced for /// #pragma redefine_extname... 
void HandlePragmaRedefineExtname(); /// Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// Handle the annotation token produced for /// #pragma STDC FENV_ACCESS... void HandlePragmaFEnvAccess(); /// Handle the annotation token produced for /// #pragma STDC FENV_ROUND... void HandlePragmaFEnvRound(); /// Handle the annotation token produced for /// #pragma float_control void HandlePragmaFloatControl(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. /// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. 
  static TypeResult getTypeAnnotation(const Token &Tok) {
    if (!Tok.getAnnotationValue())
      return TypeError();
    return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
  }

private:
  /// Store a parsed type in an annotation token. An invalid \p T is encoded
  /// as a null annotation value (the error case of getTypeAnnotation).
  static void setTypeAnnotation(Token &Tok, TypeResult T) {
    assert((T.isInvalid() || T.get()) &&
           "produced a valid-but-null type annotation?");
    Tok.setAnnotationValue(T.isInvalid() ? nullptr
                                         : T.get().getAsOpaquePtr());
  }

  /// Read a non-type declaration out of an annotation token.
  static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
    return static_cast<NamedDecl*>(Tok.getAnnotationValue());
  }

  /// Store a non-type declaration in an annotation token.
  static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
    Tok.setAnnotationValue(ND);
  }

  /// Read an identifier out of an annotation token.
  static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
    return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
  }

  /// Store an identifier in an annotation token.
  static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
    Tok.setAnnotationValue(ND);
  }

  /// Read an already-translated primary expression out of an annotation
  /// token.
  static ExprResult getExprAnnotation(const Token &Tok) {
    return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
  }

  /// Set the primary expression corresponding to the given annotation
  /// token.
  static void setExprAnnotation(Token &Tok, ExprResult ER) {
    Tok.setAnnotationValue(ER.getAsOpaquePointer());
  }

public:

  // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
  // find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); bool MightBeCXXScopeToken() { return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) || (Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon)) || Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super); } bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) { return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext); } private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. ANK_Success }; AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream. void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation); /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens, /// replacing them with the non-context-sensitive keywords. This returns /// true if the token was replaced. bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid) { if (!getLangOpts().AltiVec && !getLangOpts().ZVector) return false; if (Tok.getIdentifierInfo() != Ident_vector && Tok.getIdentifierInfo() != Ident_bool && Tok.getIdentifierInfo() != Ident_Bool && (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel)) return false; return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid); } /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector /// identifier token, replacing it with the non-context-sensitive __vector. /// This returns true if the token was replaced. 
bool TryAltiVecVectorToken() { if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } bool TryAltiVecVectorTokenOutOfLine(); bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid); /// Returns true if the current token is the identifier 'instancetype'. /// /// Should only be used in Objective-C language modes. bool isObjCInstancetype() { assert(getLangOpts().ObjC); if (Tok.isAnnotation()) return false; if (!Ident_instancetype) Ident_instancetype = PP.getIdentifierInfo("instancetype"); return Tok.getIdentifierInfo() == Ident_instancetype; } /// TryKeywordIdentFallback - For compatibility with system headers using /// keywords as identifiers, attempt to convert the current token to an /// identifier and optionally disable the keyword for the remainder of the /// translation unit. This returns false if the token was not replaced, /// otherwise emits a diagnostic and returns true. bool TryKeywordIdentFallback(bool DisableKeyword); /// Get the TemplateIdAnnotation from the token. TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... 
/// TPA.Revert(); /// class TentativeParsingAction { Parser &P; PreferredTypeBuilder PrevPreferredType; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser &p) : P(p), PrevPreferredType(P.PreferredType) { PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.PreferredType = PrevPreferredType; P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. 
class ObjCDeclContextSwitch {
  Parser &P;
  // The Objective-C container we temporarily exited; null when the parser
  // was not inside an ObjC container (making this switch a no-op).
  ObjCContainerDecl *DC;
  SaveAndRestore<bool> WithinObjCContainer;

public:
  explicit ObjCDeclContextSwitch(Parser &p)
      : P(p), DC(p.getObjCDeclContext()),
        WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
    if (DC)
      P.Actions.ActOnObjCTemporaryExitContainerContext(DC);
  }
  ~ObjCDeclContextSwitch() {
    // Re-enter the container on destruction, mirroring the constructor.
    if (DC)
      P.Actions.ActOnObjCReenterContainerContext(DC);
  }
};

/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
                      unsigned Diag = diag::err_expected,
                      StringRef DiagMsg = "");

/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);

/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
  OutsideFunction = 0,
  InsideStruct = 1,
  InstanceVariableList = 2,
  AfterMemberFunctionDefinition = 3
};

/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);

/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();

/// Kinds of compound pseudo-tokens formed by a sequence of two real tokens.
enum class CompoundToken {
  /// A '(' '{' beginning a statement-expression.
  StmtExprBegin,
  /// A '}' ')' ending a statement-expression.
  StmtExprEnd,
  /// A '[' '[' beginning a C++11 or C2x attribute.
  AttrBegin,
  /// A ']' ']' ending a C++11 or C2x attribute.
  AttrEnd,
  /// A '::' '*' forming a C++ pointer-to-member declaration.
  MemberPtr,
};

/// Check that a compound operator was written in a "sensible" way, and warn
/// if not.
void checkCompoundToken(SourceLocation FirstTokLoc,
                        tok::TokenKind FirstTokKind, CompoundToken Op);

public:
  //===--------------------------------------------------------------------===//
  // Scope manipulation

  /// ParseScope - Introduces a new scope for parsing. The kind of
  /// scope is determined by ScopeFlags. Objects of this type should
  /// be created on the stack to coincide with the position where the
  /// parser enters the new scope, and this object's constructor will
  /// create that new scope. Similarly, once the object is destroyed
  /// the parser will exit the scope.
  class ParseScope {
    Parser *Self;
    ParseScope(const ParseScope &) = delete;
    void operator=(const ParseScope &) = delete;

  public:
    // ParseScope - Construct a new object to manage a scope in the
    // parser Self where the new Scope is created with the flags
    // ScopeFlags, but only when we aren't about to enter a compound statement.
    ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
               bool BeforeCompoundStmt = false)
        : Self(Self) {
      if (EnteredScope && !BeforeCompoundStmt)
        Self->EnterScope(ScopeFlags);
      else {
        if (BeforeCompoundStmt)
          Self->incrementMSManglingNumber();
        // A null Self marks this object as inactive so Exit() is a no-op.
        this->Self = nullptr;
      }
    }

    // Exit - Exit the scope associated with this object now, rather
    // than waiting until the object is destroyed.
    void Exit() {
      if (Self) {
        Self->ExitScope();
        Self = nullptr;
      }
    }

    ~ParseScope() { Exit(); }
  };

  /// Introduces zero or more scopes for parsing. The scopes will all be exited
  /// when the object is destroyed.
class MultiParseScope {
  Parser &Self;
  // Number of scopes entered via Enter() that are still open.
  unsigned NumScopes = 0;

  MultiParseScope(const MultiParseScope&) = delete;

public:
  MultiParseScope(Parser &Self) : Self(Self) {}
  void Enter(unsigned ScopeFlags) {
    Self.EnterScope(ScopeFlags);
    ++NumScopes;
  }
  void Exit() {
    // Pop every scope this object entered, in LIFO order.
    while (NumScopes) {
      Self.ExitScope();
      --NumScopes;
    }
  }
  ~MultiParseScope() {
    Exit();
  }
};

/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);

/// ExitScope - Pop a scope off the scope stack.
void ExitScope();

/// Re-enter the template scopes for a declaration that might be a template.
unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D);

private:
  /// RAII object used to modify the scope flags for the current scope.
  class ParseScopeFlags {
    Scope *CurScope;
    // Flags saved at construction and restored by the destructor.
    unsigned OldFlags;
    ParseScopeFlags(const ParseScopeFlags &) = delete;
    void operator=(const ParseScopeFlags &) = delete;

  public:
    ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
    ~ParseScopeFlags();
  };

  //===--------------------------------------------------------------------===//
  // Diagnostic Emission and Error recovery.

public:
  DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
  DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
  // Convenience overload: diagnose at the current token.
  DiagnosticBuilder Diag(unsigned DiagID) {
    return Diag(Tok, DiagID);
  }

private:
  void SuggestParentheses(SourceLocation Loc, unsigned DK,
                          SourceRange ParenRange);
  void CheckNestedObjCContexts(SourceLocation AtLoc);

public:
  /// Control flags for SkipUntil functions.
  enum SkipUntilFlags {
    StopAtSemi = 1 << 0,  ///< Stop skipping at semicolon
    /// Stop skipping at specified token, but don't skip the token itself
    StopBeforeMatch = 1 << 1,
    StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
  };

  friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                            SkipUntilFlags R) {
    return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                       static_cast<unsigned>(R));
  }

  /// SkipUntil - Read tokens until we get to the specified token, then consume
  /// it (unless StopBeforeMatch is specified).  Because we cannot guarantee
  /// that the token will ever occur, this skips to the next token, or to some
  /// likely good stopping point.  If Flags has StopAtSemi flag, skipping will
  /// stop at a ';' character. Balances (), [], and {} delimiter tokens while
  /// skipping.
  ///
  /// If SkipUntil finds the specified token, it returns true, otherwise it
  /// returns false.
  bool SkipUntil(tok::TokenKind T,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    return SkipUntil(llvm::makeArrayRef(T), Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2, T3};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

  /// SkipMalformedDecl - Read tokens until we get to some likely good stopping
  /// point for skipping past a simple-declaration.
  void SkipMalformedDecl();

  /// The location of the first statement inside an else that might
  /// have a misleading indentation. If there is no
  /// MisleadingIndentationChecker on an else active, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;

private:
  //===--------------------------------------------------------------------===//
  // Lexing and parsing of C++ inline methods.

  struct ParsingClass;

  /// [class.mem]p1: "... the class is regarded as complete within
  /// - function bodies
  /// - default arguments
  /// - exception-specifications (TODO: C++0x)
  /// - and brace-or-equal-initializers for non-static data members
  /// (including such things in nested classes)."
  /// LateParsedDeclarations build the tree of those elements so they can
  /// be parsed after parsing the top-level class.
  class LateParsedDeclaration {
  public:
    virtual ~LateParsedDeclaration();

    // Each hook is a no-op by default; subclasses override the one(s)
    // relevant to the kind of late-parsed entity they represent.
    virtual void ParseLexedMethodDeclarations();
    virtual void ParseLexedMemberInitializers();
    virtual void ParseLexedMethodDefs();
    virtual void ParseLexedAttributes();
    virtual void ParseLexedPragmas();
  };

  /// Inner node of the LateParsedDeclaration tree that parses
  /// all its members recursively.
  class LateParsedClass : public LateParsedDeclaration {
  public:
    LateParsedClass(Parser *P, ParsingClass *C);
    ~LateParsedClass() override;

    void ParseLexedMethodDeclarations() override;
    void ParseLexedMemberInitializers() override;
    void ParseLexedMethodDefs() override;
    void ParseLexedAttributes() override;
    void ParseLexedPragmas() override;

  private:
    Parser *Self;
    ParsingClass *Class;
  };

  /// Contains the lexed tokens of an attribute with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  /// FIXME: Perhaps we should change the name of LateParsedDeclaration to
  /// LateParsedTokens.
  struct LateParsedAttribute : public LateParsedDeclaration {
    Parser *Self;
    CachedTokens Toks;
    IdentifierInfo &AttrName;
    // Macro that expanded into this attribute, if any.
    IdentifierInfo *MacroII = nullptr;
    SourceLocation AttrNameLoc;
    // Declarations the attribute will be applied to once parsed.
    SmallVector<Decl*, 2> Decls;

    explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                                 SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

    void ParseLexedAttributes() override;

    void addDecl(Decl *D) { Decls.push_back(D); }
  };

  /// Contains the lexed tokens of a pragma with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  class LateParsedPragma : public LateParsedDeclaration {
    Parser *Self = nullptr;
    AccessSpecifier AS = AS_none;
    CachedTokens Toks;

  public:
    explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
        : Self(P), AS(AS) {}

    // Take ownership of the cached pragma tokens (swaps, no copy).
    void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
    const CachedTokens &toks() const { return Toks; }
    AccessSpecifier getAccessSpecifier() const { return AS; }

    void ParseLexedPragmas() override;
  };

  // A list of late-parsed attributes.  Used by ParseGNUAttributes.
  class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
  public:
    LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

    bool parseSoon() { return ParseSoon; }

  private:
    bool ParseSoon;  // Are we planning to parse these shortly after creation?
  };

  /// Contains the lexed tokens of a member function definition
  /// which needs to be parsed at the end of the class declaration
  /// after parsing all other member declarations.
  struct LexedMethod : public LateParsedDeclaration {
    Parser *Self;
    Decl *D;
    CachedTokens Toks;

    explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}

    void ParseLexedMethodDefs() override;
  };

  /// LateParsedDefaultArgument - Keeps track of a parameter that may
  /// have a default argument that cannot be parsed yet because it
  /// occurs within a member function declaration inside the class
  /// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
  explicit LateParsedDefaultArgument(Decl *P,
                                     std::unique_ptr<CachedTokens> Toks = nullptr)
    : Param(P), Toks(std::move(Toks)) { }

  /// Param - The parameter declaration for this parameter.
  Decl *Param;

  /// Toks - The sequence of tokens that comprises the default
  /// argument expression, not including the '=' or the terminating
  /// ')' or ','. This will be NULL for parameters that have no
  /// default argument.
  std::unique_ptr<CachedTokens> Toks;
};

/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
  explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
    : Self(P), Method(M), ExceptionSpecTokens(nullptr) {}

  void ParseLexedMethodDeclarations() override;

  Parser *Self;

  /// Method - The method declaration.
  Decl *Method;

  /// DefaultArgs - Contains the parameters of the function and
  /// their default arguments. At least one of the parameters will
  /// have a default argument, but all of the parameters of the
  /// method will be stored so that they can be reintroduced into
  /// scope at the appropriate times.
  SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

  /// The set of tokens that make up an exception-specification that
  /// has not yet been parsed.
  CachedTokens *ExceptionSpecTokens;
};

/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must to be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
  LateParsedMemberInitializer(Parser *P, Decl *FD)
    : Self(P), Field(FD) { }

  void ParseLexedMemberInitializers() override;

  Parser *Self;

  /// Field - The field declaration.
  Decl *Field;

  /// CachedTokens - The sequence of tokens that comprises the initializer,
  /// including any leading '='.
  CachedTokens Toks;
};

/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;

/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
  ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), IsInterface(IsInterface),
        TagOrTemplate(TagOrTemplate) {}

  /// Whether this is a "top-level" class, meaning that it is
  /// not nested within another class.
  bool TopLevelClass : 1;

  /// Whether this class is an __interface.
  bool IsInterface : 1;

  /// The class or class template whose definition we are parsing.
  Decl *TagOrTemplate;

  /// LateParsedDeclarations - Method declarations, inline definitions and
  /// nested classes that contain pieces whose parsing will be delayed until
  /// the top-level class is fully defined.
  LateParsedDeclarationsContainer LateParsedDeclarations;
};

/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;

// Returns the innermost class currently being parsed; requires a
// non-empty ClassStack.
ParsingClass &getCurrentClass() {
  assert(!ClassStack.empty() && "No lexed method stacks!");
  return *ClassStack.top();
}

/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
  Parser &P;
  // True once Pop() has been called; prevents double-popping from the dtor.
  bool Popped;
  Sema::ParsingClassState State;

public:
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
    : P(P), Popped(false),
      State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
  }

  /// Pop this class of the stack.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }

  ~ParsingClassDefinition() {
    if (!Popped)
      P.PopParsingClass(State);
  }
};

/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  ParsedTemplateInfo() : Kind(NonTemplate), TemplateParams(nullptr) {}

  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
    : Kind(isSpecialization? ExplicitSpecialization : Template),
      TemplateParams(TemplateParams),
      LastParameterListWasEmpty(lastParameterListWasEmpty) { }

  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
    : Kind(ExplicitInstantiation), TemplateParams(nullptr),
      ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
      LastParameterListWasEmpty(false){ }

  /// The kind of template we are parsing.
  enum {
    /// We are not parsing a template at all.
    NonTemplate = 0,
    /// We are parsing a template declaration.
    Template,
    /// We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;

  /// The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;

  /// The location of the 'extern' keyword, if any, for an explicit
  /// instantiation
  SourceLocation ExternLoc;

  /// The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;

  /// Whether the last template parameter list was empty.
  bool LastParameterListWasEmpty;

  SourceRange getSourceRange() const LLVM_READONLY;
};

// In ParseCXXInlineMethods.cpp.
struct ReenterTemplateScopeRAII;
struct ReenterClassScopeRAII;

void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);

Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);

// Which kind of cached-token initializer is being late-parsed.
enum CachedInitKind {
  CIK_DefaultArgument,
  CIK_DefaultInitializer
};

NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
                                   const ParsedAttributesView &AccessAttrs,
                                   ParsingDeclarator &D,
                                   const ParsedTemplateInfo &TemplateInfo,
                                   const VirtSpecifiers &VS,
                                   SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
                             bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
                         bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
void ParseLexedPragmas(ParsingClass &Class);
void ParseLexedPragma(LateParsedPragma &LP);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
// Single-target convenience wrapper: forwards to the two-target overload
// with both targets set to T1.
bool ConsumeAndStoreUntil(tok::TokenKind T1,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true) {
  return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true);

//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributes &Attrs,
                                        ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
    ParsedAttributes &Attrs, ParsingDeclSpec *DS = nullptr,
    AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributes &Attrs,
                                              ParsingDeclSpec &DS,
                                              AccessSpecifier AS);

void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
               const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
               LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc is filled with the location of the last token of the simple-asm.
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc);
ExprResult ParseAsmStringLiteral(bool ForAsmLabel);

// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
                                      ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
    ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
    SmallVectorImpl<IdentifierLocPair> &protocolIdents,
    SourceLocation &rAngleLoc, bool mayBeProtocolList = true);

void HelperActionsForIvarDeclarations(ObjCContainerDecl *interfaceDecl,
                                      SourceLocation atLoc,
                                      BalancedDelimiterTracker &T,
                                      SmallVectorImpl<Decl *> &AllIvarDecls,
                                      bool RBraceMissing);
void ParseObjCClassInstanceVariables(ObjCContainerDecl *interfaceDecl,
                                     tok::ObjCKeywordKind visibility,
                                     SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
                                 SmallVectorImpl<SourceLocation> &PLocs,
                                 bool WarnOnDeclarations,
                                 bool ForObjCContainer,
                                 SourceLocation &LAngleLoc,
                                 SourceLocation &EndProtoLoc,
                                 bool consumeLastToken);

/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
       ParsedType baseType,
       SourceLocation &typeArgsLAngleLoc,
       SmallVectorImpl<ParsedType> &typeArgs,
       SourceLocation &typeArgsRAngleLoc,
       SourceLocation &protocolLAngleLoc,
       SmallVectorImpl<Decl *> &protocols,
       SmallVectorImpl<SourceLocation> &protocolLocs,
       SourceLocation &protocolRAngleLoc,
       bool consumeLastToken,
       bool warnOnIncompleteProtocols);

/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
       ParsedType baseType,
       SourceLocation &typeArgsLAngleLoc,
       SmallVectorImpl<ParsedType> &typeArgs,
       SourceLocation &typeArgsRAngleLoc,
       SourceLocation &protocolLAngleLoc,
       SmallVectorImpl<Decl *> &protocols,
       SmallVectorImpl<SourceLocation> &protocolLocs,
       SourceLocation &protocolRAngleLoc,
       bool consumeLastToken);

/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);

/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
                                                  ParsedType type,
                                                  bool consumeLastToken,
                                                  SourceLocation &endLoc);

void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
                                Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
                                              ParsedAttributes &prefixAttrs);

// RAII tracking state for parsing an @implementation, including any
// method definitions whose bodies are lexed now but parsed at @end.
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl;
  bool HasCFunction;
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods;

  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
    : P(parser), Dcl(D), HasCFunction(false) {
    P.CurParsedObjCImpl = this;
    Finished = false;
  }
  ~ObjCImplParsingDataRAII();

  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }

private:
  bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);

DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
                                                    ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);

IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);

// Definitions for Objective-C context sensitive keywords recognition.
enum ObjCTypeQual {
  objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
  objc_nonnull, objc_nullable, objc_null_unspecified,
  objc_NumQuals
};
// Cached IdentifierInfos for each context-sensitive type qualifier.
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];

bool isTokIdentifier_in() const;

ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
                             ParsedAttributes *ParamAttrs);
Decl *ParseObjCMethodPrototype(
          tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
          bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
          tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
          bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);

Decl *ParseObjCMethodDefinition();

public:
  //===--------------------------------------------------------------------===//
  // C99 6.5: Expressions.

  /// TypeCastState - State whether an expression is or may be a type cast.
  enum TypeCastState {
    NotTypeCast = 0,
    MaybeTypeCast,
    IsTypeCast
  };

  ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseConstantExpressionInExprEvalContext(
      TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseCaseExpression(SourceLocation CaseLoc);
  ExprResult ParseConstraintExpression();
  ExprResult
  ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
  ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
  // Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);

ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
                                unsigned &NumLineToksConsumed,
                                bool IsUnevaluated);

ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);

private:
  ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);

  ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);

  ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
                                        prec::Level MinPrec);
  /// Control what ParseCastExpression will parse.
  enum CastParseKind {
    AnyCastExpr = 0,
    UnaryExprOnly,
    PrimaryExprOnly
  };
  ExprResult ParseCastExpression(CastParseKind ParseKind,
                                 bool isAddressOfOperand,
                                 bool &NotCastExpr,
                                 TypeCastState isTypeCast,
                                 bool isVectorLiteral = false,
                                 bool *NotPrimaryExpression = nullptr);
  ExprResult ParseCastExpression(CastParseKind ParseKind,
                                 bool isAddressOfOperand = false,
                                 TypeCastState isTypeCast = NotTypeCast,
                                 bool isVectorLiteral = false,
                                 bool *NotPrimaryExpression = nullptr);

  /// Returns true if the next token cannot start an expression.
  bool isNotExpressionStart();

  /// Returns true if the next token would start a postfix-expression
  /// suffix.
  bool isPostfixExpressionSuffixStart() {
    tok::TokenKind K = Tok.getKind();
    return (K == tok::l_square || K == tok::l_paren ||
            K == tok::period || K == tok::arrow ||
            K == tok::plusplus || K == tok::minusminus);
  }

  bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
  void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
  bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
                                           const Token &OpToken);
  // Convenience wrapper: only checks when an angle-bracket candidate is
  // currently being tracked.
  bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
    if (auto *Info = AngleBrackets.getCurrent(*this))
      return checkPotentialAngleBracketDelimiter(*Info, OpToken);
    return false;
  }

  ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
  ExprResult ParseUnaryExprOrTypeTraitExpression();
  ExprResult ParseBuiltinPrimaryExpression();
  ExprResult ParseSYCLUniqueStableNameExpression();

  ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
                                                     bool &isCastExpr,
                                                     ParsedType &CastTy,
                                                     SourceRange &CastRange);

  typedef SmallVector<SourceLocation, 20> CommaLocsTy;

  /// ParseExpressionList - Used for C/C++ (argument-)expression-list.
  bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
                           SmallVectorImpl<SourceLocation> &CommaLocs,
                           llvm::function_ref<void()> ExpressionStarts =
                               llvm::function_ref<void()>(),
                           bool FailImmediatelyOnInvalidExpr = false,
                           bool EarlyTypoCorrection = false);

  /// ParseSimpleExpressionList - A simple comma-separated list of expressions,
  /// used for misc language extensions.
  bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
                                 SmallVectorImpl<SourceLocation> &CommaLocs);

  /// ParenParseOption - Control what ParseParenExpression will parse.
  enum ParenParseOption {
    SimpleExpr,      // Only parse '(' expression ')'
    FoldExpr,        // Also allow fold-expression <anything>
    CompoundStmt,    // Also allow '(' compound-statement ')'
    CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
    CastExpr         // Also allow '(' type-name ')' <anything>
  };
  ExprResult ParseParenExpression(ParenParseOption &ExprType,
                                        bool stopIfCastExpr,
                                        bool isTypeCast,
                                        ParsedType &CastTy,
                                        SourceLocation &RParenLoc);

  ExprResult ParseCXXAmbiguousParenExpression(
      ParenParseOption &ExprType, ParsedType &CastTy,
      BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
  ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
                                                  SourceLocation LParenLoc,
                                                  SourceLocation RParenLoc);

  ExprResult ParseGenericSelectionExpression();

  ExprResult ParseObjCBoolLiteral();

  ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);

  //===--------------------------------------------------------------------===//
  // C++ Expressions
  ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
                                     Token &Replacement);
  ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);

  bool areTokensAdjacent(const Token &A, const Token &B);

  void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
                                  bool EnteringContext, IdentifierInfo &II,
                                  CXXScopeSpec &SS);

  bool ParseOptionalCXXScopeSpecifier(
      CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHasErrors,
      bool EnteringContext, bool *MayBePseudoDestructor = nullptr,
      bool IsTypename = false, IdentifierInfo **LastII = nullptr,
      bool OnlyNamespace = false, bool InUsingDeclaration = false);

  //===--------------------------------------------------------------------===//
  // C++11 5.1.2: Lambda expressions

  /// Result of tentatively parsing a lambda-introducer.
  enum class LambdaIntroducerTentativeParse {
    /// This appears to be a lambda-introducer, which has been fully parsed.
    Success,
    /// This is a lambda-introducer, but has not been fully parsed, and this
    /// function needs to be called again to parse it.
    Incomplete,
    /// This is definitely an Objective-C message send expression, rather than
    /// a lambda-introducer, attribute-specifier, or array designator.
    MessageSend,
    /// This is not a lambda-introducer.
    Invalid,
  };

  // [...] () -> type {...}
  ExprResult ParseLambdaExpression();
  ExprResult TryParseLambdaExpression();
  bool
  ParseLambdaIntroducer(LambdaIntroducer &Intro,
                        LambdaIntroducerTentativeParse *Tentative = nullptr);
  ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);

  //===--------------------------------------------------------------------===//
  // C++ 5.2p1: C++ Casts
  ExprResult ParseCXXCasts();

  /// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
  ExprResult ParseBuiltinBitCast();

  //===--------------------------------------------------------------------===//
  // C++ 5.2p1: C++ Type Identification
  ExprResult ParseCXXTypeid();

  //===--------------------------------------------------------------------===//
  //  C++ : Microsoft __uuidof Expression
  ExprResult ParseCXXUuidof();

  //===--------------------------------------------------------------------===//
  // C++ 5.2.4: C++ Pseudo-Destructor Expressions
  ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
                                      tok::TokenKind OpKind,
                                      CXXScopeSpec &SS,
                                      ParsedType ObjectType);

  //===--------------------------------------------------------------------===//
  // C++ 9.3.2: C++ 'this' pointer
  ExprResult ParseCXXThis();

  //===--------------------------------------------------------------------===//
  // C++ 15: C++ Throw Expression
  ExprResult ParseThrowExpression();

  ExceptionSpecificationType tryParseExceptionSpecification(
                    bool Delayed,
                    SourceRange &SpecificationRange,
                    SmallVectorImpl<ParsedType> &DynamicExceptions,
                    SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
                    ExprResult &NoexceptExpr,
                    CachedTokens *&ExceptionSpecTokens);

  // EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification( SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions, SmallVectorImpl<SourceRange> &Ranges); //===--------------------------------------------------------------------===// // C++0x 8: Function declaration trailing-return-type TypeResult ParseTrailingReturnType(SourceRange &Range, bool MayBeFollowedByDirectInit); //===--------------------------------------------------------------------===// // C++ 2.13.5: C++ Boolean Literals ExprResult ParseCXXBoolLiteral(); //===--------------------------------------------------------------------===// // C++ 5.2.3: Explicit type conversion (functional notation) ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS); /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers. /// This should only be called when the current token is known to be part of /// simple-type-specifier. void ParseCXXSimpleTypeSpecifier(DeclSpec &DS); bool ParseCXXTypeSpecifierSeq(DeclSpec &DS); //===--------------------------------------------------------------------===// // C++ 5.3.4 and 5.3.5: C++ new and delete bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs, Declarator &D); void ParseDirectNewDeclarator(Declarator &D); ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start); ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start); //===--------------------------------------------------------------------===// // C++ if/switch/while/for condition expression. 
struct ForRangeInfo;
  // NOTE(review): when FRI is non-null this presumably allows a
  // for-range-declaration to be parsed in condition position — confirm at
  // call sites.
  Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
                                          SourceLocation Loc,
                                          Sema::ConditionKind CK,
                                          bool MissingOK,
                                          ForRangeInfo *FRI = nullptr,
                                          bool EnterForConditionScope = false);
  DeclGroupPtrTy ParseAliasDeclarationInInitStatement(DeclaratorContext Context,
                                                      ParsedAttributes &Attrs);

  //===--------------------------------------------------------------------===//
  // C++ Coroutines
  ExprResult ParseCoyieldExpression();

  //===--------------------------------------------------------------------===//
  // C++ Concepts
  ExprResult ParseRequiresExpression();
  void ParseTrailingRequiresClause(Declarator &D);

  //===--------------------------------------------------------------------===//
  // C99 6.7.8: Initialization.

  /// ParseInitializer
  ///       initializer: [C99 6.7.8]
  ///         assignment-expression
  ///         '{' ...
  ExprResult ParseInitializer() {
    // Brace-enclosed initializers are handled separately; anything else is an
    // assignment-expression.
    if (Tok.isNot(tok::l_brace))
      return ParseAssignmentExpression();
    return ParseBraceInitializer();
  }
  bool MayBeDesignationStart();
  ExprResult ParseBraceInitializer();
  struct DesignatorCompletionInfo {
    SmallVectorImpl<Expr *> &InitExprs;
    QualType PreferredBaseType;
  };
  ExprResult ParseInitializerWithPotentialDesignator(DesignatorCompletionInfo);

  //===--------------------------------------------------------------------===//
  // clang Expressions
  ExprResult ParseBlockLiteralExpression();  // ^{...}

  //===--------------------------------------------------------------------===//
  // Objective-C Expressions
  ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
  ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
  ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
  ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
  ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
  ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
  bool isSimpleObjCMessageExpression();
  ExprResult ParseObjCMessageExpression();
  ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
                                            SourceLocation SuperLoc,
                                            ParsedType ReceiverType,
                                            Expr *ReceiverExpr);
  ExprResult ParseAssignmentExprWithObjCMessageExprStart(
      SourceLocation LBracloc, SourceLocation SuperLoc,
      ParsedType ReceiverType, Expr *ReceiverExpr);
  bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);

  //===--------------------------------------------------------------------===//
  // C99 6.8: Statements and Blocks.

  /// A SmallVector of statements, with stack size 32 (as that is the only one
  /// used.)
  typedef SmallVector<Stmt*, 32> StmtVector;
  /// A SmallVector of expressions, with stack size 12 (the maximum used.)
  typedef SmallVector<Expr*, 12> ExprVector;
  /// A SmallVector of types.
  typedef SmallVector<ParsedType, 12> TypeVector;

  StmtResult
  ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
                 ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
  StmtResult ParseStatementOrDeclaration(
      StmtVector &Stmts, ParsedStmtContext StmtCtx,
      SourceLocation *TrailingElseLoc = nullptr);
  StmtResult ParseStatementOrDeclarationAfterAttributes(
      StmtVector &Stmts, ParsedStmtContext StmtCtx,
      SourceLocation *TrailingElseLoc, ParsedAttributes &Attrs);
  StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
  StmtResult ParseLabeledStatement(ParsedAttributes &Attrs,
                                   ParsedStmtContext StmtCtx);
  StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
                                bool MissingCase = false,
                                ExprResult Expr = ExprResult());
  StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
  StmtResult ParseCompoundStatement(bool isStmtExpr = false);
  StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags);
  void ParseCompoundStatementLeadingPragmas();
  bool ConsumeNullStmt(StmtVector &Stmts);
  StmtResult
ParseCompoundStatementBody(bool isStmtExpr = false);
  bool ParseParenExprOrCondition(StmtResult *InitStmt,
                                 Sema::ConditionResult &CondResult,
                                 SourceLocation Loc, Sema::ConditionKind CK,
                                 bool MissingOK, SourceLocation *LParenLoc,
                                 SourceLocation *RParenLoc);
  StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseDoStatement();
  StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseGotoStatement();
  StmtResult ParseContinueStatement();
  StmtResult ParseBreakStatement();
  StmtResult ParseReturnStatement();
  StmtResult ParseAsmStatement(bool &msAsm);
  StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
  StmtResult ParsePragmaLoopHint(StmtVector &Stmts, ParsedStmtContext StmtCtx,
                                 SourceLocation *TrailingElseLoc,
                                 ParsedAttributes &Attrs);

  /// Describes the behavior that should be taken for an __if_exists
  /// block.
  enum IfExistsBehavior {
    /// Parse the block; this code is always used.
    IEB_Parse,
    /// Skip the block entirely; this code is never used.
    IEB_Skip,
    /// Parse the block as a dependent block, which may be used in
    /// some template instantiations but not others.
    IEB_Dependent
  };

  /// Describes the condition of a Microsoft __if_exists or
  /// __if_not_exists block.
  struct IfExistsCondition {
    /// The location of the initial keyword.
    SourceLocation KeywordLoc;
    /// Whether this is an __if_exists block (rather than an
    /// __if_not_exists block).
    bool IsIfExists;

    /// Nested-name-specifier preceding the name.
    CXXScopeSpec SS;

    /// The name we're looking for.
    UnqualifiedId Name;

    /// The behavior this __if_exists or __if_not_exists block
    /// should have.
    IfExistsBehavior Behavior;
  };

  bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
  void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
  void ParseMicrosoftIfExistsExternalDeclaration();
  void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
                                              ParsedAttributes &AccessAttrs,
                                              AccessSpecifier &CurAS);
  bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
                                              bool &InitExprsOk);
  bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
                           SmallVectorImpl<Expr *> &Constraints,
                           SmallVectorImpl<Expr *> &Exprs);

  //===--------------------------------------------------------------------===//
  // C++ 6: Statements and Blocks
  StmtResult ParseCXXTryBlock();
  StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
  StmtResult ParseCXXCatchBlock(bool FnCatch = false);

  //===--------------------------------------------------------------------===//
  // MS: SEH Statements and Blocks
  StmtResult ParseSEHTryBlock();
  StmtResult ParseSEHExceptBlock(SourceLocation Loc);
  StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
  StmtResult ParseSEHLeaveStatement();

  //===--------------------------------------------------------------------===//
  // Objective-C Statements
  StmtResult ParseObjCAtStatement(SourceLocation atLoc,
                                  ParsedStmtContext StmtCtx);
  StmtResult ParseObjCTryStmt(SourceLocation atLoc);
  StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
  StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
  StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);

  //===--------------------------------------------------------------------===//
  // C99 6.7: Declarations.

  /// A context for parsing declaration specifiers.  TODO: flesh this
  /// out, there are other significant restrictions on specifiers than
  /// would be best implemented in the parser.
enum class DeclSpecContext { DSC_normal, // normal context DSC_class, // class context, enables 'friend' DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list DSC_trailing, // C++11 trailing-type-specifier in a trailing return type DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration DSC_top_level, // top-level/namespace declaration context DSC_template_param, // template parameter context DSC_template_type_arg, // template type argument context DSC_objc_method_result, // ObjC method result context, enables 'instancetype' DSC_condition // condition declaration context }; /// Is this a context in which we are parsing just a type-specifier (or /// trailing-type-specifier)? static bool isTypeSpecifier(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: return false; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return true; } llvm_unreachable("Missing DeclSpecContext case"); } /// Whether a defining-type-specifier is permitted in a given context. enum class AllowDefiningTypeSpec { /// The grammar doesn't allow a defining-type-specifier here, and we must /// not parse one (eg, because a '{' could mean something else). No, /// The grammar doesn't allow a defining-type-specifier here, but we permit /// one for error recovery purposes. Sema will reject. NoButErrorRecovery, /// The grammar allows a defining-type-specifier here, even though it's /// always invalid. Sema will reject. YesButInvalid, /// The grammar allows a defining-type-specifier here, and one can be valid. 
Yes }; /// Is this a context in which we are parsing defining-type-specifiers (and /// so permit class and enum definitions in addition to non-defining class and /// enum elaborated-type-specifiers)? static AllowDefiningTypeSpec isDefiningTypeSpecifierContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_alias_declaration: case DeclSpecContext::DSC_objc_method_result: return AllowDefiningTypeSpec::Yes; case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_template_param: return AllowDefiningTypeSpec::YesButInvalid; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: return AllowDefiningTypeSpec::NoButErrorRecovery; case DeclSpecContext::DSC_trailing: return AllowDefiningTypeSpec::No; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which an opaque-enum-declaration can appear? static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: return true; case DeclSpecContext::DSC_alias_declaration: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: return false; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which we can perform class template argument /// deduction? 
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
    switch (DSC) {
    case DeclSpecContext::DSC_normal:
    case DeclSpecContext::DSC_template_param:
    case DeclSpecContext::DSC_class:
    case DeclSpecContext::DSC_top_level:
    case DeclSpecContext::DSC_condition:
    case DeclSpecContext::DSC_type_specifier:
      return true;

    case DeclSpecContext::DSC_objc_method_result:
    case DeclSpecContext::DSC_template_type_arg:
    case DeclSpecContext::DSC_trailing:
    case DeclSpecContext::DSC_alias_declaration:
      return false;
    }
    llvm_unreachable("Missing DeclSpecContext case");
  }

  /// Information on a C++0x for-range-initializer found while parsing a
  /// declaration which turns out to be a for-range-declaration.
  struct ForRangeInit {
    SourceLocation ColonLoc;
    ExprResult RangeExpr;

    // A for-range declaration was parsed iff the ':' location was recorded.
    bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
  };
  struct ForRangeInfo : ForRangeInit {
    StmtResult LoopVar;
  };

  DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
                                  SourceLocation &DeclEnd,
                                  ParsedAttributes &Attrs,
                                  SourceLocation *DeclSpecStart = nullptr);
  DeclGroupPtrTy
  ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
                         ParsedAttributes &Attrs, bool RequireSemi,
                         ForRangeInit *FRI = nullptr,
                         SourceLocation *DeclSpecStart = nullptr);
  bool MightBeDeclarator(DeclaratorContext Context);
  DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
                                SourceLocation *DeclEnd = nullptr,
                                ForRangeInit *FRI = nullptr);
  Decl *ParseDeclarationAfterDeclarator(Declarator &D,
               const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
  bool ParseAsmAttributesAfterDeclarator(Declarator &D);
  Decl *ParseDeclarationAfterDeclaratorAndAttributes(
      Declarator &D,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      ForRangeInit *FRI = nullptr);
  Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
  Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

  /// When in code-completion, skip parsing of the function/method body
  /// unless the body contains the code-completion point.
  ///
  /// \returns true if the function body was skipped.
  bool trySkippingFunctionBody();

  bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
                        const ParsedTemplateInfo &TemplateInfo,
                        AccessSpecifier AS, DeclSpecContext DSC,
                        ParsedAttributes &Attrs);
  DeclSpecContext
  getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
  void
  ParseDeclarationSpecifiers(DeclSpec &DS,
                             const ParsedTemplateInfo &TemplateInfo =
                                 ParsedTemplateInfo(),
                             AccessSpecifier AS = AS_none,
                             DeclSpecContext DSC = DeclSpecContext::DSC_normal,
                             LateParsedAttrList *LateAttrs = nullptr);
  bool DiagnoseMissingSemiAfterTagDefinition(
      DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
      LateParsedAttrList *LateAttrs = nullptr);

  void ParseSpecifierQualifierList(
      DeclSpec &DS, AccessSpecifier AS = AS_none,
      DeclSpecContext DSC = DeclSpecContext::DSC_normal);

  void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
                                  DeclaratorContext Context);

  void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
                          const ParsedTemplateInfo &TemplateInfo,
                          AccessSpecifier AS, DeclSpecContext DSC);
  void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
  void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
                            RecordDecl *TagDecl);

  void ParseStructDeclaration(
      ParsingDeclSpec &DS,
      llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);

  bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
  bool isTypeSpecifierQualifier();

  /// isKnownToBeTypeSpecifier - Return true if we know that the specified
  /// token is definitely a type-specifier. Return false if it isn't part of a
  /// type specifier or if we're not sure.
  bool isKnownToBeTypeSpecifier(const Token &Tok) const;

  /// Return true if we know that we are definitely looking at a
  /// decl-specifier, and isn't part of an expression such as a function-style
  /// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
    if (getLangOpts().CPlusPlus)
      return isCXXDeclarationSpecifier() == TPResult::True;
    return isDeclarationSpecifier(true);
  }

  /// isDeclarationStatement - Disambiguates between a declaration or an
  /// expression statement, when parsing function bodies.
  /// Returns true for declaration, false for expression.
  bool isDeclarationStatement() {
    if (getLangOpts().CPlusPlus)
      return isCXXDeclarationStatement();
    return isDeclarationSpecifier(true);
  }

  /// isForInitDeclaration - Disambiguates between a declaration or an
  /// expression in the context of the C 'clause-1' or the C++
  /// 'for-init-statement' part of a 'for' statement.
  /// Returns true for declaration, false for expression.
  bool isForInitDeclaration() {
    // Give Sema a chance to set up OpenMP loop state before disambiguation.
    if (getLangOpts().OpenMP)
      Actions.startOpenMPLoop();
    if (getLangOpts().CPlusPlus)
      return Tok.is(tok::kw_using) ||
             isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
    return isDeclarationSpecifier(true);
  }

  /// Determine whether this is a C++1z for-range-identifier.
  bool isForRangeIdentifier();

  /// Determine whether we are currently at the start of an Objective-C
  /// class message that appears to be missing the open bracket '['.
  bool isStartOfObjCClassMessageMissingOpenBracket();

  /// Starting with a scope specifier, identifier, or
  /// template-id that refers to the current class, determine whether
  /// this is a constructor declarator.
  bool isConstructorDeclarator(bool Unqualified,
                               bool DeductionGuide = false);

  /// Specifies the context in which type-id/expression
  /// disambiguation will occur.
  enum TentativeCXXTypeIdContext {
    TypeIdInParens,
    TypeIdUnambiguous,
    TypeIdAsTemplateArgument
  };

  /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
  /// whether the parens contain an expression or a type-id.
  /// Returns true for a type-id and false for an expression.
  bool isTypeIdInParens(bool &isAmbiguous) {
    if (getLangOpts().CPlusPlus)
      return isCXXTypeId(TypeIdInParens, isAmbiguous);
    // In C the disambiguation is never ambiguous.
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  bool isTypeIdInParens() {
    bool isAmbiguous;
    return isTypeIdInParens(isAmbiguous);
  }

  /// Checks if the current tokens form type-id or expression.
  /// It is similar to isTypeIdInParens but does not suppose that type-id
  /// is in parenthesis.
  bool isTypeIdUnambiguously() {
    bool IsAmbiguous;
    if (getLangOpts().CPlusPlus)
      return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
    return isTypeSpecifierQualifier();
  }

  /// isCXXDeclarationStatement - C++-specialized function that disambiguates
  /// between a declaration or an expression statement, when parsing function
  /// bodies. Returns true for declaration, false for expression.
  bool isCXXDeclarationStatement();

  /// isCXXSimpleDeclaration - C++-specialized function that disambiguates
  /// between a simple-declaration or an expression-statement.
  /// If during the disambiguation process a parsing error is encountered,
  /// the function returns true to let the declaration parsing code handle it.
  /// Returns false if the statement is disambiguated as expression.
  bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

  /// isCXXFunctionDeclarator - Disambiguates between a function declarator or
  /// a constructor-style initializer, when parsing declaration statements.
  /// Returns true for function declarator and false for constructor-style
  /// initializer. Sets 'IsAmbiguous' to true to indicate that this
  /// declaration might be a constructor-style initializer.
  /// If during the disambiguation process a parsing error is encountered,
  /// the function returns true to let the declaration parsing code handle it.
  bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);

  struct ConditionDeclarationOrInitStatementState;
  enum class ConditionOrInitStatement {
    Expression,    ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
    InitStmtDecl,  ///< Disambiguated as a simple-declaration init-statement.
    ForRangeDecl,  ///< Disambiguated as a for-range declaration.
    Error          ///< Can't be any of the above!
  };

  /// Disambiguates between the different kinds of things that can happen
  /// after 'if (' or 'switch ('. This could be one of two different kinds of
  /// declaration (depending on whether there is a ';' later) or an
  /// expression.
  ConditionOrInitStatement
  isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
                                           bool CanBeForRangeDecl);

  bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
  bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
    bool isAmbiguous;
    return isCXXTypeId(Context, isAmbiguous);
  }

  /// TPResult - Used as the result value for functions whose purpose is to
  /// disambiguate C++ constructs by "tentatively parsing" them.
  enum class TPResult {
    True, False, Ambiguous, Error
  };

  /// Determine whether we could have an enum-base.
  ///
  /// \p AllowSemi If \c true, then allow a ';' after the enum-base;
  /// otherwise only consider this to be an enum-base if the next token is a
  /// '{'.
  ///
  /// \return \c false if this cannot possibly be an enum base; \c true
  /// otherwise.
  bool isEnumBase(bool AllowSemi);

  /// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
  /// declaration specifier, TPResult::False if it is not,
  /// TPResult::Ambiguous if it could be either a decl-specifier or a
  /// function-style cast, and TPResult::Error if a parsing error was
  /// encountered. If it could be a braced C++11 function-style cast, returns
  /// BracedCastResult.
  /// Doesn't consume tokens.
  TPResult
  isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
                            bool *InvalidAsDeclSpec = nullptr);

  /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
  /// \c TPResult::Ambiguous, determine whether the decl-specifier would be
  /// a type-specifier other than a cv-qualifier.
  bool isCXXDeclarationSpecifierAType();

  /// Determine whether the current token sequence might be
  ///   '<' template-argument-list '>'
  /// rather than a less-than expression.
  TPResult isTemplateArgumentList(unsigned TokensToSkip);

  /// Determine whether an '(' after an 'explicit' keyword is part of a C++20
  /// 'explicit(bool)' declaration, in earlier language modes where that is an
  /// extension.
  TPResult isExplicitBool();

  /// Determine whether an identifier has been tentatively declared as a
  /// non-type. Such tentative declarations should not be found to name a type
  /// during a tentative parse, but also should not be annotated as a
  /// non-type.
  bool isTentativelyDeclared(IdentifierInfo *II);

  // "Tentative parsing" functions, used for disambiguation. If a parsing
  // error is encountered they will return TPResult::Error.
  // Returning TPResult::True/False indicates that the ambiguity was
  // resolved and tentative parsing may stop. TPResult::Ambiguous indicates
  // that more tentative parsing is necessary for disambiguation.
  // They all consume tokens, so backtracking should be used after calling
  // them.

  TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
  TPResult TryParseTypeofSpecifier();
  TPResult TryParseProtocolQualifiers();
  TPResult TryParsePtrOperatorSeq();
  TPResult TryParseOperatorId();
  TPResult TryParseInitDeclaratorList();
  TPResult TryParseDeclarator(bool mayBeAbstract,
                              bool mayHaveIdentifier = true,
                              bool mayHaveDirectInit = false);
  TPResult
  TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
                                     bool VersusTemplateArg = false);
  TPResult TryParseFunctionDeclarator();
  TPResult TryParseBracketDeclarator();
  TPResult TryConsumeDeclarationSpecifier();

  /// Try to skip a possibly empty sequence of 'attribute-specifier's without
  /// full validation of the syntactic structure of attributes.
  bool TrySkipAttributes();

  /// Diagnoses use of _ExtInt as being deprecated, and diagnoses use of
  /// _BitInt as an extension when appropriate.
void DiagnoseBitIntUse(const Token &Tok);

public:
  TypeResult
  ParseTypeName(SourceRange *Range = nullptr,
                DeclaratorContext Context = DeclaratorContext::TypeName,
                AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr,
                ParsedAttributes *Attrs = nullptr);

private:
  void ParseBlockId(SourceLocation CaretLoc);

  /// Are [[]] attributes enabled?
  bool standardAttributesAllowed() const {
    const LangOptions &LO = getLangOpts();
    return LO.DoubleSquareBracketAttributes;
  }

  // Check for the start of an attribute-specifier-seq in a context where an
  // attribute is not allowed.
  bool CheckProhibitedCXX11Attribute() {
    assert(Tok.is(tok::l_square));
    if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
      return false;
    return DiagnoseProhibitedCXX11Attribute();
  }

  bool DiagnoseProhibitedCXX11Attribute();
  void CheckMisplacedCXX11Attribute(ParsedAttributes &Attrs,
                                    SourceLocation CorrectLocation) {
    if (!standardAttributesAllowed())
      return;
    // Only fire on '[[' or 'alignas' — anything else can't start a C++11
    // attribute-specifier.
    if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
        Tok.isNot(tok::kw_alignas))
      return;
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
  }
  void DiagnoseMisplacedCXX11Attribute(ParsedAttributes &Attrs,
                                       SourceLocation CorrectLocation);

  void stripTypeAttributesOffDeclSpec(ParsedAttributes &Attrs, DeclSpec &DS,
                                      Sema::TagUseKind TUK);

  // FixItLoc = possible correct location for the attributes
  void ProhibitAttributes(ParsedAttributes &Attrs,
                          SourceLocation FixItLoc = SourceLocation()) {
    if (Attrs.Range.isInvalid())
      return;
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clear();
  }

  void ProhibitAttributes(ParsedAttributesView &Attrs,
                          SourceLocation FixItLoc = SourceLocation()) {
    if (Attrs.Range.isInvalid())
      return;
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clearListOnly();
  }
  void DiagnoseProhibitedAttributes(const SourceRange &Range,
                                    SourceLocation FixItLoc);

  // Forbid C++11 and C2x attributes that appear on certain syntactic
  // locations which standard permits but we don't support yet, for example,
  // attributes appertain to decl specifiers.
  // For the most cases we don't want to warn on unknown type attributes, but
  // left them to later diagnoses. However, for a few cases like module
  // declarations and module import declarations, we should do it.
  void ProhibitCXX11Attributes(ParsedAttributes &Attrs, unsigned DiagID,
                               bool DiagnoseEmptyAttrs = false,
                               bool WarnOnUnknownAttrs = false);

  /// Skip C++11 and C2x attributes and return the end location of the
  /// last one.
  /// \returns SourceLocation() if there are no attributes.
  SourceLocation SkipCXX11Attributes();

  /// Diagnose and skip C++11 and C2x attributes that appear in syntactic
  /// locations where attributes are not allowed.
  void DiagnoseAndSkipCXX11Attributes();

  /// Emit warnings for C++11 and C2x attributes that are in a position that
  /// clang accepts as an extension.
  void DiagnoseCXX11AttributeExtension(ParsedAttributes &Attrs);

  /// Parses syntax-generic attribute arguments for attributes which are
  /// known to the implementation, and adds them to the given ParsedAttributes
  /// list with the given attribute syntax. Returns the number of arguments
  /// parsed for the attribute.
  unsigned
  ParseAttributeArgsCommon(IdentifierInfo *AttrName,
                           SourceLocation AttrNameLoc,
                           ParsedAttributes &Attrs, SourceLocation *EndLoc,
                           IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                           ParsedAttr::Syntax Syntax);

  enum ParseAttrKindMask {
    PAKM_GNU = 1 << 0,
    PAKM_Declspec = 1 << 1,
    PAKM_CXX11 = 1 << 2,
  };

  /// \brief Parse attributes based on what syntaxes are desired, allowing for
  /// the order to vary. e.g. with PAKM_GNU | PAKM_Declspec:
  /// __attribute__((...)) __declspec(...) __attribute__((...)))
  /// Note that Microsoft attributes (spelled with single square brackets) are
  /// not supported by this because of parsing ambiguities with other
  /// constructs.
  ///
  /// There are some attribute parse orderings that should not be allowed in
  /// arbitrary order.
/// e.g.,
  ///
  ///   [[]] __attribute__(()) int i; // OK
  ///   __attribute__(()) [[]] int i; // Not OK
  ///
  /// Such situations should use the specific attribute parsing
  /// functionality.
  void ParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
                       LateParsedAttrList *LateAttrs = nullptr);
  /// \brief Possibly parse attributes based on what syntaxes are desired,
  /// allowing for the order to vary.
  bool MaybeParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
                            LateParsedAttrList *LateAttrs = nullptr) {
    if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
        (standardAttributesAllowed() && isCXX11AttributeSpecifier())) {
      ParseAttributes(WhichAttrKinds, Attrs, LateAttrs);
      return true;
    }
    return false;
  }

  void MaybeParseGNUAttributes(Declarator &D,
                               LateParsedAttrList *LateAttrs = nullptr) {
    if (Tok.is(tok::kw___attribute)) {
      ParsedAttributes Attrs(AttrFactory);
      ParseGNUAttributes(Attrs, LateAttrs, &D);
      D.takeAttributes(Attrs);
    }
  }

  bool MaybeParseGNUAttributes(ParsedAttributes &Attrs,
                               LateParsedAttrList *LateAttrs = nullptr) {
    if (Tok.is(tok::kw___attribute)) {
      ParseGNUAttributes(Attrs, LateAttrs);
      return true;
    }
    return false;
  }

  void ParseGNUAttributes(ParsedAttributes &Attrs,
                          LateParsedAttrList *LateAttrs = nullptr,
                          Declarator *D = nullptr);
  void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
                             SourceLocation AttrNameLoc,
                             ParsedAttributes &Attrs, SourceLocation *EndLoc,
                             IdentifierInfo *ScopeName,
                             SourceLocation ScopeLoc,
                             ParsedAttr::Syntax Syntax, Declarator *D);
  IdentifierLoc *ParseIdentifierLoc();

  unsigned
  ParseClangAttributeArgs(IdentifierInfo *AttrName,
                          SourceLocation AttrNameLoc,
                          ParsedAttributes &Attrs, SourceLocation *EndLoc,
                          IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                          ParsedAttr::Syntax Syntax);

  void ReplayOpenMPAttributeTokens(CachedTokens &OpenMPTokens) {
    // If parsing the attributes found an OpenMP directive, emit those tokens
    // to the parse stream now.
    if (!OpenMPTokens.empty()) {
      PP.EnterToken(Tok, /*IsReinject*/ true);
      PP.EnterTokenStream(OpenMPTokens, /*DisableMacroExpansion*/ true,
                          /*IsReinject*/ true);
      ConsumeAnyToken(/*ConsumeCodeCompletionTok*/ true);
    }
  }
  void MaybeParseCXX11Attributes(Declarator &D) {
    if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
      ParsedAttributes Attrs(AttrFactory);
      ParseCXX11Attributes(Attrs);
      D.takeAttributes(Attrs);
    }
  }

  bool MaybeParseCXX11Attributes(ParsedAttributes &Attrs,
                                 bool OuterMightBeMessageSend = false) {
    if (standardAttributesAllowed() &&
        isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) {
      ParseCXX11Attributes(Attrs);
      return true;
    }
    return false;
  }

  void ParseOpenMPAttributeArgs(IdentifierInfo *AttrName,
                                CachedTokens &OpenMPTokens);

  void ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
                                            CachedTokens &OpenMPTokens,
                                            SourceLocation *EndLoc = nullptr);
  void ParseCXX11AttributeSpecifier(ParsedAttributes &Attrs,
                                    SourceLocation *EndLoc = nullptr) {
    CachedTokens OpenMPTokens;
    ParseCXX11AttributeSpecifierInternal(Attrs, OpenMPTokens, EndLoc);
    ReplayOpenMPAttributeTokens(OpenMPTokens);
  }
  void ParseCXX11Attributes(ParsedAttributes &attrs);
  /// Parses a C++11 (or C2x)-style attribute argument list. Returns true
  /// if this results in adding an attribute to the ParsedAttributes list.
  bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
                               SourceLocation AttrNameLoc,
                               ParsedAttributes &Attrs, SourceLocation *EndLoc,
                               IdentifierInfo *ScopeName,
                               SourceLocation ScopeLoc,
                               CachedTokens &OpenMPTokens);

  IdentifierInfo *TryParseCXX11AttributeIdentifier(
      SourceLocation &Loc,
      Sema::AttributeCompletion Completion = Sema::AttributeCompletion::None,
      const IdentifierInfo *EnclosingScope = nullptr);

  /// In HLSL mode, parse a trailing semantic annotation introduced by ':'.
  void MaybeParseHLSLSemantics(ParsedAttributes &Attrs,
                               SourceLocation *EndLoc = nullptr) {
    if (getLangOpts().HLSL && Tok.is(tok::colon))
      ParseHLSLSemantics(Attrs, EndLoc);
  }

  void ParseHLSLSemantics(ParsedAttributes &Attrs,
                          SourceLocation *EndLoc = nullptr);

  /// In Microsoft-extension or HLSL mode, parse a '[...]' Microsoft
  /// attribute block if one starts at the current token.
  void MaybeParseMicrosoftAttributes(ParsedAttributes &Attrs) {
    if ((getLangOpts().MicrosoftExt || getLangOpts().HLSL) &&
        Tok.is(tok::l_square)) {
      ParsedAttributes AttrsWithRange(AttrFactory);
      ParseMicrosoftAttributes(AttrsWithRange);
      Attrs.takeAllFrom(AttrsWithRange);
    }
  }
  void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
  void ParseMicrosoftAttributes(ParsedAttributes &Attrs);
  /// Parse a __declspec attribute if the keyword is enabled and present.
  /// Returns true if one was parsed.
  bool MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs) {
    if (getLangOpts().DeclSpecKeyword && Tok.is(tok::kw___declspec)) {
      ParseMicrosoftDeclSpecs(Attrs);
      return true;
    }
    return false;
  }
  void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs);
  bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
                                  SourceLocation AttrNameLoc,
                                  ParsedAttributes &Attrs);
  void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
  void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
  SourceLocation SkipExtendedMicrosoftTypeAttributes();
  void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
  void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
  void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
  void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
  void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
  void ParseCUDAFunctionAttributes(ParsedAttributes &attrs);
  VersionTuple ParseVersionTuple(SourceRange &Range);
  void ParseAvailabilityAttribute(IdentifierInfo &Availability,
                                  SourceLocation AvailabilityLoc,
                                  ParsedAttributes &attrs,
                                  SourceLocation *endLoc,
                                  IdentifierInfo *ScopeName,
                                  SourceLocation ScopeLoc,
                                  ParsedAttr::Syntax Syntax);

  Optional<AvailabilitySpec> ParseAvailabilitySpec();
  ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);

  void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
                                          SourceLocation Loc,
                                          ParsedAttributes &Attrs,
                                          SourceLocation *EndLoc,
                                          IdentifierInfo *ScopeName,
                                          SourceLocation ScopeLoc,
                                          ParsedAttr::Syntax Syntax);

  void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
                                       SourceLocation ObjCBridgeRelatedLoc,
                                       ParsedAttributes &Attrs,
                                       SourceLocation *EndLoc,
                                       IdentifierInfo *ScopeName,
                                       SourceLocation ScopeLoc,
                                       ParsedAttr::Syntax Syntax);

  void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName,
                                  SourceLocation AttrNameLoc,
                                  ParsedAttributes &Attrs,
                                  SourceLocation *EndLoc,
                                  IdentifierInfo *ScopeName,
                                  SourceLocation ScopeLoc,
                                  ParsedAttr::Syntax Syntax);

  void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
                                        SourceLocation AttrNameLoc,
                                        ParsedAttributes &Attrs,
                                        SourceLocation *EndLoc,
                                        IdentifierInfo *ScopeName,
                                        SourceLocation ScopeLoc,
                                        ParsedAttr::Syntax Syntax);
  void ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
                                 SourceLocation AttrNameLoc,
                                 ParsedAttributes &Attrs,
                                 IdentifierInfo *ScopeName,
                                 SourceLocation ScopeLoc,
                                 ParsedAttr::Syntax Syntax);

  void ParseTypeofSpecifier(DeclSpec &DS);
  SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
  void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
                                         SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
  void ParseAtomicSpecifier(DeclSpec &DS);

  ExprResult ParseAlignArgument(SourceLocation Start,
                                SourceLocation &EllipsisLoc);
  void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
                               SourceLocation *endLoc = nullptr);
  ExprResult ParseExtIntegerArgument();

  void ParsePtrauthQualifier(ParsedAttributes &Attrs);
  VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
  /// Convenience overload that tests the current lookahead token.
  VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
    return isCXX11VirtSpecifier(Tok);
  }
  void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
                                          SourceLocation FriendLoc);

  bool isCXX11FinalKeyword() const;
  bool isClassCompatibleKeyword() const;

  /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
  /// enter a new C++ declarator scope and exit it when the function is
  /// finished.
  class DeclaratorScopeObj {
    Parser &P;
    CXXScopeSpec &SS;
    bool EnteredScope;  // true once Sema accepted the declarator scope
    bool CreatedScope;  // true once a parser scope was pushed

  public:
    DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
        : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}

    void EnterDeclaratorScope() {
      assert(!EnteredScope && "Already entered the scope!");
      assert(SS.isSet() && "C++ scope was not set!");

      CreatedScope = true;
      P.EnterScope(0); // Not a decl scope.

      if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
        EnteredScope = true;
    }

    ~DeclaratorScopeObj() {
      if (EnteredScope) {
        assert(SS.isSet() && "C++ scope was cleared ?");
        P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
      }
      if (CreatedScope)
        P.ExitScope();
    }
  };

  /// ParseDeclarator - Parse and verify a newly-initialized declarator.
  void ParseDeclarator(Declarator &D);
  /// A function that parses a variant of direct-declarator.
  typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
  void ParseDeclaratorInternal(Declarator &D,
                               DirectDeclParseFunction DirectDeclParser);

  /// Bitmask describing which attribute syntaxes are accepted (and how they
  /// are diagnosed) while parsing a type-qualifier list.
  enum AttrRequirements {
    AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
    AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
    AR_GNUAttributesParsed = 1 << 1,
    AR_CXX11AttributesParsed = 1 << 2,
    AR_DeclspecAttributesParsed = 1 << 3,
    AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed |
                             AR_DeclspecAttributesParsed,
    AR_VendorAttributesParsed =
        AR_GNUAttributesParsed | AR_DeclspecAttributesParsed
  };

  void ParseTypeQualifierListOpt(
      DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
      bool AtomicAllowed = true, bool IdentifierRequired = false,
      Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
  void ParseDirectDeclarator(Declarator &D);
  void ParseDecompositionDeclarator(Declarator &D);
  void ParseParenDeclarator(Declarator &D);
  void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &FirstArgAttrs,
                               BalancedDelimiterTracker &Tracker,
                               bool IsAmbiguous, bool RequiresArg = false);
  void InitCXXThisScopeForDeclaratorIfRelevant(
      const Declarator &D, const DeclSpec &DS,
      llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
  bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
                         SourceLocation &RefQualifierLoc);
  bool isFunctionDeclaratorIdentifierList();
  void ParseFunctionDeclaratorIdentifierList(
      Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
  void ParseParameterDeclarationClause(
      DeclaratorContext DeclaratorContext, ParsedAttributes &attrs,
      SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
      SourceLocation &EllipsisLoc);
  void ParseBracketDeclarator(Declarator &D);
  void ParseMisplacedBracketDeclarator(Declarator &D);

  //===--------------------------------------------------------------------===//
  // C++ 7: Declarations [dcl.dcl]

  /// The kind of attribute specifier we have found.
  enum CXX11AttributeKind {
    /// This is not an attribute specifier.
    CAK_NotAttributeSpecifier,
    /// This should be treated as an attribute-specifier.
    CAK_AttributeSpecifier,
    /// The next tokens are '[[', but this is not an attribute-specifier. This
    /// is ill-formed by C++11 [dcl.attr.grammar]p6.
    CAK_InvalidAttributeSpecifier
  };
  CXX11AttributeKind
  isCXX11AttributeSpecifier(bool Disambiguate = false,
                            bool OuterMightBeMessageSend = false);

  void DiagnoseUnexpectedNamespace(NamedDecl *Context);

  DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
                                SourceLocation &DeclEnd,
                                SourceLocation InlineLoc = SourceLocation());

  /// Source locations and identifier for one level of a nested namespace
  /// declaration (e.g. 'namespace A::inline B::C').
  struct InnerNamespaceInfo {
    SourceLocation NamespaceLoc;
    SourceLocation InlineLoc;
    SourceLocation IdentLoc;
    IdentifierInfo *Ident;
  };
  using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;

  void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
                           unsigned int index, SourceLocation &InlineLoc,
                           ParsedAttributes &attrs,
                           BalancedDelimiterTracker &Tracker);
  Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
  Decl *ParseExportDeclaration();
  DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
      DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
      SourceLocation &DeclEnd, ParsedAttributes &Attrs);
  Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc,
                            SourceLocation &DeclEnd, ParsedAttributes &attrs);

  /// The parsed pieces of one declarator in a using-declaration.
  struct UsingDeclarator {
    SourceLocation TypenameLoc;
    CXXScopeSpec SS;
    UnqualifiedId Name;
    SourceLocation EllipsisLoc;

    /// Reset all fields so the object can be reused for the next declarator.
    void clear() {
      TypenameLoc = EllipsisLoc = SourceLocation();
      SS.clear();
      Name.clear();
    }
  };

  bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
  DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
                                       const ParsedTemplateInfo &TemplateInfo,
                                       SourceLocation UsingLoc,
                                       SourceLocation &DeclEnd,
                                       ParsedAttributes &Attrs,
                                       AccessSpecifier AS = AS_none);
  Decl *ParseAliasDeclarationAfterDeclarator(
      const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
      UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
      ParsedAttributes &Attrs, Decl **OwnedType = nullptr);

  Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
  Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
                            SourceLocation AliasLoc, IdentifierInfo *Alias,
                            SourceLocation &DeclEnd);

  //===--------------------------------------------------------------------===//
  // C++ 9: classes [class] and C structs/unions.
  bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
  void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
                           DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
                           AccessSpecifier AS, bool EnteringContext,
                           DeclSpecContext DSC, ParsedAttributes &Attributes);
  void SkipCXXMemberSpecification(SourceLocation StartLoc,
                                  SourceLocation AttrFixitLoc,
                                  unsigned TagType,
                                  Decl *TagDecl);
  void ParseCXXMemberSpecification(SourceLocation StartLoc,
                                   SourceLocation AttrFixitLoc,
                                   ParsedAttributes &Attrs, unsigned TagType,
                                   Decl *TagDecl);
  ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
                                       SourceLocation &EqualLoc);
  bool
  ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
                                            VirtSpecifiers &VS,
                                            ExprResult &BitfieldSize,
                                            LateParsedAttrList &LateAttrs);
  void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(
      Declarator &D, VirtSpecifiers &VS);
  DeclGroupPtrTy ParseCXXClassMemberDeclaration(
      AccessSpecifier AS, ParsedAttributes &Attr,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
  DeclGroupPtrTy
  ParseCXXClassMemberDeclarationWithPragmas(AccessSpecifier &AS,
                                            ParsedAttributes &AccessAttrs,
                                            DeclSpec::TST TagType,
                                            Decl *Tag);
  void ParseConstructorInitializer(Decl *ConstructorDecl);
  MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
  void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
                                      Decl *ThisDecl);

  //===--------------------------------------------------------------------===//
  // C++ 10: Derived classes [class.derived]
  TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
                                    SourceLocation &EndLocation);
  void ParseBaseClause(Decl *ClassDecl);
  BaseResult ParseBaseSpecifier(Decl *ClassDecl);
  AccessSpecifier getAccessSpecifierIfPresent() const;

  bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
                                    ParsedType ObjectType,
                                    bool ObjectHadErrors,
                                    SourceLocation TemplateKWLoc,
                                    IdentifierInfo *Name,
                                    SourceLocation NameLoc,
                                    bool EnteringContext,
                                    UnqualifiedId &Id,
                                    bool AssumeTemplateId);
  bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
                                  ParsedType ObjectType,
                                  UnqualifiedId &Result);

  //===--------------------------------------------------------------------===//
  // OpenMP: Directives and clauses.
  /// Parse clauses for '#pragma omp declare simd'.
  DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
                                            CachedTokens &Toks,
                                            SourceLocation Loc);

  /// Parse a property kind into \p TIProperty for the selector set \p Set and
  /// selector \p Selector.
  void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty,
                                 llvm::omp::TraitSet Set,
                                 llvm::omp::TraitSelector Selector,
                                 llvm::StringMap<SourceLocation> &Seen);

  /// Parse a selector kind into \p TISelector for the selector set \p Set.
  void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector,
                                 llvm::omp::TraitSet Set,
                                 llvm::StringMap<SourceLocation> &Seen);

  /// Parse a selector set kind into \p TISet.
  void parseOMPTraitSetKind(OMPTraitSet &TISet,
                            llvm::StringMap<SourceLocation> &Seen);

  /// Parses an OpenMP context property.
  void parseOMPContextProperty(OMPTraitSelector &TISelector,
                               llvm::omp::TraitSet Set,
                               llvm::StringMap<SourceLocation> &Seen);

  /// Parses an OpenMP context selector.
  void parseOMPContextSelector(OMPTraitSelector &TISelector,
                               llvm::omp::TraitSet Set,
                               llvm::StringMap<SourceLocation> &SeenSelectors);

  /// Parses an OpenMP context selector set.
  void parseOMPContextSelectorSet(OMPTraitSet &TISet,
                                  llvm::StringMap<SourceLocation> &SeenSets);

  /// Parses OpenMP context selectors.
  bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);

  /// Parse an 'append_args' clause for '#pragma omp declare variant'.
  bool parseOpenMPAppendArgs(
      SmallVectorImpl<OMPDeclareVariantAttr::InteropType> &InterOpTypes);

  /// Parse a `match` clause for an '#pragma omp declare variant'. Return true
  /// if there was an error.
  bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI,
                                         OMPTraitInfo *ParentTI);

  /// Parse clauses for '#pragma omp declare variant'.
  void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
                                     SourceLocation Loc);

  /// Parse 'omp [begin] assume[s]' directive.
  void ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind,
                                   SourceLocation Loc);

  /// Parse 'omp end assumes' directive.
  void ParseOpenMPEndAssumesDirective(SourceLocation Loc);

  /// Parse clauses for '#pragma omp [begin] declare target'.
  void ParseOMPDeclareTargetClauses(Sema::DeclareTargetContextInfo &DTCI);

  /// Parse '#pragma omp end declare target'.
  void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind BeginDKind,
                                         OpenMPDirectiveKind EndDKind,
                                         SourceLocation Loc);

  /// Skip tokens until an `annot_pragma_openmp_end` was found. Emit a warning
  /// if it is not the current token.
  void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind);

  /// Check the \p FoundKind against the \p ExpectedKind, if not issue an
  /// error that the "end" matching the "begin" directive of kind \p BeginKind
  /// was not found. Finally, if the expected kind was found or if
  /// \p SkipUntilOpenMPEnd is set, skip ahead using the helper
  /// `skipUntilPragmaOpenMPEnd`.
  void parseOMPEndDirective(OpenMPDirectiveKind BeginKind,
                            OpenMPDirectiveKind ExpectedKind,
                            OpenMPDirectiveKind FoundKind,
                            SourceLocation MatchingLoc,
                            SourceLocation FoundLoc,
                            bool SkipUntilOpenMPEnd);

  /// Parses declarative OpenMP directives.
  DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
      AccessSpecifier &AS, ParsedAttributes &Attrs, bool Delayed = false,
      DeclSpec::TST TagType = DeclSpec::TST_unspecified,
      Decl *TagDecl = nullptr);

  /// Parse 'omp declare reduction' construct.
  DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
  /// Parses initializer for provided omp_priv declaration inside the
  /// reduction initializer.
  void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);

  /// Parses 'omp declare mapper' directive.
  DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
  /// Parses variable declaration in 'omp declare mapper' directive.
  TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
                                             DeclarationName &Name,
                                             AccessSpecifier AS = AS_none);

  /// Tries to parse cast part of OpenMP array shaping operation:
  /// '[' expression ']' { '[' expression ']' } ')'.
  bool tryParseOpenMPArrayShapingCastPart();

  /// Parses simple list of variables.
  ///
  /// \param Kind Kind of the directive.
  /// \param Callback Callback function to be called for the list elements.
  /// \param AllowScopeSpecifier true, if the variables can have fully
  /// qualified names.
  ///
  bool ParseOpenMPSimpleVarList(
      OpenMPDirectiveKind Kind,
      const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
          Callback,
      bool AllowScopeSpecifier);
  /// Parses declarative or executable directive.
  ///
  /// \param StmtCtx The context in which we're parsing the directive.
  /// \param ReadDirectiveWithinMetadirective true if directive is within a
  /// metadirective and therefore ends on the closing paren.
  StmtResult ParseOpenMPDeclarativeOrExecutableDirective(
      ParsedStmtContext StmtCtx, bool ReadDirectiveWithinMetadirective = false);
  /// Parses clause of kind \a CKind for directive of a kind \a Kind.
  ///
  /// \param DKind Kind of current directive.
  /// \param CKind Kind of current clause.
  /// \param FirstClause true, if this is the first clause of a kind \a CKind
  /// in current directive.
  ///
  OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
                               OpenMPClauseKind CKind, bool FirstClause);
  /// Parses clause with a single expression of a kind \a Kind.
  ///
  /// \param Kind Kind of current clause.
  /// \param ParseOnly true to skip the clause's semantic actions and return
  /// nullptr.
  ///
  OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
                                         bool ParseOnly);
  /// Parses simple clause of a kind \a Kind.
  ///
  /// \param Kind Kind of current clause.
  /// \param ParseOnly true to skip the clause's semantic actions and return
  /// nullptr.
  ///
  OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
  /// Parses indirect clause.
  /// \param ParseOnly true to skip the clause's semantic actions and return
  /// false.
  bool ParseOpenMPIndirectClause(Sema::DeclareTargetContextInfo &DTCI,
                                 bool ParseOnly);
  /// Parses clause with a single expression and an additional argument
  /// of a kind \a Kind.
  ///
  /// \param DKind Directive kind.
  /// \param Kind Kind of current clause.
  /// \param ParseOnly true to skip the clause's semantic actions and return
  /// nullptr.
  ///
  OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
                                                OpenMPClauseKind Kind,
                                                bool ParseOnly);

  /// Parses the 'sizes' clause of a '#pragma omp tile' directive.
  OMPClause *ParseOpenMPSizesClause();

  /// Parses clause without any additional arguments.
  ///
  /// \param Kind Kind of current clause.
  /// \param ParseOnly true to skip the clause's semantic actions and return
  /// nullptr.
  ///
  OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
  /// Parses clause with the list of variables of a kind \a Kind.
  ///
  /// \param Kind Kind of current clause.
  /// \param ParseOnly true to skip the clause's semantic actions and return
  /// nullptr.
  ///
  OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
                                      OpenMPClauseKind Kind, bool ParseOnly);

  /// Parses and creates OpenMP 5.0 iterators expression:
  /// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier =
  /// <range-specification> }+ ')'
  ExprResult ParseOpenMPIteratorsExpr();

  /// Parses allocators and traits in the context of the uses_allocator
  /// clause. Expected format:
  /// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')'
  OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind);

  /// Parses clause with an interop variable of kind \a Kind.
  ///
  /// \param Kind Kind of current clause.
  /// \param ParseOnly true to skip the clause's semantic actions and return
  /// nullptr.
  OMPClause *ParseOpenMPInteropClause(OpenMPClauseKind Kind, bool ParseOnly);

public:
  /// Parses simple expression in parens for single-expression clauses of
  /// OpenMP constructs.
  /// \param RLoc Returned location of right paren.
  ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
                                   bool IsAddressOfOperand = false);

  /// Data used for parsing list of variables in OpenMP clauses.
  struct OpenMPVarListDataTy {
    Expr *DepModOrTailExpr = nullptr;
    SourceLocation ColonLoc;
    SourceLocation RLoc;
    CXXScopeSpec ReductionOrMapperIdScopeSpec;
    DeclarationNameInfo ReductionOrMapperId;
    int ExtraModifier = -1; ///< Additional modifier for linear, map, depend
                            ///< or lastprivate clause.
    SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
        MapTypeModifiers;
    SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers>
        MapTypeModifiersLoc;
    SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers>
        MotionModifiers;
    SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc;
    bool IsMapTypeImplicit = false;
    SourceLocation ExtraModifierLoc;
  };

  /// Parses clauses with list.
  bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
                          SmallVectorImpl<Expr *> &Vars,
                          OpenMPVarListDataTy &Data);
  bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
                          bool ObjectHadErrors, bool EnteringContext,
                          bool AllowDestructorName, bool AllowConstructorName,
                          bool AllowDeductionGuide,
                          SourceLocation *TemplateKWLoc, UnqualifiedId &Result);

  /// Parses the mapper modifier in map, to, and from clauses.
  bool parseMapperModifier(OpenMPVarListDataTy &Data);
  /// Parses map-type-modifiers in map clause.
  /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
  /// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
  bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);

private:
  //===--------------------------------------------------------------------===//
  // C++ 14: Templates [temp]

  // C++ 14.1: Template Parameters [temp.param]
  Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
                                             SourceLocation &DeclEnd,
                                             ParsedAttributes &AccessAttrs,
                                             AccessSpecifier AS = AS_none);
  Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
                                                 SourceLocation &DeclEnd,
                                                 ParsedAttributes &AccessAttrs,
                                                 AccessSpecifier AS);
  Decl *ParseSingleDeclarationAfterTemplate(
      DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
      ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
      ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
  bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth,
                               SmallVectorImpl<NamedDecl *> &TemplateParams,
                               SourceLocation &LAngleLoc,
                               SourceLocation &RAngleLoc);
  bool ParseTemplateParameterList(unsigned Depth,
                                  SmallVectorImpl<NamedDecl*> &TemplateParams);
  TPResult isStartOfTemplateTypeParameter();
  NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
  NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
  NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
  NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
  bool isTypeConstraintAnnotation();
  bool TryAnnotateTypeConstraint();
  void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
                                 SourceLocation CorrectLoc,
                                 bool AlreadyHasEllipsis,
                                 bool IdentifierHasName);
  void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
                                             Declarator &D);
  // C++ 14.3: Template arguments [temp.arg]
  typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;

  bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc,
                                      SourceLocation &RAngleLoc,
                                      bool ConsumeLastToken,
                                      bool ObjCGenericList);
  bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
                                        SourceLocation &LAngleLoc,
                                        TemplateArgList &TemplateArgs,
                                        SourceLocation &RAngleLoc,
                                        TemplateTy NameHint = nullptr);
  bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
                               CXXScopeSpec &SS,
                               SourceLocation TemplateKWLoc,
                               UnqualifiedId &TemplateName,
                               bool AllowTypeAnnotation = true,
                               bool TypeConstraint = false);
  void
  AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS, bool IsClassName = false);
  bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs,
                                 TemplateTy Template, SourceLocation OpenLoc);
  ParsedTemplateArgument ParseTemplateTemplateArgument();
  ParsedTemplateArgument ParseTemplateArgument();
  Decl *ParseExplicitInstantiation(DeclaratorContext Context,
                                   SourceLocation ExternLoc,
                                   SourceLocation TemplateLoc,
                                   SourceLocation &DeclEnd,
                                   ParsedAttributes &AccessAttrs,
                                   AccessSpecifier AS = AS_none);
  // C++2a: Template, concept definition [temp]
  Decl *
  ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
                         SourceLocation &DeclEnd);

  //===--------------------------------------------------------------------===//
  // Modules
  DeclGroupPtrTy ParseModuleDecl(Sema::ModuleImportState &ImportState);
  Decl *ParseModuleImport(SourceLocation AtLoc,
                          Sema::ModuleImportState &ImportState);
  bool parseMisplacedModuleImport();
  /// If the current token is a module-boundary annotation, handle the
  /// misplaced import; otherwise do nothing and return false.
  bool tryParseMisplacedModuleImport() {
    tok::TokenKind Kind = Tok.getKind();
    if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
        Kind == tok::annot_module_include)
      return parseMisplacedModuleImport();
    return false;
  }

  bool ParseModuleName(
      SourceLocation UseLoc,
      SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
      bool IsImport);

  //===--------------------------------------------------------------------===//
  // C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
  ExprResult ParseTypeTrait();

  /// Parse the given string as a type.
  ///
  /// This is a dangerous utility function currently employed only by API
  /// notes. It is not a general entry-point for safely parsing types from
  /// strings.
  ///
  /// \param typeStr The string to be parsed as a type.
  /// \param context The name of the context in which this string is being
  /// parsed, which will be used in diagnostics.
  /// \param includeLoc The location at which this parse was triggered.
  TypeResult parseTypeFromString(StringRef typeStr, StringRef context,
                                 SourceLocation includeLoc);

  //===--------------------------------------------------------------------===//
  // Embarcadero: Array and Expression Traits
  ExprResult ParseArrayTypeTrait();
  ExprResult ParseExpressionTrait();

  ExprResult ParseBuiltinPtrauthTypeDiscriminator();

  //===--------------------------------------------------------------------===//
  // Preprocessor code-completion pass-through
  void CodeCompleteDirective(bool InConditional) override;
  void CodeCompleteInConditionalExclusion() override;
  void CodeCompleteMacroName(bool IsDefinition) override;
  void CodeCompletePreprocessorExpression() override;
  void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
                                 unsigned ArgumentIndex) override;
  void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
  void CodeCompleteNaturalLanguage() override;

  /// Bitset of qualifiers ('volatile', 'inline', 'goto') accepted on a GNU
  /// extended asm statement.
  class GNUAsmQualifiers {
    unsigned Qualifiers = AQ_unspecified;

  public:
    enum AQ {
      AQ_unspecified = 0,
      AQ_volatile = 1,
      AQ_inline = 2,
      AQ_goto = 4,
    };
    static const char *getQualifierName(AQ Qualifier);
    bool setAsmQualifier(AQ Qualifier);
    inline bool isVolatile() const { return Qualifiers & AQ_volatile; };
    inline bool isInline() const { return Qualifiers & AQ_inline; };
    inline bool isGoto() const { return Qualifiers & AQ_goto; }
  };
  bool isGCCAsmStatement(const Token &TokAfterAsm) const;
  bool isGNUAsmQualifier(const Token &TokAfterAsm) const;
  GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const;
  bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ);
};

} // end namespace clang

#endif
GB_unop__exp2_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__exp2_fc32_fc32)
// op(A') function: GB (_unop_tran__exp2_fc32_fc32)

// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_cexp2f (aij)

// type of the entries of the input matrix A
#define GB_ATYPE \
    GxB_FC32_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

// reference to the p-th entry of C
#define GB_CX(p) Cx [p]

// unary operator: z = 2^x, single-precision complex
#define GB_OP(z, x) \
    z = GB_cexp2f (x) ;

// casting (A and C share the same type here, so this is a plain copy)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ; \
    Cx [pC] = GB_cexp2f (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EXP2 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = GB_cexp2f (aij) elementwise over anz entries, in parallel
// across nthreads.  Returns GrB_NO_VALUE when this kernel is compiled out
// (GB_DISABLE), so the caller falls back to the generic kernel.
GrB_Info GB (_unop_apply__exp2_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // non-bitmap case: every position 0..anz-1 holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a single parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_cexp2f (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions whose bitmap bit is clear (no entry present)
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_cexp2f (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the shared template GB_unop_transpose.c, which is
// specialized by the GB_* macros defined above.
GrB_Info GB (_unop_tran__exp2_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
iochain.h
/*
 * IOchain - Distribute a chain of dependent IO events among threads.
 *
 * This file is part of Bitshuffle
 * Author: Kiyoshi Masui <kiyo@physics.ubc.ca>
 * Website: http://www.github.com/kiyo-masui/bitshuffle
 * Created: 2014
 *
 * See LICENSE file for details about copyright and rights to use.
 *
 *
 * Header File
 *
 * Similar in concept to a queue. Each task includes reading an input
 * and writing output, but the location of the input/output (the pointers)
 * depend on the previous item in the chain.
 *
 * This is designed for parallelizing blocked compression/decompression IO,
 * where the destination of a compressed block depends on the compressed size
 * of all previous blocks.
 *
 * Implemented with OpenMP locks.
 *
 *
 * Usage
 * -----
 *  - Call `ioc_init` in serial block.
 *  - Each thread should create a local variable *size_t this_iter* and
 *    pass its address to all function calls. Its value will be set
 *    inside the functions and is used to identify the thread.
 *  - Each thread must call each of the `ioc_get*` and `ioc_set*` methods
 *    exactly once per iteration, starting with `ioc_get_in` and ending
 *    with `ioc_set_next_out`.
 *  - The order (`ioc_get_in`, `ioc_set_next_in`, *work*, `ioc_get_out`,
 *    `ioc_set_next_out`, *work*) is most efficient.
 *  - Have each thread call `ioc_end_pop`.
 *    NOTE(review): `ioc_end_pop` is referenced here but not defined in this
 *    header -- presumably defined elsewhere or a stale comment; confirm.
 *  - `ioc_get_in` is blocked until the previous entry's
 *    `ioc_set_next_in` is called.
 *  - `ioc_get_out` is blocked until the previous entry's
 *    `ioc_set_next_out` is called.
 *  - There are no blocks on the very first iteration.
 *  - Call `ioc_destroy` in serial block.
 *  - Safe for num_threads >= IOC_SIZE (but less efficient).
 *
 */

#ifndef IOCHAIN_H
#define IOCHAIN_H

#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/* Number of slots in the circular chain; iteration i uses slot i % IOC_SIZE. */
#define IOC_SIZE 33

/* One chain slot: a buffer pointer and the lock that guards its readiness.
 * A held lock means "this slot's pointer is not yet published". */
typedef struct ioc_ptr_and_lock {
#ifdef _OPENMP
    omp_lock_t lock;
#endif
    void *ptr;
} ptr_and_lock;

/* The chain itself: iteration counter plus the input/output slot rings. */
typedef struct ioc_chain {
#ifdef _OPENMP
    omp_lock_t next_lock;   /* serializes iteration assignment in ioc_get_in */
#endif
    size_t next;            /* next iteration number to hand out */
    ptr_and_lock in_pl[IOC_SIZE];
    ptr_and_lock out_pl[IOC_SIZE];
} ioc_chain;

/*
 * All functions below are full definitions living in a header.  They are
 * declared `static` so that each including translation unit gets its own
 * internal copy; without this, including the header from more than one .c
 * file produces multiple-definition link errors (and an ODR violation in
 * C++).  Behavior is otherwise unchanged.
 */

/* Initialize the chain (serial only).  in_ptr_0/out_ptr_0 seed the input and
 * output pointers for iteration 0. */
static void ioc_init(ioc_chain *C, void *in_ptr_0, void *out_ptr_0) {
#ifdef _OPENMP
    omp_init_lock(&C->next_lock);
    for (size_t ii = 0; ii < IOC_SIZE; ii ++) {
        omp_init_lock(&(C->in_pl[ii].lock));
        omp_init_lock(&(C->out_pl[ii].lock));
    }
#endif
    C->next = 0;
    C->in_pl[0].ptr = in_ptr_0;
    C->out_pl[0].ptr = out_ptr_0;
}

/* Destroy all locks (serial only).  Does not free any buffers; the chain
 * never owns the pointers it passes around. */
static void ioc_destroy(ioc_chain *C) {
#ifdef _OPENMP
    omp_destroy_lock(&C->next_lock);
    for (size_t ii = 0; ii < IOC_SIZE; ii ++) {
        omp_destroy_lock(&(C->in_pl[ii].lock));
        omp_destroy_lock(&(C->out_pl[ii].lock));
    }
#endif
}

/* Claim the next iteration number (stored into *this_iter) and return its
 * input pointer.  Blocks until the previous iteration has published this
 * slot's input via ioc_set_next_in.  Also pre-acquires the *next* slot's
 * in/out locks so successors block until this iteration publishes. */
static void * ioc_get_in(ioc_chain *C, size_t *this_iter) {
#ifdef _OPENMP
    omp_set_lock(&C->next_lock);
    #pragma omp flush
#endif
    *this_iter = C->next;
    C->next ++;
#ifdef _OPENMP
    omp_set_lock(&(C->in_pl[*this_iter % IOC_SIZE].lock));
    omp_set_lock(&(C->in_pl[(*this_iter + 1) % IOC_SIZE].lock));
    omp_set_lock(&(C->out_pl[(*this_iter + 1) % IOC_SIZE].lock));
    omp_unset_lock(&C->next_lock);
#endif
    return C->in_pl[*this_iter % IOC_SIZE].ptr;
}

/* Publish the input pointer for the next iteration and release its in-lock,
 * unblocking the thread waiting in ioc_get_in. */
static void ioc_set_next_in(ioc_chain *C, size_t* this_iter, void* in_ptr) {
    C->in_pl[(*this_iter + 1) % IOC_SIZE].ptr = in_ptr;
#ifdef _OPENMP
    omp_unset_lock(&(C->in_pl[(*this_iter + 1) % IOC_SIZE].lock));
#endif
}

/* Return this iteration's output pointer.  Blocks until the previous
 * iteration has published it via ioc_set_next_out. */
static void * ioc_get_out(ioc_chain *C, size_t *this_iter) {
#ifdef _OPENMP
    omp_set_lock(&(C->out_pl[(*this_iter) % IOC_SIZE].lock));
    #pragma omp flush
#endif
    void *out_ptr = C->out_pl[*this_iter % IOC_SIZE].ptr;
#ifdef _OPENMP
    omp_unset_lock(&(C->out_pl[(*this_iter) % IOC_SIZE].lock));
#endif
    return out_ptr;
}

/* Publish the output pointer for the next iteration and release the locks
 * acquired in ioc_get_in. */
static void ioc_set_next_out(ioc_chain *C, size_t *this_iter, void* out_ptr) {
    C->out_pl[(*this_iter + 1) % IOC_SIZE].ptr = out_ptr;
#ifdef _OPENMP
    omp_unset_lock(&(C->out_pl[(*this_iter + 1) % IOC_SIZE].lock));
    // *in_pl[this_iter]* lock released at the end of the iteration to avoid being
    // overtaken by previous threads and having *out_pl[this_iter]* corrupted.
    // Especially worried about thread 0, iteration 0.
    omp_unset_lock(&(C->in_pl[(*this_iter) % IOC_SIZE].lock));
#endif
}

#endif  // IOCHAIN_H
main-brisbane.c
/* $Id: main.c,v 1.4 2004/04/21 04:23:43 pohlt Exp $ */

/*############################################################################*/

#include "main.h"
#include "lbm-brisbane.h"
#include <stdio.h>
#include <stdlib.h>

#if defined(SPEC)
# include <time.h>
#else
# include <sys/times.h>
# include <unistd.h>
#endif

#include <sys/stat.h>

/*############################################################################*/

/* Lattice grids: srcGrid is read and dstGrid written each timestep; the two
 * are swapped by LBM_swapGrids after every LBM_performStreamCollide. */
static LBM_GridPtr srcGrid, dstGrid;
/* NOTE(review): gridSize/marginSize/src/dst are never assigned in this file;
 * presumably LBM_allocateGrid (lbm-brisbane) fills them in -- confirm. */
size_t gridSize;
size_t marginSize;
double * src;
double * dst;
/* Brisbane device-memory handles mirroring the src/dst host buffers. */
brisbane_mem mem_src;
brisbane_mem mem_dst;
brisbane_mem mem_srcGrid;
brisbane_mem mem_dstGrid;

/*############################################################################*/

/* Entry point: parse arguments, set up the LBM grids and Brisbane buffers,
 * run the timestep loop, then tear everything down. */
int main( int nArgs, char* arg[] ) {
	brisbane_init(&nArgs, &arg);
	MAIN_Param param;
	int t;

	MAIN_parseCommandLine( nArgs, arg, &param );
	MAIN_printInfo( &param );
	MAIN_initialize( &param );
	printf("[%s:%d] gridSize[%lu] nTimeStep[%d]\n", __FILE__, __LINE__, gridSize, param.nTimeSteps);
	/* Device buffers sized to the whole grid (gridSize doubles each). */
	brisbane_mem_create(sizeof(double) * gridSize, &mem_src);
	brisbane_mem_create(sizeof(double) * gridSize, &mem_dst);
	/* NOTE(review): mem_srcGrid/mem_dstGrid merely alias mem_src/mem_dst and
	 * are never swapped when LBM_swapGrids swaps the host grid pointers, so
	 * the periodic d2h below always reads the same device buffer regardless
	 * of swap parity -- confirm this is intended. */
	mem_srcGrid = mem_src;
	mem_dstGrid = mem_dst;
	/* NOTE(review): this pragma is followed by a declaration rather than a
	 * structured block; presumably the Brisbane source translation consumes
	 * it and the explicit task calls below replace it -- confirm. */
#pragma omp target data map(tofrom:src[0:gridSize]), map(to:dst[0:gridSize])
	brisbane_task task0;
	brisbane_task_create(&task0);
	/* Initial host-to-device upload of both grids (blocking submit). */
	brisbane_task_h2d_full(task0, mem_src, (void*) src);
	brisbane_task_h2d_full(task0, mem_dst, (void*) dst);
	brisbane_task_submit(task0, brisbane_cpu, NULL, true);
	{
		for( t = 1; t <= param.nTimeSteps; t++ ) {
			if( param.simType == CHANNEL ) {
				LBM_handleInOutFlow( *srcGrid );
			}

			LBM_performStreamCollide( *srcGrid, *dstGrid );
			LBM_swapGrids( &srcGrid, &dstGrid );

			/* Every 64th timestep: copy the grid back and print stats. */
			if( (t & 63) == 0 ) {
#pragma omp target update from(src[0:gridSize])
				/* Shadows the outer task0 deliberately; one task per update. */
				brisbane_task task0;
				brisbane_task_create(&task0);
				/* Copy back the full allocation; srcGrid points past the
				 * margin, hence the - marginSize offset. */
				brisbane_task_d2h_full(task0, mem_srcGrid, (void*) (srcGrid - marginSize));
				brisbane_task_submit(task0, brisbane_cpu, NULL, true);
				printf( "timestep: %i\n", t );
				LBM_showGridStatistics( *srcGrid );
			}
		}
	}
	brisbane_mem_release(mem_src);
	brisbane_mem_release(mem_dst);

	MAIN_finalize( &param );
	brisbane_finalize();

	return 0;
}

/*############################################################################*/

/* Parse the command line into *param, exiting with a usage message on error.
 * adjustArgs skips the --device/--platform pairs the SPEC harness may
 * prepend (nArgs == 8 means one pair, nArgs == 10 means both). */
void MAIN_parseCommandLine( int nArgs, char* arg[], MAIN_Param* param ) {
	struct stat fileStat;
	int adjustArgs = 0;

	/* SPEC - handle one of --device/--platform */
	if ( nArgs == 8 ) adjustArgs+= 2;
	/* SPEC - handle both --device/--platform */
	if ( nArgs == 10 ) adjustArgs+= 4;

	if( nArgs < adjustArgs+5 || nArgs > adjustArgs+6 ) {
		printf( "syntax: lbm <time steps> <result file> <0: nil, 1: cmp, 2: str> <0: ldc, 1: channel flow> [<obstacle file>]\n" );
		exit( 1 );
	}

	param->nTimeSteps     = atoi( arg[adjustArgs+1] );
	param->resultFilename = arg[adjustArgs+2];
	param->action         = (MAIN_Action) atoi( arg[adjustArgs+3] );
	param->simType        = (MAIN_SimType) atoi( arg[adjustArgs+4] );

	if( nArgs == adjustArgs+6 ) {
		param->obstacleFilename = arg[adjustArgs+5];

		if( stat( param->obstacleFilename, &fileStat ) != 0 ) {
			printf( "MAIN_parseCommandLine: cannot stat obstacle file '%s'\n",
			        param->obstacleFilename );
			exit( 1 );
		}
		/* Expected size: one byte per cell plus (SIZE_Y+1)*SIZE_Z extra
		 * bytes -- presumably line terminators in the obstacle file format;
		 * confirm against LBM_loadObstacleFile. */
		if( fileStat.st_size != SIZE_X*SIZE_Y*SIZE_Z+(SIZE_Y+1)*SIZE_Z ) {
			printf( "MAIN_parseCommandLine:\n"
			        "\tsize of file '%s' is %i bytes\n"
			        "\texpected size is %i bytes\n",
			        param->obstacleFilename, (int) fileStat.st_size,
			        SIZE_X*SIZE_Y*SIZE_Z+(SIZE_Y+1)*SIZE_Z );
			exit( 1 );
		}
	}
	else param->obstacleFilename = NULL;

	/* COMPARE mode needs an existing result file to diff against. */
	if( param->action == COMPARE &&
	    stat( param->resultFilename, &fileStat ) != 0 ) {
		printf( "MAIN_parseCommandLine: cannot stat result file '%s'\n",
		        param->resultFilename );
		exit( 1 );
	}
}

/*############################################################################*/

/* Print the run configuration (grid size, steps, files, mode) to stdout. */
void MAIN_printInfo( const MAIN_Param* param ) {
	const char actionString[3][32] = {"nothing", "compare", "store"};
	const char simTypeString[3][32] = {"lid-driven cavity", "channel flow"};

	printf( "MAIN_printInfo:\n"
	        "\tgrid size : %i x %i x %i = %.2f * 10^6 Cells\n"
	        "\tnTimeSteps : %i\n"
	        "\tresult file : %s\n"
	        "\taction : %s\n"
	        "\tsimulation type: %s\n"
	        "\tobstacle file : %s\n\n",
	        SIZE_X, SIZE_Y, SIZE_Z, 1e-6*SIZE_X*SIZE_Y*SIZE_Z,
	        param->nTimeSteps, param->resultFilename,
	        actionString[param->action], simTypeString[param->simType],
	        (param->obstacleFilename == NULL) ? "<none>" : param->obstacleFilename );
}

/*############################################################################*/

/* Allocate and initialize both grids, load the optional obstacle file into
 * each, set up the boundary cells for the chosen simulation type, and print
 * the initial grid statistics. */
void MAIN_initialize( const MAIN_Param* param) {
	LBM_allocateGrid( (double**) &srcGrid, (double**) &src );
	LBM_allocateGrid( (double**) &dstGrid, (double**) &dst );

	LBM_initializeGrid( *srcGrid );
	LBM_initializeGrid( *dstGrid );

	if( param->obstacleFilename != NULL ) {
		LBM_loadObstacleFile( *srcGrid, param->obstacleFilename );
		LBM_loadObstacleFile( *dstGrid, param->obstacleFilename );
	}

	if( param->simType == CHANNEL ) {
		LBM_initializeSpecialCellsForChannel( *srcGrid );
		LBM_initializeSpecialCellsForChannel( *dstGrid );
	}
	else {
		LBM_initializeSpecialCellsForLDC( *srcGrid );
		LBM_initializeSpecialCellsForLDC( *dstGrid );
	}

	LBM_showGridStatistics( *srcGrid );
}

/*############################################################################*/

/* Print final statistics, compare or store the velocity field depending on
 * param->action, and free both grids. */
void MAIN_finalize( const MAIN_Param* param ) {
	LBM_showGridStatistics( *srcGrid );

	if( param->action == COMPARE )
		LBM_compareVelocityField( *srcGrid, param->resultFilename, TRUE );
	if( param->action == STORE )
		LBM_storeVelocityField( *srcGrid, param->resultFilename, TRUE );

	LBM_freeGrid( (double**) &srcGrid );
	LBM_freeGrid( (double**) &dstGrid );
}
grid.c
#include <mpi.h> int *cn_c; int *ce_c; int *ec_c; int *cn_crem; int *ce_crem; int *ec_crem; int *neighbor_map; int *cedge_map; int *ecell_map; int *neighbor_maprem; int *cedge_maprem; int *ecell_maprem; GVAL **neighbor_2Dbuf; GVAL **neighbor_3Dbuf; GVAL **cedge_2Dbuf; GVAL **cedge_3Dbuf; GVAL **ecell_2Dbuf; GVAL **ecell_3Dbuf; GVAL **neighbor_2Dbufrem; GVAL **neighbor_3Dbufrem; GVAL **cedge_2Dbufrem; GVAL **cedge_3Dbufrem; GVAL **ecell_2Dbufrem; GVAL **ecell_3Dbufrem; MPI_Request *mpi_send_requests; MPI_Request *mpi_recv_requests; int comm_tag; int local_cell_blocks; int local_edge_blocks; #include "grid.h" #include "memory.h" int transform(int n, int x, int y) { int rx, ry, s, d = 0, t; for (s = n / 2; s > 0; s /= 2) { rx = (x & s) > 0; ry = (y & s) > 0; d += s * s * ((3 * rx) ^ ry); if (ry == 0) { if (rx) { x = n - 1 - x; y = n - 1 - y; } t = x; x = y; y = t; } } return d; } void create_maps(GRID * g) { int x = g->height, y = g->height; g->map1 = malloc(x * sizeof(int *)); g->map2 = malloc(g->cellCount * sizeof(map_t)); for (int i = 0; i < x; i++) { g->map1[i] = malloc(y * sizeof(int)); for (int j = 0; j < y; j++) { int t = transform(x, i, j); g->map1[i][j] = t; g->map2[t].i = i; g->map2[t].j = j; } } } #ifndef NBRS #error "please define NBRS" #endif #if NBRS==3 int calc_edge_count(GRID * g) { return (g->cellCount * 3) / 2; } void tessellation(GRID * g) { for (int i = 0; i < NBRS; i++) { g->neighbor[i] = malloc((g->cellCount) * sizeof(int)); g->cedge[i] = malloc((g->cellCount) * sizeof(int)); } for (int i = 0; i < 2; i++) { g->ecell[i] = malloc((g->edgeCount) * sizeof(int)); } int x = g->height, y = g->height; for (int i = 0; i < x - 1; i++) for (int j = 0; j < y; j++) g->neighbor[0][g->map1[i][j]] = g->map1[i + 1][j]; for (int j = 0; j < y; j++) g->neighbor[0][g->map1[x - 1][j]] = g->map1[0][j]; for (int i = 1; i < x; i++) for (int j = 0; j < y; j++) g->neighbor[1][g->map1[i][j]] = g->map1[i - 1][j]; for (int j = 0; j < y; j++) g->neighbor[1][g->map1[0][j]] = 
g->map1[x - 1][j]; for (int i = 0; i < x; i += 2) g->neighbor[2][g->map1[i][0]] = g->map1[i][y - 1]; for (int i = 0; i < x; i += 2) for (int j = 2; j < y; j += 2) g->neighbor[2][g->map1[i][j]] = g->map1[i][j - 1]; for (int i = 1; i < x; i += 2) for (int j = 1; j < y; j += 2) g->neighbor[2][g->map1[i][j]] = g->map1[i][j - 1]; for (int i = 1; i < x; i += 2) for (int j = 0; j < y - 1; j += 2) g->neighbor[2][g->map1[i][j]] = g->map1[i][j + 1]; for (int i = 0; i < x; i += 2) for (int j = 1; j < y - 1; j += 2) g->neighbor[2][g->map1[i][j]] = g->map1[i][j + 1]; for (int i = y % 2; i < x; i += 2) g->neighbor[2][g->map1[i][y - 1]] = g->map1[i][0]; for (int c = 0; c < g->cellCount; c++) { g->cedge[0][c] = (c * 3) / 2; g->cedge[1][g->neighbor[0][c]] = g->cedge[0][c]; g->ecell[0][g->cedge[0][c]] = g->neighbor[0][c]; g->ecell[1][g->cedge[0][c]] = c; } for (int c = 0; c < g->cellCount; c += 2) { g->cedge[2][c] = (c * 3) / 2 + 2; g->cedge[2][g->neighbor[2][c]] = g->cedge[2][c]; g->ecell[0][g->cedge[2][c]] = c; g->ecell[1][g->cedge[2][c]] = g->neighbor[2][c]; } } void init_edge_weights(GRID * g) { GVAL j = -1.0; for (int i = 0; i < BLKSIZE; i++) { g->edge_weights[0][i] = 1.0; g->edge_weights[1][i] = -1.0; g->edge_weights[2][i] = j; j = j * -1; } } #elif NBRS==4 int calc_edge_count(GRID * g) { return (int) (((float) g->cellCount * NBRS / 2.0) + (2.0 * g->height)); } void tessellation(GRID * g) { for (int i = 0; i < NBRS; i++) { g->neighbor[i] = malloc((g->cellCount) * sizeof(int)); g->cedge[i] = malloc((g->cellCount) * sizeof(int)); } for (int i = 0; i < 2; i++) { g->ecell[i] = malloc((g->edgeCount) * sizeof(int)); } int x = g->height, y = g->height; for (int i = 0; i < x; i++) { // for (int j = 0; j < y; j++) { if (i < x - 1) g->neighbor[0][g->map1[i][j]] = g->map1[i + 1][j]; else g->neighbor[0][g->map1[i][j]] = g->cellCount; if (j < y - 1) g->neighbor[1][g->map1[i][j]] = g->map1[i][j + 1]; else g->neighbor[1][g->map1[i][j]] = g->cellCount; if (i > 0) g->neighbor[2][g->map1[i][j]] 
= g->map1[i - 1][j]; else g->neighbor[2][g->map1[i][j]] = g->cellCount; if (j > 0) g->neighbor[3][g->map1[i][j]] = g->map1[i][j - 1]; else g->neighbor[3][g->map1[i][j]] = g->cellCount; g->neighbor[0][g->cellCount] = g->neighbor[1][g->cellCount] = g->neighbor[2][g->cellCount] = g->neighbor[3][g->cellCount] = g->cellCount; // g->cedge[0][g->map1[i][j]] = g->map1[i][j]; g->cedge[1][g->map1[i][j]] = g->cellCount + g->map1[i][j]; if (i > 0) g->cedge[2][g->map1[i][j]] = g->map1[i - 1][j]; else g->cedge[2][g->map1[i][j]] = g->cellCount * 2 + j; if (j > 0) g->cedge[3][g->map1[i][j]] = g->cellCount + g->map1[i][j - 1]; else g->cedge[3][g->map1[i][j]] = g->cellCount * 2 + y + i; g->ecell[1][g->map1[i][j]] = g->map1[i][j]; g->ecell[1][g->cellCount + g->map1[i][j]] = g->map1[i][j]; if (i > 0) g->ecell[0][g->map1[i - 1][j]] = g->map1[i][j]; else g->ecell[0][g->cellCount * 2 + j] = g->map1[i][j]; if (j > 0) g->ecell[0][g->cellCount + g->map1[i][j - 1]] = g->map1[i][j]; else g->ecell[0][g->cellCount * 2 + y + i] = g->map1[i][j]; g->ecell[0][g->map1[x - 1][j]] = g->cellCount; //TODO: out of loop g->ecell[0][g->cellCount + g->map1[i][y - 1]] = g->cellCount; // g->ecell[1][g->cellCount * 2 + j] = g->cellCount; //TODO: out of loop g->ecell[1][g->cellCount * 2 + y + i] = g->cellCount; } } } // void init_edge_weights(GRID * g) { for (int i = 0; i < BLKSIZE; i++) { g->edge_weights[0][i] = 1.0; g->edge_weights[1][i] = 1.0; g->edge_weights[2][i] = -1.0; g->edge_weights[3][i] = -1.0; } } #elif NBRS==6 int calc_edge_count(GRID * g) { return (int) (((float) g->cellCount * NBRS / 2.0) + (4.0 * g->height) - 1); } void tessellation(GRID * g) { for (int i = 0; i < NBRS; i++) { g->neighbor[i] = malloc((g->cellCount) * sizeof(int)); g->cedge[i] = malloc((g->cellCount) * sizeof(int)); } for (int i = 0; i < 2; i++) { g->ecell[i] = malloc((g->edgeCount) * sizeof(int)); } int x = g->height, y = g->height; for (int i = 0; i < x; i++) for (int j = 0; j < y; j += 2) g->neighbor[0][g->map1[i][j]] = 
g->map1[i][j + 1]; for (int i = 0; i < x - 1; i++) for (int j = 1; j < y - 1; j += 2) g->neighbor[0][g->map1[i][j]] = g->map1[i + 1][j + 1]; for (int j = 1; j < y - 1; j += 2) g->neighbor[0][g->map1[x - 1][j]] = g->cellCount; for (int i = 0; i < x; i++) g->neighbor[0][g->map1[i][y - 1]] = g->cellCount; for (int i = 0; i < x - 1; i++) for (int j = 0; j < y; j++) g->neighbor[1][g->map1[i][j]] = g->map1[i + 1][j]; for (int j = 0; j < y; j++) g->neighbor[1][g->map1[x - 1][j]] = g->cellCount; for (int i = 0; i < x; i++) g->neighbor[2][g->map1[i][0]] = g->cellCount; for (int i = 0; i < x; i++) for (int j = 2; j < y; j += 2) g->neighbor[2][g->map1[i][j]] = g->map1[i][j - 1]; for (int i = 0; i < x - 1; i++) for (int j = 1; j < y; j += 2) g->neighbor[2][g->map1[i][j]] = g->map1[i + 1][j - 1]; for (int j = 1; j < y; j += 2) g->neighbor[2][g->map1[x - 1][j]] = g->cellCount; for (int i = 0; i < x; i++) g->neighbor[3][g->map1[i][0]] = g->cellCount; for (int i = 1; i < x; i++) for (int j = 2; j < y; j += 2) g->neighbor[3][g->map1[i][j]] = g->map1[i - 1][j - 1]; for (int j = 2; j < y; j += 2) g->neighbor[3][g->map1[0][j]] = g->cellCount; for (int i = 0; i < x; i++) for (int j = 1; j < y; j += 2) g->neighbor[3][g->map1[i][j]] = g->map1[i][j - 1]; for (int i = 1; i < x; i++) for (int j = 0; j < y; j++) g->neighbor[4][g->map1[i][j]] = g->map1[i - 1][j]; for (int j = 0; j < y; j++) g->neighbor[4][g->map1[0][j]] = g->cellCount; for (int i = 1; i < x; i++) for (int j = 0; j < y; j += 2) g->neighbor[5][g->map1[i][j]] = g->map1[i - 1][j + 1]; for (int j = 0; j < y; j += 2) g->neighbor[5][g->map1[0][j]] = g->cellCount; for (int i = 0; i < x; i++) for (int j = 1; j < y - 1; j += 2) g->neighbor[5][g->map1[i][j]] = g->map1[i][j + 1]; for (int i = 0; i < x; i++) g->neighbor[5][g->map1[i][y - 1]] = g->cellCount; for (int i = 0; i < x; i++) for (int j = 0; j < y; j++) { g->cedge[0][g->map1[i][j]] = g->map1[i][j]; g->cedge[1][g->map1[i][j]] = g->cellCount + g->map1[i][j]; 
g->cedge[2][g->map1[i][j]] = g->cellCount * 2 + g->map1[i][j]; } for (int i = 0; i < x; i++) for (int j = 0; j < y; j++) { if (j == 0 || ((i == 0) && (j % 2 == 0))) g->cedge[3][g->map1[i][j]] = g->cellCount * 3 + (j == 0 ? i : x + j / 2 - 1); else g->cedge[3][g->map1[i][j]] = g->cedge[0][g->neighbor[3][g->map1[i][j]]]; if (i == 0) g->cedge[4][g->map1[i][j]] = g->cellCount * 3 + x + x / 2 + j - 1; else g->cedge[4][g->map1[i][j]] = g->cedge[1][g->neighbor[4][g->map1[i][j]]]; if ((j == y - 1) || ((i == 0) && (j % 2 == 0))) g->cedge[5][g->map1[i][j]] = g->cellCount * 3 + x + x / 2 + y - 1 + (j == y - 1 ? y / 2 + i : j / 2); else g->cedge[5][g->map1[i][j]] = g->cedge[2][g->neighbor[5][g->map1[i][j]]]; } for (int c = 0; c < g->cellCount; c++) { g->ecell[0][g->cedge[0][c]] = g->neighbor[0][c]; g->ecell[1][g->cedge[0][c]] = c; g->ecell[0][g->cedge[1][c]] = g->neighbor[1][c]; g->ecell[1][g->cedge[1][c]] = c; g->ecell[0][g->cedge[2][c]] = g->neighbor[2][c]; g->ecell[1][g->cedge[2][c]] = c; g->ecell[0][g->cedge[3][c]] = c; g->ecell[1][g->cedge[3][c]] = g->neighbor[3][c]; g->ecell[0][g->cedge[4][c]] = c; g->ecell[1][g->cedge[4][c]] = g->neighbor[4][c]; g->ecell[0][g->cedge[5][c]] = c; g->ecell[1][g->cedge[5][c]] = g->neighbor[5][c]; } } void init_edge_weights(GRID * g) { for (int i = 0; i < BLKSIZE; i++) { g->edge_weights[0][i] = g->edge_weights[1][i] = g->edge_weights[2][i] = 1.0; g->edge_weights[3][i] = g->edge_weights[4][i] = g->edge_weights[5][i] = -1.0; } } #else #error "supported shapes are traiangles,rectangles and hexagons" #endif void init_blocking(GRID * g) { for (int i = 0; i < NBRS; i++) { { int num_blocks = local_cell_blocks ? 
local_cell_blocks : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); g->cNeighborIdx[i] = malloc(24); g->cNeighborIdx[i]->name = "g->cNeighborIdx[ i]"; g->cNeighborIdx[i]->loc = 0; g->cNeighborIdx[i]->dim = 2; g->cNeighborIdx[i]->data_pointer.p2 = malloc((num_blocks * g->blkSize) * sizeof(int) + (num_blocks) * sizeof(char *)); char *pos = (char *) g->cNeighborIdx[i]->data_pointer.p2 + num_blocks * sizeof(char *); for (int b = 0; b < num_blocks; b++) { g->cNeighborIdx[i]->data_pointer.p2[b] = (int *) pos; pos += g->blkSize * sizeof(int); for (int c = 0; c < g->blkSize; c++) { g->cNeighborIdx[i]->data_pointer.p2[b][c] = (int) 0; } } } { int num_blocks = local_cell_blocks ? local_cell_blocks : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); g->cNeighborBlk[i] = malloc(24); g->cNeighborBlk[i]->name = "g->cNeighborBlk[ i]"; g->cNeighborBlk[i]->loc = 0; g->cNeighborBlk[i]->dim = 2; g->cNeighborBlk[i]->data_pointer.p2 = malloc((num_blocks * g->blkSize) * sizeof(int) + (num_blocks) * sizeof(char *)); char *pos = (char *) g->cNeighborBlk[i]->data_pointer.p2 + num_blocks * sizeof(char *); for (int b = 0; b < num_blocks; b++) { g->cNeighborBlk[i]->data_pointer.p2[b] = (int *) pos; pos += g->blkSize * sizeof(int); for (int c = 0; c < g->blkSize; c++) { g->cNeighborBlk[i]->data_pointer.p2[b][c] = (int) 0; } } } } int first_cBlock = g->mpi_rank * ((g->cBlkCnt + g->mpi_world_size - 1) / g->mpi_world_size); int first_eBlock = g->mpi_rank * ((g->eBlkCnt + g->mpi_world_size - 1) / g->mpi_world_size); for (int i = 0; i < NBRS; i++) { { size_t min_block = g->mpi_rank == (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 
0 : g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) { if ((g->mpi_rank == g->mpi_world_size - 1) && (block_index == (g->cBlkCnt - 1) % ((g->cBlkCnt + g->mpi_world_size - 1) / g->mpi_world_size)) && (cell_index > (g->cellCount - 1) % g->blkSize)) { g->cNeighborIdx[i]->data_pointer.p2[(block_index)][(cell_index)] = cell_index; g->cNeighborBlk[i]->data_pointer.p2[(block_index)][(cell_index)] = block_index; } else { g->cNeighborIdx[i]->data_pointer.p2[(block_index)][(cell_index)] = g->neighbor[i][(first_cBlock + block_index) * g->blkSize + cell_index] % g->blkSize; g->cNeighborBlk[i]->data_pointer.p2[(block_index)][(cell_index)] = g->neighbor[i][(first_cBlock + block_index) * g->blkSize + cell_index] / g->blkSize; } } } } } for (int i = 0; i < NBRS; i++) { { int num_blocks = local_cell_blocks ? local_cell_blocks : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); g->cEdgeIdx[i] = malloc(24); g->cEdgeIdx[i]->name = "g->cEdgeIdx[ i]"; g->cEdgeIdx[i]->loc = 0; g->cEdgeIdx[i]->dim = 2; g->cEdgeIdx[i]->data_pointer.p2 = malloc((num_blocks * g->blkSize) * sizeof(int) + (num_blocks) * sizeof(char *)); char *pos = (char *) g->cEdgeIdx[i]->data_pointer.p2 + num_blocks * sizeof(char *); for (int b = 0; b < num_blocks; b++) { g->cEdgeIdx[i]->data_pointer.p2[b] = (int *) pos; pos += g->blkSize * sizeof(int); for (int c = 0; c < g->blkSize; c++) { g->cEdgeIdx[i]->data_pointer.p2[b][c] = (int) 0; } } } { int num_blocks = local_cell_blocks ? 
local_cell_blocks : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); g->cEdgeBlk[i] = malloc(24); g->cEdgeBlk[i]->name = "g->cEdgeBlk[ i]"; g->cEdgeBlk[i]->loc = 0; g->cEdgeBlk[i]->dim = 2; g->cEdgeBlk[i]->data_pointer.p2 = malloc((num_blocks * g->blkSize) * sizeof(int) + (num_blocks) * sizeof(char *)); char *pos = (char *) g->cEdgeBlk[i]->data_pointer.p2 + num_blocks * sizeof(char *); for (int b = 0; b < num_blocks; b++) { g->cEdgeBlk[i]->data_pointer.p2[b] = (int *) pos; pos += g->blkSize * sizeof(int); for (int c = 0; c < g->blkSize; c++) { g->cEdgeBlk[i]->data_pointer.p2[b][c] = (int) 0; } } } } for (int i = 0; i < NBRS; i++) { { size_t min_block = g->mpi_rank == (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 
g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) { if ((g->mpi_rank == g->mpi_world_size - 1) && (block_index == (g->cBlkCnt - 1) % ((g->cBlkCnt + g->mpi_world_size - 1) / g->mpi_world_size)) && (cell_index > (g->cellCount - 1) % g->blkSize)) { g->cEdgeIdx[i]->data_pointer.p2[(block_index)][(cell_index)] = 0; g->cEdgeBlk[i]->data_pointer.p2[(block_index)][(cell_index)] = first_eBlock; } else { g->cEdgeIdx[i]->data_pointer.p2[(block_index)][(cell_index)] = g->cedge[i][(first_cBlock + block_index) * g->blkSize + cell_index] % g->blkSize; g->cEdgeBlk[i]->data_pointer.p2[(block_index)][(cell_index)] = g->cedge[i][(first_cBlock + block_index) * g->blkSize + cell_index] / g->blkSize; } } } } } for (int i = 0; i < 2; i++) { { int num_blocks = local_edge_blocks ? local_edge_blocks : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); g->eCellIdx[i] = malloc(24); g->eCellIdx[i]->name = "g->eCellIdx[ i]"; g->eCellIdx[i]->loc = 1; g->eCellIdx[i]->dim = 2; g->eCellIdx[i]->data_pointer.p2 = malloc((num_blocks * g->blkSize) * sizeof(int) + (num_blocks) * sizeof(char *)); char *pos = (char *) g->eCellIdx[i]->data_pointer.p2 + num_blocks * sizeof(char *); for (int b = 0; b < num_blocks; b++) { g->eCellIdx[i]->data_pointer.p2[b] = (int *) pos; pos += g->blkSize * sizeof(int); for (int e = 0; e < g->blkSize; e++) { g->eCellIdx[i]->data_pointer.p2[b][e] = (int) 0; } } } { int num_blocks = local_edge_blocks ? 
/*
 * NOTE(review): this excerpt begins mid-function -- the signature of the
 * routine owning the next ~45 lines lies before this chunk.  It finishes
 * allocating the per-direction edge->cell block tables (g->eCellBlk[i]) and
 * then rewrites g->eCellIdx/g->eCellBlk into (block, offset) pairs for the
 * edge blocks owned by this MPI rank.  Code is unchanged; only comments added.
 *
 * Throughout this file the expression
 *   ((X + g->mpi_world_size - 1) / g->mpi_world_size)
 * is the ceiling-divided per-rank chunk of X blocks.
 */
local_edge_blocks : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
/* NOTE(review): 24 is a magic allocation size -- presumably the size of the
   table-header struct; prefer sizeof(*g->eCellBlk[i]).  TODO confirm. */
g->eCellBlk[i] = malloc(24);
g->eCellBlk[i]->name = "g->eCellBlk[ i]";
g->eCellBlk[i]->loc = 1;
g->eCellBlk[i]->dim = 2;
/* One allocation holds both the row-pointer vector (num_blocks entries) and
   the num_blocks*blkSize int payload that follows it. */
g->eCellBlk[i]->data_pointer.p2 = malloc((num_blocks * g->blkSize) * sizeof(int) + (num_blocks) * sizeof(char *));
char *pos = (char *) g->eCellBlk[i]->data_pointer.p2 + num_blocks * sizeof(char *);
for (int b = 0; b < num_blocks; b++) {
    g->eCellBlk[i]->data_pointer.p2[b] = (int *) pos;
    pos += g->blkSize * sizeof(int);
    for (int e = 0; e < g->blkSize; e++) {
        g->eCellBlk[i]->data_pointer.p2[b][e] = (int) 0;
    }
}
}
}
/* Split the two edge->cell tables (i = 0, 1) into block/index components for
   the locally owned edge-block range [min_block, max_block). */
for (int i = 0; i < 2; i++) {
{
size_t min_block = g->mpi_rank == (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0;
size_t max_block = g->mpi_rank < (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
#pragma omp parallel for
for (size_t block_index = (min_block); block_index < (max_block); block_index++) {
    for (size_t edge_index = (0); edge_index < (g->blkSize); edge_index++) {
        /* Tail padding on the last rank's last block: entries past the last
           real edge are pointed at first_cBlock with index 0. */
        if ((g->mpi_rank == g->mpi_world_size - 1) && (block_index == (g->eBlkCnt - 1) % ((g->eBlkCnt + g->mpi_world_size - 1) / g->mpi_world_size)) && (edge_index > (g->edgeCount - 1) % g->blkSize)) {
            g->eCellIdx[i]->data_pointer.p2[(block_index)][(edge_index)] = 0;
            g->eCellBlk[i]->data_pointer.p2[(block_index)][(edge_index)] = first_cBlock;
        } else {
            /* Global cell id -> (offset-in-block, block) decomposition. */
            g->eCellIdx[i]->data_pointer.p2[(block_index)][(edge_index)] = g->ecell[i][(first_eBlock + block_index) * g->blkSize + edge_index] % g->blkSize;
            g->eCellBlk[i]->data_pointer.p2[(block_index)][(edge_index)] = g->ecell[i][(first_eBlock + block_index) * g->blkSize + edge_index] / g->blkSize;
        }
    }
}
}
}
/*
 * init_grid(): builds the distributed grid -- tessellation, blocking, halo
 * (remote-reference) maps, and the MPI exchange buffers.
 *
 *   g          grid object to initialise (carries mpi_rank / mpi_world_size)
 *   cellCount  cells per side; total cell count becomes cellCount^2
 *   height     number of vertical levels (applied after tessellation)
 */
void init_grid(GRID * g, int cellCount, int height)
{
    g->cellCount = cellCount * cellCount;
    /* NOTE(review): height is temporarily set to cellCount and only replaced
       by the real height after tessellation() -- presumably tessellation()
       relies on this; confirm before reordering. */
    g->height = cellCount;
    g->edgeCount = calc_edge_count(g);
    g->blkSize = BLKSIZE;
    g->cBlkCnt = (g->cellCount + g->blkSize - 1) / g->blkSize;   /* cell blocks (ceil) */
    g->eBlkCnt = (g->edgeCount + g->blkSize - 1) / g->blkSize;   /* edge blocks (ceil) */
    create_maps(g);
    tessellation(g);
    g->height = height;
    init_edge_weights(g);
    init_blocking(g);
    {
        /* Locally owned block ranges [cell_min, cell_max) / [edge_min, edge_max)
           within this rank's chunk (the last rank may own a partial chunk). */
        int cell_min = 0;
        int cell_max = g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
        int edge_min = 0;
        int edge_max = g->mpi_rank > (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
        /*
         * Phase 1 (cn): cell -> neighbour-cell halo.  First pass counts, per
         * owning rank, the neighbour references that fall outside this rank's
         * cell range.  cn_H is a (blocks, offset) write head per rank;
         * neighbor_map records 5 ints per remote reference:
         * (peer, remote block, remote index, local halo block, local halo index).
         * NOTE(review): cn_H/ce_H/ec_H and mi are never freed -- possibly
         * acceptable for one-shot init, but worth confirming.
         */
        int *cn_H = malloc(g->mpi_world_size * sizeof(int) * 2);
        cn_c = malloc(g->mpi_world_size * sizeof(int));
        for (int i = 0; i < g->mpi_world_size; i++) cn_c[i] = 0;
        for (int b = cell_min; b < cell_max; b++) {
            for (int c = (0); c < (g->blkSize); c++) {
                for (int n = (0); n < 3; n++) {
                    /* Out-of-range block ids are boundary markers: skip. */
                    if (g->cNeighborBlk[n]->data_pointer.p2[b][c] >= g->cBlkCnt) continue;
                    if (g->cNeighborBlk[n]->data_pointer.p2[b][c] < g->mpi_rank * ((((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + cell_min || g->cNeighborBlk[n]->data_pointer.p2[b][c] >= g->mpi_rank * ((((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + cell_max) {
                        cn_c[g->cNeighborBlk[n]->data_pointer.p2[b][c] / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)]++;
                    }
                }
            }
        }
        /* Halo entries are appended directly after the owned blocks. */
        cn_H[0] = cn_c[0] + cell_max * g->blkSize;
        for (int i = 1; i < g->mpi_world_size; i++) {
            cn_H[2 * i] = cn_c[i] + cn_H[2 * i - 2];
        }
        int ml = 0;
        for (int i = 0; i < g->mpi_world_size; i++) {
            ml += cn_c[i];
        }
        neighbor_map = malloc(ml * sizeof(int) * 5);
        for (int i = 0; i < g->mpi_world_size; i++) {
            cn_H[2 * i + 1] = cn_H[2 * i] % g->blkSize;
            cn_H[2 * i] = cn_H[2 * i] / g->blkSize;
        }
        /* tp = running (block, index) write position per peer rank. */
        int *tp = malloc(g->mpi_world_size * sizeof(int) * 2);
        tp[0] = cell_max;
        tp[1] = 0;
        for (int i = 1; i < g->mpi_world_size; i++) {
            tp[i * 2] = cn_H[i * 2 - 2];
            tp[i * 2 + 1] = cn_H[i * 2 - 1];
        }
        /* mi = write cursor into the 5-int records of neighbor_map per peer. */
        int *mi = malloc(g->mpi_world_size * sizeof(int));
        mi[0] = 0;
        for (int i = 1; i < g->mpi_world_size; i++) mi[i] = 5 * cn_c[i - 1] + mi[i - 1];
        /* Second pass: record remote references and redirect the table to the
           local halo slot; local references are rebased to rank-local blocks. */
        for (int b = cell_min; b < cell_max; b++) {
            for (int c = (0); c < (g->blkSize); c++) {
                for (int n = (0); n < 3; n++) {
                    if (g->cNeighborBlk[n]->data_pointer.p2[b][c] >= g->cBlkCnt || g->cNeighborBlk[n]->data_pointer.p2[b][c] < 0) {
                        g->cNeighborBlk[n]->data_pointer.p2[b][c] = -1;
                    } else if (g->cNeighborBlk[n]->data_pointer.p2[b][c] < g->mpi_rank * ((((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + cell_min || g->cNeighborBlk[n]->data_pointer.p2[b][c] >= g->mpi_rank * ((((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + cell_max) {
                        int pn = g->cNeighborBlk[n]->data_pointer.p2[b][c] / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
                        neighbor_map[mi[pn]++] = pn;
                        neighbor_map[mi[pn]++] = g->cNeighborBlk[n]->data_pointer.p2[b][c] % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
                        neighbor_map[mi[pn]++] = g->cNeighborIdx[n]->data_pointer.p2[b][c];
                        neighbor_map[mi[pn]++] = tp[pn * 2];
                        neighbor_map[mi[pn]++] = tp[pn * 2 + 1];
                        g->cNeighborBlk[n]->data_pointer.p2[b][c] = tp[pn * 2];
                        g->cNeighborIdx[n]->data_pointer.p2[b][c] = tp[pn * 2 + 1];
                        if (++tp[pn * 2 + 1] == g->blkSize) {
                            tp[pn * 2]++;
                            tp[pn * 2 + 1] = 0;
                        }
                    } else {
                        g->cNeighborBlk[n]->data_pointer.p2[b][c] = g->cNeighborBlk[n]->data_pointer.p2[b][c] % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
                    }
                }
            }
        }
        /* Phase 2 (ce): cell -> edge halo, same two-pass scheme against the
           edge-block chunking.  Note: no >= eBlkCnt skip here, unlike cn/ec. */
        int *ce_H = malloc(g->mpi_world_size * sizeof(int) * 2);
        ce_c = malloc(g->mpi_world_size * sizeof(int));
        for (int i = 0; i < g->mpi_world_size; i++) ce_c[i] = 0;
        for (int b = cell_min; b < cell_max; b++) {
            for (int c = (0); c < (g->blkSize); c++) {
                for (int n = (0); n < 3; n++) {
                    if (g->cEdgeBlk[n]->data_pointer.p2[b][c] < g->mpi_rank * ((((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + edge_min || g->cEdgeBlk[n]->data_pointer.p2[b][c] >= g->mpi_rank * ((((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + edge_max) {
                        ce_c[g->cEdgeBlk[n]->data_pointer.p2[b][c] / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)]++;
                    }
                }
            }
        }
        ce_H[0] = ce_c[0] + edge_max * g->blkSize;
        for (int i = 1; i < g->mpi_world_size; i++) {
            ce_H[2 * i] = ce_c[i] + ce_H[2 * i - 2];
        }
        ml = 0;
        for (int i = 0; i < g->mpi_world_size; i++) {
            ml += ce_c[i];
        }
        cedge_map = malloc(ml * sizeof(int) * 5);
        for (int i = 0; i < g->mpi_world_size; i++) {
            ce_H[2 * i + 1] = ce_H[2 * i] % g->blkSize;
            ce_H[2 * i] = ce_H[2 * i] / g->blkSize;
        }
        /* Owned edge blocks plus the halo define the local edge extent. */
        local_edge_blocks = ce_H[g->mpi_world_size * 2 - 2] + 1;
        tp[0] = edge_max;
        tp[1] = 0;
        for (int i = 1; i < g->mpi_world_size; i++) {
            tp[i * 2] = ce_H[i * 2 - 2];
            tp[i * 2 + 1] = ce_H[i * 2 - 1];
        }
        mi[0] = 0;
        for (int i = 1; i < g->mpi_world_size; i++) mi[i] = 5 * ce_c[i - 1] + mi[i - 1];
        for (int b = cell_min; b < cell_max; b++) {
            for (int c = (0); c < (g->blkSize); c++) {
                for (int n = (0); n < 3; n++) {
                    if (g->cEdgeBlk[n]->data_pointer.p2[b][c] < g->mpi_rank * ((((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + edge_min || g->cEdgeBlk[n]->data_pointer.p2[b][c] >= g->mpi_rank * ((((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + edge_max) {
                        int pn = g->cEdgeBlk[n]->data_pointer.p2[b][c] / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
                        cedge_map[mi[pn]++] = pn;
                        cedge_map[mi[pn]++] = g->cEdgeBlk[n]->data_pointer.p2[b][c] % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
                        cedge_map[mi[pn]++] = g->cEdgeIdx[n]->data_pointer.p2[b][c];
                        cedge_map[mi[pn]++] = tp[pn * 2];
                        cedge_map[mi[pn]++] = tp[pn * 2 + 1];
                        g->cEdgeBlk[n]->data_pointer.p2[b][c] = tp[pn * 2];
                        g->cEdgeIdx[n]->data_pointer.p2[b][c] = tp[pn * 2 + 1];
                        if (++tp[pn * 2 + 1] == g->blkSize) {
                            tp[pn * 2]++;
                            tp[pn * 2 + 1] = 0;
                        }
                    } else {
                        g->cEdgeBlk[n]->data_pointer.p2[b][c] = g->cEdgeBlk[n]->data_pointer.p2[b][c] % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
                    }
                }
            }
        }
        /* Phase 3 (ec): edge -> cell halo (2 endpoints per edge); its halo
           slots are appended after the cn halo, extending local cell storage. */
        int *ec_H = malloc(g->mpi_world_size * sizeof(int) * 2);
        ec_c = malloc(g->mpi_world_size * sizeof(int));
        for (int i = 0; i < g->mpi_world_size; i++) ec_c[i] = 0;
        for (int b = edge_min; b < edge_max; b++) {
            for (int e = (0); e < (g->blkSize); e++) {
                for (int n = (0); n < 2; n++) {
                    if (g->eCellBlk[n]->data_pointer.p2[b][e] >= g->cBlkCnt) continue;
                    if (g->eCellBlk[n]->data_pointer.p2[b][e] < g->mpi_rank * ((((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + cell_min || g->eCellBlk[n]->data_pointer.p2[b][e] >= g->mpi_rank * ((((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + cell_max) {
                        ec_c[g->eCellBlk[n]->data_pointer.p2[b][e] / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)]++;
                    }
                }
            }
        }
        ec_H[0] = ec_c[0] + cn_H[g->mpi_world_size * 2 - 2] * g->blkSize + cn_H[g->mpi_world_size * 2 - 1];
        for (int i = 1; i < g->mpi_world_size; i++) {
            ec_H[2 * i] = ec_c[i] + ec_H[2 * i - 2];
        }
        ml = 0;
        for (int i = 0; i < g->mpi_world_size; i++) {
            ml += ec_c[i];
        }
        ecell_map = malloc(ml * sizeof(int) * 5);
        for (int i = 0; i < g->mpi_world_size; i++) {
            ec_H[2 * i + 1] = ec_H[2 * i] % g->blkSize;
            ec_H[2 * i] = ec_H[2 * i] / g->blkSize;
        }
        local_cell_blocks = ec_H[g->mpi_world_size * 2 - 2] + 1;
        tp[0] = cn_H[g->mpi_world_size * 2 - 2];
        tp[1] = cn_H[g->mpi_world_size * 2 - 1];
        for (int i = 1; i < g->mpi_world_size; i++) {
            tp[i * 2] = ec_H[i * 2 - 2];
            tp[i * 2 + 1] = ec_H[i * 2 - 1];
        }
        mi[0] = 0;
        for (int i = 1; i < g->mpi_world_size; i++) mi[i] = 5 * ec_c[i - 1] + mi[i - 1];
        for (int b = edge_min; b < edge_max; b++) {
            for (int e = (0); e < (g->blkSize); e++) {
                for (int n = (0); n < 2; n++) {
                    if (g->eCellBlk[n]->data_pointer.p2[b][e] >= g->cBlkCnt || g->eCellBlk[n]->data_pointer.p2[b][e] < 0) {
                        g->eCellBlk[n]->data_pointer.p2[b][e] = -1;
                    } else if (g->eCellBlk[n]->data_pointer.p2[b][e] < g->mpi_rank * ((((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + cell_min || g->eCellBlk[n]->data_pointer.p2[b][e] >= g->mpi_rank * ((((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + cell_max) {
                        int pn = g->eCellBlk[n]->data_pointer.p2[b][e] / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
                        ecell_map[mi[pn]++] = pn;
                        ecell_map[mi[pn]++] = g->eCellBlk[n]->data_pointer.p2[b][e] % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
                        ecell_map[mi[pn]++] = g->eCellIdx[n]->data_pointer.p2[b][e];
                        ecell_map[mi[pn]++] = tp[pn * 2];
                        ecell_map[mi[pn]++] = tp[pn * 2 + 1];
                        g->eCellBlk[n]->data_pointer.p2[b][e] = tp[pn * 2];
                        g->eCellIdx[n]->data_pointer.p2[b][e] = tp[pn * 2 + 1];
                        if (++tp[pn * 2 + 1] == g->blkSize) {
                            tp[pn * 2]++;
                            tp[pn * 2 + 1] = 0;
                        }
                    } else {
                        g->eCellBlk[n]->data_pointer.p2[b][e] = g->eCellBlk[n]->data_pointer.p2[b][e] % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
                    }
                }
            }
        }
        free(tp);
        /* Phase 4: MPI exchange of halo bookkeeping.  One send+recv request
           slot per peer; self slots are parked on MPI_REQUEST_NULL. */
        mpi_send_requests = malloc(g->mpi_world_size * 2 * sizeof(MPI_Request));
        mpi_recv_requests = &mpi_send_requests[g->mpi_world_size];
        mpi_send_requests[g->mpi_rank] = MPI_REQUEST_NULL;
        mpi_recv_requests[g->mpi_rank] = MPI_REQUEST_NULL;
        comm_tag = 10;
        cn_crem = malloc(g->mpi_world_size * sizeof(int));
        ce_crem = malloc(g->mpi_world_size * sizeof(int));
        ec_crem = malloc(g->mpi_world_size * sizeof(int));
        /* Exchange per-peer reference counts on tags 0/1/2.
           NOTE(review): the three MPI_Isend/MPI_Irecv pairs per peer reuse the
           same request slot, overwriting the first two handles before
           MPI_Waitall -- those requests are never completed/freed.  Works only
           by accident (eager small messages); should use distinct slots.
           TODO confirm and fix upstream. */
        for (int pn = 0; pn < g->mpi_world_size; pn++) {
            if (pn != g->mpi_rank) {
                MPI_Isend(&cn_c[pn], 1, MPI_INT, pn, 0, MPI_COMM_WORLD, &mpi_send_requests[pn]);
                MPI_Irecv(&cn_crem[pn], 1, MPI_INT, pn, 0, MPI_COMM_WORLD, &mpi_recv_requests[pn]);
                MPI_Isend(&ce_c[pn], 1, MPI_INT, pn, 1, MPI_COMM_WORLD, &mpi_send_requests[pn]);
                MPI_Irecv(&ce_crem[pn], 1, MPI_INT, pn, 1, MPI_COMM_WORLD, &mpi_recv_requests[pn]);
                MPI_Isend(&ec_c[pn], 1, MPI_INT, pn, 2, MPI_COMM_WORLD, &mpi_send_requests[pn]);
                MPI_Irecv(&ec_crem[pn], 1, MPI_INT, pn, 2, MPI_COMM_WORLD, &mpi_recv_requests[pn]);
            } else
                cn_c[g->mpi_rank] = ce_c[g->mpi_rank] = ec_c[g->mpi_rank] = cn_crem[g->mpi_rank] = ce_crem[g->mpi_rank] = ec_crem[g->mpi_rank] = 0;
        }
        MPI_Waitall(g->mpi_world_size * 2, mpi_send_requests, MPI_STATUSES_IGNORE);
        /* Counts become inclusive prefix sums so map slices per peer can be
           addressed as [c[pn-1], c[pn]). */
        for (int i = 1; i < g->mpi_world_size; i++) {
            cn_c[i] += cn_c[i - 1];
            cn_crem[i] += cn_crem[i - 1];
        }
        neighbor_maprem = malloc(cn_crem[g->mpi_world_size - 1] * sizeof(int) * 2);
        for (int i = 1; i < g->mpi_world_size; i++) {
            ce_c[i] += ce_c[i - 1];
            ce_crem[i] += ce_crem[i - 1];
        }
        cedge_maprem = malloc(ce_crem[g->mpi_world_size - 1] * sizeof(int) * 2);
        for (int i = 1; i < g->mpi_world_size; i++) {
            ec_c[i] += ec_c[i - 1];
            ec_crem[i] += ec_crem[i - 1];
        }
        ecell_maprem = malloc(ec_crem[g->mpi_world_size - 1] * sizeof(int) * 2);
        /* Exchange (remote block, remote index) pairs for each of the three
           maps on tags 3/4/5; the peers' requests land in *_maprem. */
        for (int pn = 0; pn < g->mpi_world_size; pn++) {
            if (pn != g->mpi_rank) {
                int *buf = malloc((cn_c[pn] - (pn ? cn_c[pn - 1] : 0)) * sizeof(int) * 2);
                for (int i = 0; i < (cn_c[pn] - (pn ? cn_c[pn - 1] : 0)); i++) {
                    buf[2 * i] = neighbor_map[(pn ? cn_c[pn - 1] * 5 : 0) + 5 * i + 1];
                    buf[2 * i + 1] = neighbor_map[(pn ? cn_c[pn - 1] * 5 : 0) + 5 * i + 2];
                }
                MPI_Isend(buf, (cn_c[pn] - (pn ? cn_c[pn - 1] : 0)) * 2, MPI_INT, pn, 3, MPI_COMM_WORLD, &mpi_send_requests[pn]);
                MPI_Irecv(&neighbor_maprem[(pn ? cn_crem[pn - 1] * 2 : 0)], (cn_crem[pn] - (pn ? cn_crem[pn - 1] : 0)) * 2, MPI_INT, pn, 3, MPI_COMM_WORLD, &mpi_recv_requests[pn]);
                MPI_Wait(&mpi_recv_requests[pn], MPI_STATUS_IGNORE);
                MPI_Wait(&mpi_send_requests[pn], MPI_STATUS_IGNORE);
                free(buf);
                buf = malloc((ce_c[pn] - (pn ? ce_c[pn - 1] : 0)) * sizeof(int) * 2);
                for (int i = 0; i < (ce_c[pn] - (pn ? ce_c[pn - 1] : 0)); i++) {
                    buf[2 * i] = cedge_map[(pn ? ce_c[pn - 1] * 5 : 0) + 5 * i + 1];
                    buf[2 * i + 1] = cedge_map[(pn ? ce_c[pn - 1] * 5 : 0) + 5 * i + 2];
                }
                MPI_Isend(buf, (ce_c[pn] - (pn ? ce_c[pn - 1] : 0)) * 2, MPI_INT, pn, 4, MPI_COMM_WORLD, &mpi_send_requests[pn]);
                MPI_Irecv(&cedge_maprem[(pn ? ce_crem[pn - 1] * 2 : 0)], (ce_crem[pn] - (pn ? ce_crem[pn - 1] : 0)) * 2, MPI_INT, pn, 4, MPI_COMM_WORLD, &mpi_recv_requests[pn]);
                MPI_Wait(&mpi_recv_requests[pn], MPI_STATUS_IGNORE);
                MPI_Wait(&mpi_send_requests[pn], MPI_STATUS_IGNORE);
                free(buf);
                buf = malloc((ec_c[pn] - (pn ? ec_c[pn - 1] : 0)) * sizeof(int) * 2);
                for (int i = 0; i < (ec_c[pn] - (pn ? ec_c[pn - 1] : 0)); i++) {
                    buf[2 * i] = ecell_map[(pn ? ec_c[pn - 1] * 5 : 0) + 5 * i + 1];
                    buf[2 * i + 1] = ecell_map[(pn ? ec_c[pn - 1] * 5 : 0) + 5 * i + 2];
                }
                MPI_Isend(buf, (ec_c[pn] - (pn ? ec_c[pn - 1] : 0)) * 2, MPI_INT, pn, 5, MPI_COMM_WORLD, &mpi_send_requests[pn]);
                MPI_Irecv(&ecell_maprem[(pn ? ec_crem[pn - 1] * 2 : 0)], (ec_crem[pn] - (pn ? ec_crem[pn - 1] : 0)) * 2, MPI_INT, pn, 5, MPI_COMM_WORLD, &mpi_recv_requests[pn]);
                MPI_Wait(&mpi_recv_requests[pn], MPI_STATUS_IGNORE);
                MPI_Wait(&mpi_send_requests[pn], MPI_STATUS_IGNORE);
                free(buf);
            }
        }
        /* Phase 5: per-peer GVAL staging buffers, sized from the prefix-summed
           counts: *_bufrem for values requested by peers, *_buf for values we
           request; 2D = one level, 3D = g->height levels. */
        neighbor_2Dbufrem = malloc(g->mpi_world_size * sizeof(GVAL *));
        neighbor_3Dbufrem = malloc(g->mpi_world_size * sizeof(GVAL *));
        cedge_2Dbufrem = malloc(g->mpi_world_size * sizeof(GVAL *));
        cedge_3Dbufrem = malloc(g->mpi_world_size * sizeof(GVAL *));
        ecell_2Dbufrem = malloc(g->mpi_world_size * sizeof(GVAL *));
        ecell_3Dbufrem = malloc(g->mpi_world_size * sizeof(GVAL *));
        for (int pn = 0; pn < g->mpi_world_size; pn++) {
            if (pn != g->mpi_rank) {
                neighbor_2Dbufrem[pn] = malloc((cn_crem[pn] - (pn ? cn_crem[pn - 1] : 0)) * sizeof(GVAL));
                neighbor_3Dbufrem[pn] = malloc((cn_crem[pn] - (pn ? cn_crem[pn - 1] : 0)) * g->height * sizeof(GVAL));
                cedge_2Dbufrem[pn] = malloc((ce_crem[pn] - (pn ? ce_crem[pn - 1] : 0)) * sizeof(GVAL));
                cedge_3Dbufrem[pn] = malloc((ce_crem[pn] - (pn ? ce_crem[pn - 1] : 0)) * g->height * sizeof(GVAL));
                ecell_2Dbufrem[pn] = malloc((ec_crem[pn] - (pn ? ec_crem[pn - 1] : 0)) * sizeof(GVAL));
                ecell_3Dbufrem[pn] = malloc((ec_crem[pn] - (pn ? ec_crem[pn - 1] : 0)) * g->height * sizeof(GVAL));
            }
        }
        neighbor_2Dbuf = malloc(g->mpi_world_size * sizeof(GVAL *));
        neighbor_3Dbuf = malloc(g->mpi_world_size * sizeof(GVAL *));
        cedge_2Dbuf = malloc(g->mpi_world_size * sizeof(GVAL *));
        cedge_3Dbuf = malloc(g->mpi_world_size * sizeof(GVAL *));
        ecell_2Dbuf = malloc(g->mpi_world_size * sizeof(GVAL *));
        ecell_3Dbuf = malloc(g->mpi_world_size * sizeof(GVAL *));
        for (int pn = 0; pn < g->mpi_world_size; pn++) {
            if (pn != g->mpi_rank) {
                neighbor_2Dbuf[pn] = malloc((cn_c[pn] - (pn ? cn_c[pn - 1] : 0)) * sizeof(GVAL));
                neighbor_3Dbuf[pn] = malloc((cn_c[pn] - (pn ? cn_c[pn - 1] : 0)) * g->height * sizeof(GVAL));
                cedge_2Dbuf[pn] = malloc((ce_c[pn] - (pn ? ce_c[pn - 1] : 0)) * sizeof(GVAL));
                cedge_3Dbuf[pn] = malloc((ce_c[pn] - (pn ? ce_c[pn - 1] : 0)) * g->height * sizeof(GVAL));
                ecell_2Dbuf[pn] = malloc((ec_c[pn] - (pn ? ec_c[pn - 1] : 0)) * sizeof(GVAL));
                ecell_3Dbuf[pn] = malloc((ec_c[pn] - (pn ? ec_c[pn - 1] : 0)) * g->height * sizeof(GVAL));
            }
        }
    }
}
/* get_indices_c(): returns the valid cell-index range [*cb, *ce) inside cell
   block blk; only the last block may be partially filled. */
void get_indices_c(GRID * g, int blk, int *cb, int *ce)
{
    *cb = 0;
    *ce = blk == g->cBlkCnt - 1 ? g->cellCount % g->blkSize == 0 ? g->blkSize : g->cellCount % g->blkSize : g->blkSize;
}
/* get_indices_e(): same as get_indices_c() but for edge block blk. */
void get_indices_e(GRID * g, int blk, int *eb, int *ee)
{
    *eb = 0;
    *ee = blk == g->eBlkCnt - 1 ? g->edgeCount % g->blkSize == 0 ? g->blkSize : g->edgeCount % g->blkSize : g->blkSize;
}
offloading_success.c
// RUN: %libomptarget-compile-run-and-check-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-run-and-check-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-run-and-check-x86_64-pc-linux-gnu

#include <stdio.h>
#include <omp.h>

// Offload a trivial target region and report whether it actually ran on the
// initial device (the host) or on an offload device.  Exit status is 0 only
// when the region executed on the host.
int main(void) {
  int ran_on_host = 0;

#pragma omp target
  {
    ran_on_host = omp_is_initial_device();
  }

  // Without device-launch support in the compiler, the region falls back to
  // the host.
  // CHECK: Target region executed on the host
  const char *where = ran_on_host ? "host" : "device";
  printf("Target region executed on the %s\n", where);

  return ran_on_host ? 0 : 1;
}
deprecate.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE % % D D E P P R R E C A A T E % % D D EEE PPPPP RRRR EEE C AAAAA T EEE % % D D E P R R E C A A T E % % DDDD EEEEE P R R EEEEE CCCC A A T EEEEE % % % % % % MagickWand Deprecated Methods % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "wand/studio.h" #include "wand/MagickWand.h" #include "wand/magick-wand-private.h" #include "wand/wand.h" #include "magick/monitor-private.h" #include "magick/thread-private.h" /* Define declarations. */ #define PixelViewId "PixelView" /* Typedef declarations. */ struct _PixelView { size_t id; char name[MaxTextExtent]; ExceptionInfo *exception; MagickWand *wand; CacheView *view; RectangleInfo region; size_t number_threads; PixelWand ***pixel_wands; MagickBooleanType debug; size_t signature; }; #if !defined(MAGICKCORE_EXCLUDE_DEPRECATED) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k A v e r a g e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickAverageImages() average a set of images. 
%
%  The format of the MagickAverageImages method is:
%
%      MagickWand *MagickAverageImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/

/*
  CloneMagickWandFromImages() wraps an existing image list in a freshly
  allocated MagickWand.  The new wand takes ownership of `images` and copies
  the image/quantize settings and any pending exception from `wand`.
*/
static MagickWand *CloneMagickWandFromImages(const MagickWand *wand,
  Image *images)
{
  MagickWand
    *clone_wand;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  clone_wand=(MagickWand *) AcquireMagickMemory(sizeof(*clone_wand));
  if (clone_wand == (MagickWand *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      images->filename);
  (void) ResetMagickMemory(clone_wand,0,sizeof(*clone_wand));
  /* Fresh identity: new id and a name derived from it. */
  clone_wand->id=AcquireWandId();
  (void) FormatLocaleString(clone_wand->name,MaxTextExtent,"%s-%.20g",
    MagickWandId,(double) clone_wand->id);
  clone_wand->exception=AcquireExceptionInfo();
  InheritException(clone_wand->exception,wand->exception);
  clone_wand->image_info=CloneImageInfo(wand->image_info);
  clone_wand->quantize_info=CloneQuantizeInfo(wand->quantize_info);
  clone_wand->images=images;
  clone_wand->debug=IsEventLogging();
  if (clone_wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_wand->name);
  clone_wand->signature=WandSignature;
  return(clone_wand);
}

/*
  MagickAverageImages() averages the wand's image sequence into a single
  image (mean evaluate) and returns it in a new wand; NULL when the wand has
  no images or the evaluation fails.
*/
WandExport MagickWand *MagickAverageImages(MagickWand *wand)
{
  Image
    *average_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  average_image=EvaluateImages(wand->images,MeanEvaluateOperator,
    wand->exception);
  if (average_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,average_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C l o n e P i x e l V i e w                                               %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClonePixelView() makes a copy of the specified pixel view.
%
%  The format of the ClonePixelView method is:
%
%      PixelView *ClonePixelView(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport PixelView *ClonePixelView(const PixelView *pixel_view)
{
  PixelView
    *clone_view;

  register ssize_t
    i;

  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  if (pixel_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
  clone_view=(PixelView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) clone_view->id);
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,pixel_view->exception);
  /*
    Bug fix: share the (non-owning) wand reference; it was never copied, so
    iterators running on a clone dereferenced a NULL view->wand.
  */
  clone_view->wand=pixel_view->wand;
  clone_view->view=CloneCacheView(pixel_view->view);
  clone_view->region=pixel_view->region;
  clone_view->number_threads=pixel_view->number_threads;
  /*
    Bug fix: the per-thread wand table was never allocated before being
    indexed (ResetMagickMemory() left pixel_wands NULL), so the loop below
    dereferenced a null pointer.  Allocate the table first.
  */
  clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory(
    pixel_view->number_threads,sizeof(*clone_view->pixel_wands));
  if (clone_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  /* Each OpenMP worker gets its own row of region.width pixel wands. */
  for (i=0; i < (ssize_t) pixel_view->number_threads; i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      pixel_view->pixel_wands[i],pixel_view->region.width);
  clone_view->debug=pixel_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=WandSignature;
  return(clone_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D e s t r o y P i x e l V i e w                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPixelView() deallocates memory associated with a pixel view.
%
%  The format of the DestroyPixelView method is:
%
%      PixelView *DestroyPixelView(PixelView *pixel_view,
%        const size_t number_wands,const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
%    o number_wand: the number of pixel wands.
%
%    o number_threads: number of threads.
%
*/

/*
  DestroyPixelsThreadSet() destroys each thread's row of pixel wands (rows
  may be NULL) and then the table itself; returns NULL for reassignment.
*/
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands,const size_t number_threads)
{
  register ssize_t
    i;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (i=0; i < (ssize_t) number_threads; i++)
    if (pixel_wands[i] != (PixelWand **) NULL)
      pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands);
  pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands);
  return(pixel_wands);
}

/*
  DestroyPixelView() releases the wand table, cache view, exception, wand id
  and the view structure itself; the signature is inverted so stale pointers
  fail the assert checks.  Returns NULL.
*/
WandExport PixelView *DestroyPixelView(PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  pixel_view->pixel_wands=DestroyPixelsThreadSet(pixel_view->pixel_wands,
    pixel_view->region.width,pixel_view->number_threads);
  pixel_view->view=DestroyCacheView(pixel_view->view);
  pixel_view->exception=DestroyExceptionInfo(pixel_view->exception);
  pixel_view->signature=(~WandSignature);
  RelinquishWandId(pixel_view->id);
  pixel_view=(PixelView *) RelinquishMagickMemory(pixel_view);
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D u p l e x T r a n s f e r P i x e l V i e w I t e r a t o r             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DuplexTransferPixelViewIterator() iterates over three pixel views in
%  parallel and calls your transfer method for each scanline of the view.  The
%  source and duplex pixel region is not confined to the image canvas-- that is
%  you can include negative offsets or widths or heights that exceed the image
%  dimension.
However, the destination pixel view is confined to the image
%  canvas-- that is no negative offsets or widths or heights that exceed the
%  image dimension are permitted.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the DuplexTransferPixelViewIterator method is:
%
%      MagickBooleanType DuplexTransferPixelViewIterator(PixelView *source,
%        PixelView *duplex,PixelView *destination,
%        DuplexTransferPixelViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o duplex: the duplex pixel view.
%
%    o destination: the destination pixel view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType DuplexTransferPixelViewIterator(
  PixelView *source,PixelView *duplex,PixelView *destination,
  DuplexTransferPixelViewMethod transfer,void *context)
{
#define DuplexTransferPixelViewTag "PixelView/DuplexTransfer"

  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *duplex_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /* NOTE(review): only `source` is validated; `duplex` and `destination` are
     dereferenced below without asserts -- confirm callers guarantee them. */
  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (DuplexTransferPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  duplex_image=duplex->wand->images;
  destination_image=destination->wand->images;
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  /* One scanline per iteration; `continue` (not break) on failure so the
     OpenMP-parallel loop stays well formed. */
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict duplex_indexes,
      *restrict indexes;

    register const PixelPacket
      *restrict duplex_pixels,
      *restrict pixels;

    register IndexPacket
      *restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    /* Load the source scanline into this thread's pixel wands. */
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* Load the duplex scanline the same way. */
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->region.x,y,
      duplex->region.width,1,duplex->exception);
    if (duplex_pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view);
    for (x=0; x < (ssize_t) duplex->region.width; x++)
      PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x);
    if (duplex_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) duplex->region.width; x++)
        PixelSetBlackQuantum(duplex->pixel_wands[id][x],
          GetPixelIndex(duplex_indexes+x));
    if (duplex_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) duplex->region.width; x++)
        PixelSetIndex(duplex->pixel_wands[id][x],
          GetPixelIndex(duplex_indexes+x));
    /* Authentic (writable) destination scanline. */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->region.x,y,destination->region.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    /* User callback transforms the three wand rows in place. */
    if (transfer(source,duplex,destination,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the (possibly modified) destination wands back to the pixels. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /* NOTE(review): sync failed on destination->view, yet the exception
           is inherited from source->view -- looks like a copy/paste slip;
           confirm against the non-deprecated wand-view implementation. */
        InheritException(destination->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_DuplexTransferPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,DuplexTransferPixelViewTag,
          progress++,source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t P i x e l V i e w E x c e p t i o n                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewException() returns the severity, reason, and description of any
%  error that occurs when utilizing a pixel view.
%
%  The format of the GetPixelViewException method is:
%
%      char *GetPixelViewException(const PixelWand *pixel_view,
%        ExceptionType *severity)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel pixel_view.
%
%    o severity: the severity of the error is returned here.
%
*/
WandExport char *GetPixelViewException(const PixelView *pixel_view,
  ExceptionType *severity)
{
  char
    *description;

  assert(pixel_view != (const PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  if (pixel_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
  assert(severity != (ExceptionType *) NULL);
  *severity=pixel_view->exception->severity;
  /* Caller owns the returned string and must relinquish it. */
  description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  *description='\0';
  /* "reason" first, then an optional " (description)" suffix. */
  if (pixel_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      pixel_view->exception->severity,pixel_view->exception->reason),
      MaxTextExtent);
  if (pixel_view->exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(description," (",MaxTextExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        pixel_view->exception->severity,pixel_view->exception->description),
        MaxTextExtent);
      (void) ConcatenateMagickString(description,")",MaxTextExtent);
    }
  return(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t P i x e l V i e w H e i g h t                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewHeight() returns the pixel view height.
%
%  The format of the GetPixelViewHeight method is:
%
%      size_t GetPixelViewHeight(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
% */ WandExport size_t GetPixelViewHeight(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->region.height); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewIterator() iterates over the pixel view in parallel and calls % your get method for each scanline of the view. The pixel region is % not confined to the image canvas-- that is you can include negative offsets % or widths or heights that exceed the image dimension. Any updates to % the pixels in your callback are ignored. % % Use this pragma: % % #pragma omp critical % % to define a section of code in your callback get method that must be % executed by a single thread at a time. % % The format of the GetPixelViewIterator method is: % % MagickBooleanType GetPixelViewIterator(PixelView *source, % GetPixelViewMethod get,void *context) % % A description of each parameter follows: % % o source: the source pixel view. % % o get: the get callback method. % % o context: the user defined context. 
%
*/
WandExport MagickBooleanType GetPixelViewIterator(PixelView *source,
  GetPixelViewMethod get,void *context)
{
#define GetPixelViewTag  "PixelView/Get"

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (get == (GetPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  /*
    NOTE(review): the loop bound is region.height, not region.y+region.height;
    confirm this matches the intended scanline range.
  */
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    /* One pixel-wand row per OpenMP thread; id selects this thread's row. */
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *pixels;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (get(source,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ is serialized by the critical section below. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_GetPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,GetPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewPixels() returns the pixel view pixel_wands.
%
%  The format of the GetPixelViewPixels method is:
%
%      PixelWand **GetPixelViewPixels(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport PixelWand **GetPixelViewPixels(const PixelView *pixel_view)
{
  /* Returns the pixel-wand row that belongs to the calling OpenMP thread. */
  const int
    id = GetOpenMPThreadId();

  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->pixel_wands[id]);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w W a n d                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewWand() returns the magick wand associated with the pixel view.
%
%  The format of the GetPixelViewWand method is:
%
%      MagickWand *GetPixelViewWand(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport MagickWand *GetPixelViewWand(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w W i d t h                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewWidth() returns the pixel view width.
%
%  The format of the GetPixelViewWidth method is:
%
%      size_t GetPixelViewWidth(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport size_t GetPixelViewWidth(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.width);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w X                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewX() returns the pixel view x offset.
%
%  The format of the GetPixelViewX method is:
%
%      ssize_t GetPixelViewX(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport ssize_t GetPixelViewX(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.x);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w Y                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewY() returns the pixel view y offset.
%
%  The format of the GetPixelViewY method is:
%
%      ssize_t GetPixelViewY(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport ssize_t GetPixelViewY(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.y);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s P i x e l V i e w                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsPixelView() returns MagickTrue if the parameter is verified as a pixel
%  view container.
%
%  The format of the IsPixelView method is:
%
%      MagickBooleanType IsPixelView(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport MagickBooleanType IsPixelView(const PixelView *pixel_view)
{
  size_t
    length;

  if (pixel_view == (const PixelView *) NULL)
    return(MagickFalse);
  if (pixel_view->signature != WandSignature)
    return(MagickFalse);
  /* A pixel view's name starts with the PixelViewId prefix. */
  length=strlen(PixelViewId);
  if (LocaleNCompare(pixel_view->name,PixelViewId,length) != 0)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k C l i p P a t h I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickClipPathImage() clips along the named paths from the 8BIM profile, if
%  present.  Later operations take effect inside the path.  Id may be a number
%  if preceded with #, to work on a numbered path, e.g., "#1" to use the first
%  path.
%
%  The format of the MagickClipPathImage method is:
%
%      MagickBooleanType MagickClipPathImage(MagickWand *wand,
%        const char *pathname,const MagickBooleanType inside)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o pathname: name of clipping path resource. If name is preceded by #, use
%      clipping path numbered by name.
%
%    o inside: if non-zero, later operations take effect inside clipping path.
%      Otherwise later operations take effect outside clipping path.
%
*/
WandExport MagickBooleanType MagickClipPathImage(MagickWand *wand,
  const char *pathname,const MagickBooleanType inside)
{
  /* Deprecated alias for MagickClipImagePath(). */
  return(MagickClipImagePath(wand,pathname,inside));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w G e t F i l l A l p h a                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawGetFillAlpha() returns the alpha used when drawing using the fill
%  color or fill texture.  Fully opaque is 1.0.
%
%  The format of the DrawGetFillAlpha method is:
%
%      double DrawGetFillAlpha(const DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport double DrawGetFillAlpha(const DrawingWand *wand)
{
  /* Deprecated alias for DrawGetFillOpacity(). */
  return(DrawGetFillOpacity(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w G e t S t r o k e A l p h a                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawGetStrokeAlpha() returns the alpha of stroked object outlines.
%
%  The format of the DrawGetStrokeAlpha method is:
%
%      double DrawGetStrokeAlpha(const DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport double DrawGetStrokeAlpha(const DrawingWand *wand)
{
  /* Deprecated alias for DrawGetStrokeOpacity(). */
  return(DrawGetStrokeOpacity(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P e e k G r a p h i c W a n d                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPeekGraphicWand() returns the current drawing wand.
%
%  The format of the PeekDrawingWand method is:
%
%      DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
{
  /* Deprecated alias for PeekDrawingWand(). */
  return(PeekDrawingWand(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P o p G r a p h i c C o n t e x t                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPopGraphicContext() destroys the current drawing wand and returns to the
%  previously pushed drawing wand.  Multiple drawing wands may exist.  It is an
%  error to attempt to pop more drawing wands than have been pushed, and it is
%  proper form to pop all drawing wands which have been pushed.
%
%  The format of the DrawPopGraphicContext method is:
%
%      void DrawPopGraphicContext(DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport void DrawPopGraphicContext(DrawingWand *wand)
{
  /* Deprecated alias for PopDrawingWand(). */
  (void) PopDrawingWand(wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P u s h G r a p h i c C o n t e x t                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPushGraphicContext() clones the current drawing wand to create a new
%  drawing wand.  The original drawing wand(s) may be returned to by
%  invoking PopDrawingWand().  The drawing wands are stored on a drawing wand
%  stack.  For every Pop there must have already been an equivalent Push.
%
%  The format of the DrawPushGraphicContext method is:
%
%      void DrawPushGraphicContext(DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport void DrawPushGraphicContext(DrawingWand *wand)
{
  /* Deprecated alias for PushDrawingWand(). */
  (void) PushDrawingWand(wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w S e t F i l l A l p h a                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawSetFillAlpha() sets the alpha to use when drawing using the fill
%  color or fill texture.  Fully opaque is 1.0.
%
%  The format of the DrawSetFillAlpha method is:
%
%      void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
%    o fill_alpha: fill alpha
%
*/
WandExport void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
{
  /* Deprecated alias for DrawSetFillOpacity(). */
  DrawSetFillOpacity(wand,fill_alpha);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w S e t S t r o k e A l p h a                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawSetStrokeAlpha() specifies the alpha of stroked object outlines.
%
%  The format of the DrawSetStrokeAlpha method is:
%
%      void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
%    o stroke_alpha: stroke alpha.  The value 1.0 is opaque.
%
*/
WandExport void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
{
  /* Deprecated alias for DrawSetStrokeOpacity(). */
  DrawSetStrokeOpacity(wand,stroke_alpha);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k C o l o r F l o o d f i l l I m a g e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickColorFloodfillImage() changes the color value of any pixel that matches
%  target and is an immediate neighbor.  If the method FillToBorderMethod is
%  specified, the color value is changed for any neighbor pixel that does not
%  match the bordercolor member of image.
%
%  The format of the MagickColorFloodfillImage method is:
%
%      MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
%        const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
%        const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o fill: the floodfill color pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small amount.
%      The fuzz member of image defines how much tolerance is acceptable to
%      consider two colors as the same.
%      For example, set fuzz to 10 and the
%      color red at intensities of 100 and 102 respectively are now interpreted
%      as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
  const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  DrawInfo
    *draw_info;

  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  draw_info=CloneDrawInfo(wand->image_info,(DrawInfo *) NULL);
  PixelGetQuantumColor(fill,&draw_info->fill);
  /*
    NOTE(review): x/y are ssize_t while columns/rows are size_t; a negative
    offset makes the '%' operand wrap to a huge unsigned value -- confirm
    callers never pass negative coordinates here.
  */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  status=ColorFloodfillImage(wand->images,draw_info,target,x,y,
    bordercolor != (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k D e s c r i b e I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickDescribeImage() identifies an image by printing its attributes to the
%  file.  Attributes include the image width, height, size, and others.
%
%  The format of the MagickDescribeImage method is:
%
%      const char *MagickDescribeImage(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport char *MagickDescribeImage(MagickWand *wand)
{
  /* Deprecated alias for MagickIdentifyImage(). */
  return(MagickIdentifyImage(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k F l a t t e n I m a g e s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickFlattenImages() merges a sequence of images.  This useful for
%  combining Photoshop layers into a single image.
%
%  The format of the MagickFlattenImages method is:
%
%      MagickWand *MagickFlattenImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickWand *MagickFlattenImages(MagickWand *wand)
{
  Image
    *flatten_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  flatten_image=FlattenImages(wand->images,wand->exception);
  if (flatten_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,flatten_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k G e t I m a g e A t t r i b u t e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageAttribute() returns a value associated with the specified
%  property.  Use MagickRelinquishMemory() to free the value when you are
%  finished with it.
%
%  The format of the MagickGetImageAttribute method is:
%
%      char *MagickGetImageAttribute(MagickWand *wand,const char *property)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o property: the property.
%
*/
WandExport char *MagickGetImageAttribute(MagickWand *wand,const char *property)
{
  /* Deprecated alias for MagickGetImageProperty(). */
  return(MagickGetImageProperty(wand,property));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   M a g i c k G e t I m a g e I n d e x                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageIndex() returns the index of the current image.
%
%  The format of the MagickGetImageIndex method is:
%
%      ssize_t MagickGetImageIndex(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport ssize_t MagickGetImageIndex(MagickWand *wand)
{
  /* Deprecated alias for MagickGetIteratorIndex(). */
  return(MagickGetIteratorIndex(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   M a g i c k G e t I m a g e C h a n n e l E x t r e m a                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageChannelExtrema() gets the extrema for one or more image
%  channels.
%
%  The format of the MagickGetImageChannelExtrema method is:
%
%      MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
%        const ChannelType channel,size_t *minima,size_t *maxima)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the image channel(s).
%
%    o minima: The minimum pixel value for the specified channel(s).
%
%    o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
  const ChannelType channel,size_t *minima,size_t *maxima)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=GetImageChannelExtrema(wand->images,channel,minima,maxima,
    wand->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   M a g i c k G e t I m a g e E x t r e m a                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageExtrema() gets the extrema for the image.
%
%  The format of the MagickGetImageExtrema method is:
%
%      MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
%        size_t *minima,size_t *maxima)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o minima: The minimum pixel value for the specified channel(s).
%
%    o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
  size_t *minima,size_t *maxima)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=GetImageExtrema(wand->images,minima,maxima,wand->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k G e t I m a g e M a t t e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageMatte() returns MagickTrue if the image has a matte channel
%  otherwise MagickFalse.
%
%  The format of the MagickGetImageMatte method is:
%
%      MagickBooleanType MagickGetImageMatte(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickBooleanType MagickGetImageMatte(MagickWand *wand)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  return(wand->images->matte);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k G e t I m a g e P i x e l s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImagePixels() extracts pixel data from an image and returns it to
%  you.  The method returns MagickTrue on success otherwise MagickFalse if an
%  error is encountered.  The data is returned as char, short int, int, ssize_t,
%  float, or double in the order specified by map.
%
%  Suppose you want to extract the first scanline of a 640x480 image as
%  character data in red-green-blue order:
%
%      MagickGetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
%  The format of the MagickGetImagePixels method is:
%
%      MagickBooleanType MagickGetImagePixels(MagickWand *wand,
%        const ssize_t x,const ssize_t y,const size_t columns,
%        const size_t rows,const char *map,const StorageType storage,
%        void *pixels)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x, y, columns, rows:  These values define the perimeter
%      of a region of pixels you want to extract.
%
%    o map:  This string reflects the expected ordering of the pixel array.
%      It can be any combination or order of R = red, G = green, B = blue,
%      A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
%      Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
%      P = pad.
%
%    o storage: Define the data type of the pixels.  Float and double types are
%      expected to be normalized [0..1] otherwise [0..QuantumRange].  Choose from
%      these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel,
%      LongPixel, QuantumPixel, or ShortPixel.
%
%    o pixels: This array of values contain the pixel components as defined by
%      map and type.  You must preallocate this array where the expected
%      length varies depending on the values of width, height, map, and type.
%
*/
WandExport MagickBooleanType MagickGetImagePixels(MagickWand *wand,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,const char *map,const StorageType storage,
  void *pixels)
{
  /* Deprecated alias for MagickExportImagePixels(). */
  return(MagickExportImagePixels(wand,x,y,columns,rows,map,storage,pixels));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k G e t I m a g e S i z e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageSize() returns the image length in bytes.
%
%  The format of the MagickGetImageSize method is:
%
%      MagickSizeType MagickGetImageSize(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickSizeType MagickGetImageSize(MagickWand *wand)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  return(GetBlobSize(wand->images));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k M a p I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMapImage() replaces the colors of an image with the closest color
%  from a reference image.
%
%  The format of the MagickMapImage method is:
%
%      MagickBooleanType MagickMapImage(MagickWand *wand,
%        const MagickWand *map_wand,const MagickBooleanType dither)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o map: the map wand.
%
%    o dither: Set this integer value to something other than zero to dither
%      the mapped image.
%
*/
WandExport MagickBooleanType MagickMapImage(MagickWand *wand,
  const MagickWand *map_wand,const MagickBooleanType dither)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  /*
    Validate map_wand before it is dereferenced below; without this a NULL
    map wand crashed inside the images check rather than failing the usual
    signature assertions like every other wand argument in this API.
  */
  assert(map_wand != (MagickWand *) NULL);
  assert(map_wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if ((wand->images == (Image *) NULL) ||
      (map_wand->images == (Image *) NULL))
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=MapImage(wand->images,map_wand->images,dither);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k M a t t e F l o o d f i l l I m a g e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMatteFloodfillImage() changes the transparency value of any pixel that
%  matches target and is an immediate neighbor.  If the method
%  FillToBorderMethod is specified, the transparency value is changed for any
%  neighbor pixel that does not match the bordercolor member of image.
%
%  The format of the MagickMatteFloodfillImage method is:
%
%      MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
%        const double alpha,const double fuzz,const PixelWand *bordercolor,
%        const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
%      transparent.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small amount.
%      The fuzz member of image defines how much tolerance is acceptable to
%      consider two colors as the same.  For example, set fuzz to 10 and the
%      color red at intensities of 100 and 102 respectively are now interpreted
%      as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
  const double alpha,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /*
    The previous version cloned a DrawInfo here and immediately destroyed it
    without ever using it; the dead allocation has been removed.
  */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  /* Map alpha [0..1] onto the quantum opacity range expected by the core. */
  status=MatteFloodfillImage(wand->images,target,ClampToQuantum(
    (MagickRealType) QuantumRange-QuantumRange*alpha),x,y,bordercolor !=
    (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k M e d i a n F i l t e r I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMedianFilterImage() applies a digital filter that improves the quality
%  of a noisy image.  Each pixel is replaced by the median in a set of
%  neighboring pixels as defined by radius.
%
%  The format of the MagickMedianFilterImage method is:
%
%      MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
% */ WandExport MagickBooleanType MagickMedianFilterImage(MagickWand *wand, const double radius) { Image *median_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) ThrowWandException(WandError,"ContainsNoImages",wand->name); median_image=MedianFilterImage(wand->images,radius,wand->exception); if (median_image == (Image *) NULL) return(MagickFalse); ReplaceImageInList(&wand->images,median_image); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k M i n i m u m I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickMinimumImages() returns the minimum intensity of an image sequence. % % The format of the MagickMinimumImages method is: % % MagickWand *MagickMinimumImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ WandExport MagickWand *MagickMinimumImages(MagickWand *wand) { Image *minimum_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) return((MagickWand *) NULL); minimum_image=EvaluateImages(wand->images,MinEvaluateOperator, wand->exception); if (minimum_image == (Image *) NULL) return((MagickWand *) NULL); return(CloneMagickWandFromImages(wand,minimum_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k M o d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickModeImage() makes each pixel the 'predominant color' of the % neighborhood of the specified radius. 
% % The format of the MagickModeImage method is: % % MagickBooleanType MagickModeImage(MagickWand *wand, % const double radius) % % A description of each parameter follows: % % o wand: the magick wand. % % o radius: the radius of the pixel neighborhood. % */ WandExport MagickBooleanType MagickModeImage(MagickWand *wand, const double radius) { Image *mode_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) ThrowWandException(WandError,"ContainsNoImages",wand->name); mode_image=ModeImage(wand->images,radius,wand->exception); if (mode_image == (Image *) NULL) return(MagickFalse); ReplaceImageInList(&wand->images,mode_image); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k M o s a i c I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickMosaicImages() inlays an image sequence to form a single coherent % picture. It returns a wand with each image in the sequence composited at % the location defined by the page offset of the image. % % The format of the MagickMosaicImages method is: % % MagickWand *MagickMosaicImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. 
%
*/
WandExport MagickWand *MagickMosaicImages(MagickWand *wand)
{
  Image
    *composite_image;

  /*
    Deprecated: composite the sequence using each image's page offset.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  composite_image=MosaicImages(wand->images,wand->exception);
  if (composite_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,composite_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k O p a q u e I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickOpaqueImage() changes any pixel that matches color with the color
%  defined by fill.
%
%  The format of the MagickOpaqueImage method is:
%
%      MagickBooleanType MagickOpaqueImage(MagickWand *wand,
%        const PixelWand *target,const PixelWand *fill,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o target: Change this target color to the fill color within the image.
%
%    o fill: the fill pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  MagickBooleanType
    status;

  /*
    Deprecated alias: defer to MagickPaintOpaqueImage().
  */
  status=MagickPaintOpaqueImage(wand,target,fill,fuzz);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k P a i n t F l o o d f i l l I m a g e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintFloodfillImage() changes the color value of any pixel that
%  matches target and is an immediate neighbor.  If the method
%  FillToBorderMethod is specified, the color value is changed for any
%  neighbor pixel that does not match the bordercolor member of image.
%
%  The format of the MagickPaintFloodfillImage method is:
%
%      MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
%        const ChannelType channel,const PixelWand *fill,const double fuzz,
%        const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o fill: the floodfill color pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
  const ChannelType channel,const PixelWand *fill,const double fuzz,
  const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
{
  /*
    Deprecated: forward to MagickFloodfillPaintImage() with invert disabled.
  */
  return(MagickFloodfillPaintImage(wand,channel,fill,fuzz,bordercolor,x,y,
    MagickFalse));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k P a i n t O p a q u e I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintOpaqueImage() changes any pixel that matches color with the
%  color defined by fill.
%
%  The format of the MagickPaintOpaqueImage method is:
%
%      MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
%        const PixelWand *target,const PixelWand *fill,const double fuzz)
%      MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
%        const ChannelType channel,const PixelWand *target,
%        const PixelWand *fill,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o target: Change this target color to the fill color within the image.
%
%    o fill: the fill pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  MagickBooleanType
    status;

  /*
    All-channels convenience form of MagickPaintOpaqueImageChannel().
  */
  status=MagickPaintOpaqueImageChannel(wand,DefaultChannels,target,fill,fuzz);
  return(status);
}

WandExport MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
  const ChannelType channel,const PixelWand *target,const PixelWand *fill,
  const double fuzz)
{
  /*
    Deprecated: forward to MagickOpaquePaintImageChannel(), invert disabled.
  */
  return(MagickOpaquePaintImageChannel(wand,channel,target,fill,fuzz,
    MagickFalse));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k P a i n t T r a n s p a r e n t I m a g e                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintTransparentImage() changes any pixel that matches color with
%  the color defined by fill.
%
%  The format of the MagickPaintTransparentImage method is:
%
%      MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
%        const PixelWand *target,const double alpha,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o target: Change this target color to specified opacity value within
%      the image.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
%      transparent.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
% */ WandExport MagickBooleanType MagickPaintTransparentImage(MagickWand *wand, const PixelWand *target,const double alpha,const double fuzz) { return(MagickTransparentPaintImage(wand,target,alpha,fuzz,MagickFalse)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k R a d i a l B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickRadialBlurImage() radial blurs an image. % % The format of the MagickRadialBlurImage method is: % % MagickBooleanType MagickRadialBlurImage(MagickWand *wand, % const double angle) % MagickBooleanType MagickRadialBlurImageChannel(MagickWand *wand, % const ChannelType channel,const double angle) % % A description of each parameter follows: % % o wand: the magick wand. % % o channel: the image channel(s). % % o angle: the angle of the blur in degrees. % */ WandExport MagickBooleanType MagickRadialBlurImage(MagickWand *wand, const double angle) { return(MagickRotationalBlurImage(wand,angle)); } WandExport MagickBooleanType MagickRadialBlurImageChannel(MagickWand *wand, const ChannelType channel,const double angle) { return(MagickRotationalBlurImageChannel(wand,channel,angle)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k R e c o l o r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickRecolorImage() apply color transformation to an image. The method % permits saturation changes, hue rotation, luminance to alpha, and various % other effects. Although variable-sized transformation matrices can be used, % typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA % (or RGBA with offsets). The matrix is similar to those used by Adobe Flash % except offsets are in column 6 rather than 5 (in support of CMYKA images) % and offsets are normalized (divide Flash offset by 255). 
%
%  The format of the MagickRecolorImage method is:
%
%      MagickBooleanType MagickRecolorImage(MagickWand *wand,
%        const size_t order,const double *color_matrix)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o order: the number of columns and rows in the color matrix.
%
%    o color_matrix: An array of doubles representing the color matrix.
%
*/
WandExport MagickBooleanType MagickRecolorImage(MagickWand *wand,
  const size_t order,const double *color_matrix)
{
  Image
    *recolor_image;

  /*
    Deprecated: apply an order x order color transformation matrix.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (color_matrix == (const double *) NULL)
    return(MagickFalse);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  recolor_image=RecolorImage(wand->images,order,color_matrix,wand->exception);
  if (recolor_image == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,recolor_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k R e d u c e N o i s e I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickReduceNoiseImage() smooths the contours of an image while still
%  preserving edge information.  The algorithm works by replacing each pixel
%  with its neighbor closest in value.  A neighbor is defined by radius.  Use
%  a radius of 0 and ReduceNoise() selects a suitable radius for you.
%
%  The format of the MagickReduceNoiseImage method is:
%
%      MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
% */ WandExport MagickBooleanType MagickReduceNoiseImage(MagickWand *wand, const double radius) { Image *noise_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) ThrowWandException(WandError,"ContainsNoImages",wand->name); noise_image=ReduceNoiseImage(wand->images,radius,wand->exception); if (noise_image == (Image *) NULL) return(MagickFalse); ReplaceImageInList(&wand->images,noise_image); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k M a x i m u m I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickMaximumImages() returns the maximum intensity of an image sequence. % % The format of the MagickMaximumImages method is: % % MagickWand *MagickMaximumImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ WandExport MagickWand *MagickMaximumImages(MagickWand *wand) { Image *maximum_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) return((MagickWand *) NULL); maximum_image=EvaluateImages(wand->images,MaxEvaluateOperator, wand->exception); if (maximum_image == (Image *) NULL) return((MagickWand *) NULL); return(CloneMagickWandFromImages(wand,maximum_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k S e t I m a g e A t t r i b u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageAttribute() associates a property with an image. 
% % The format of the MagickSetImageAttribute method is: % % MagickBooleanType MagickSetImageAttribute(MagickWand *wand, % const char *property,const char *value) % % A description of each parameter follows: % % o wand: the magick wand. % % o property: the property. % % o value: the value. % */ WandExport MagickBooleanType MagickSetImageAttribute(MagickWand *wand, const char *property,const char *value) { return(SetImageProperty(wand->images,property,value)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k S e t I m a g e I n d e x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageIndex() set the current image to the position of the list % specified with the index parameter. % % The format of the MagickSetImageIndex method is: % % MagickBooleanType MagickSetImageIndex(MagickWand *wand, % const ssize_t index) % % A description of each parameter follows: % % o wand: the magick wand. % % o index: the scene number. % */ WandExport MagickBooleanType MagickSetImageIndex(MagickWand *wand, const ssize_t index) { return(MagickSetIteratorIndex(wand,index)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a g i c k S e t I m a g e O p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageOption() associates one or options with a particular image % format (.e.g MagickSetImageOption(wand,"jpeg","perserve","yes"). % % The format of the MagickSetImageOption method is: % % MagickBooleanType MagickSetImageOption(MagickWand *wand, % const char *format,const char *key,const char *value) % % A description of each parameter follows: % % o wand: the magick wand. % % o format: the image format. % % o key: The key. % % o value: The value. 
%
*/
WandExport MagickBooleanType MagickSetImageOption(MagickWand *wand,
  const char *format,const char *key,const char *value)
{
  char
    image_option[MaxTextExtent];

  /*
    Deprecated: compose "format:key=value" and register it on the image info.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  (void) FormatLocaleString(image_option,MaxTextExtent,"%s:%s=%s",format,key,
    value);
  return(DefineImageOption(wand->image_info,image_option));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k T r a n s p a r e n t I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickTransparentImage() changes any pixel that matches color with the
%  color defined by fill.
%
%  The format of the MagickTransparentImage method is:
%
%      MagickBooleanType MagickTransparentImage(MagickWand *wand,
%        const PixelWand *target,const double alpha,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o target: Change this target color to specified opacity value within
%      the image.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
%      transparent.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  MagickBooleanType
    status;

  /*
    Deprecated alias: defer to MagickPaintTransparentImage().
  */
  status=MagickPaintTransparentImage(wand,target,alpha,fuzz);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k R e g i o n O f I n t e r e s t I m a g e                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickRegionOfInterestImage() extracts a region of the image and returns
%  it as a new wand.
%
%  The format of the MagickRegionOfInterestImage method is:
%
%      MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
%        const size_t width,const size_t height,const ssize_t x,
%        const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o width: the region width.
%
%    o height: the region height.
%
%    o x: the region x offset.
%
%    o y: the region y offset.
%
*/
WandExport MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
  const size_t width,const size_t height,const ssize_t x,
  const ssize_t y)
{
  /* Deprecated alias for MagickGetImageRegion(). */
  return(MagickGetImageRegion(wand,width,height,x,y));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k S e t I m a g e P i x e l s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImagePixels() accepts pixel data and stores it in the image at
%  the location you specify.  The method returns MagickTrue on success
%  otherwise MagickFalse if an error is encountered.  The pixel data can be
%  either char, short int, int, ssize_t, float, or double in the order
%  specified by map.
% % Suppose your want to upload the first scanline of a 640x480 image from % character data in red-green-blue order: % % MagickSetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels); % % The format of the MagickSetImagePixels method is: % % MagickBooleanType MagickSetImagePixels(MagickWand *wand, % const ssize_t x,const ssize_t y,const size_t columns, % const size_t rows,const char *map,const StorageType storage, % const void *pixels) % % A description of each parameter follows: % % o wand: the magick wand. % % o x, y, columns, rows: These values define the perimeter of a region % of pixels you want to define. % % o map: This string reflects the expected ordering of the pixel array. % It can be any combination or order of R = red, G = green, B = blue, % A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan, % Y = yellow, M = magenta, K = black, I = intensity (for grayscale), % P = pad. % % o storage: Define the data type of the pixels. Float and double types are % expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from % these types: CharPixel, ShortPixel, IntegerPixel, LongPixel, FloatPixel, % or DoublePixel. % % o pixels: This array of values contain the pixel components as defined by % map and type. You must preallocate this array where the expected % length varies depending on the values of width, height, map, and type. % */ WandExport MagickBooleanType MagickSetImagePixels(MagickWand *wand, const ssize_t x,const ssize_t y,const size_t columns, const size_t rows,const char *map,const StorageType storage, const void *pixels) { return(MagickImportImagePixels(wand,x,y,columns,rows,map,storage,pixels)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k W r i t e I m a g e B l o b % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickWriteImageBlob() implements direct to memory image formats. 
It
%  returns the image as a blob and its length.  Use MagickSetFormat() to
%  set the format of the returned blob (GIF, JPEG, PNG, etc.).
%
%  Use MagickRelinquishMemory() to free the blob when you are done with it.
%
%  The format of the MagickWriteImageBlob method is:
%
%      unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o length: the length of the blob.
%
*/
WandExport unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
{
  unsigned char
    *blob;

  /*
    Deprecated alias for MagickGetImageBlob().
  */
  blob=MagickGetImageBlob(wand,length);
  return(blob);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w P i x e l V i e w                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewPixelView() returns a pixel view required for all other methods in the
%  Pixel View API.
%
%  The format of the NewPixelView method is:
%
%      PixelView *NewPixelView(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the wand.
%
*/

/*
  AcquirePixelsThreadSet() allocates one array of pixel wands per worker
  thread so each OpenMP thread in the view iterators has a private scanline
  buffer.  Returns NULL on allocation failure; a partial allocation is torn
  down via DestroyPixelsThreadSet() (defined earlier in this file).
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
  const size_t number_threads)
{
  PixelWand
    ***pixel_wands;

  register ssize_t
    i;

  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  /* Zero first so a partial failure below can be cleaned up safely. */
  (void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_wands[i]=NewPixelWands(number_wands);
    if (pixel_wands[i] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
  }
  return(pixel_wands);
}

WandExport PixelView *NewPixelView(MagickWand *wand)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  /* NOTE(review): siblings assert WandSignature; this uses MagickSignature —
     presumably an alias of the same value, confirm against the header. */
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  /* Give the view a unique id/name for logging and wand bookkeeping. */
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /* The wand is assigned before the cache view is acquired from its images. */
  pixel_view->wand=wand;
  pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
    pixel_view->exception);
  /* The view spans the full canvas; region.x/region.y stay zero. */
  pixel_view->region.width=wand->images->columns;
  pixel_view->region.height=wand->images->rows;
  /* One row of pixel wands per OpenMP thread (see AcquirePixelsThreadSet). */
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w P i x e l V i e w R e g i o n                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewPixelViewRegion() returns a pixel view required for all other methods
%  in the Pixel View API.
%
%  The format of the NewPixelViewRegion method is:
%
%      PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x,y,width,height: These values define the perimeter of the region of
%      pixel_wands covered by the view.
%
*/
WandExport PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  /* Give the view a unique id/name for logging and wand bookkeeping. */
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /*
    Assign the wand BEFORE acquiring the cache view: the original code
    dereferenced pixel_view->wand->images while pixel_view->wand was still
    NULL (the structure had just been zeroed), crashing on first use.
    NewPixelView() above performs these steps in the correct order.
  */
  pixel_view->wand=wand;
  pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
    pixel_view->exception);
  /* Restrict the view to the caller-supplied region. */
  pixel_view->region.width=width;
  pixel_view->region.height=height;
  pixel_view->region.x=x;
  pixel_view->region.y=y;
  /* One row of pixel wands per OpenMP thread (see AcquirePixelsThreadSet). */
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i x e l G e t N e x t R o w                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PixelGetNextRow() returns the next row as an array of pixel wands from the % pixel iterator. % % The format of the PixelGetNextRow method is: % % PixelWand **PixelGetNextRow(PixelIterator *iterator, % size_t *number_wands) % % A description of each parameter follows: % % o iterator: the pixel iterator. % % o number_wands: the number of pixel wands. % */ WandExport PixelWand **PixelGetNextRow(PixelIterator *iterator) { size_t number_wands; return(PixelGetNextIteratorRow(iterator,&number_wands)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P i x e l I t e r a t o r G e t E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PixelIteratorGetException() returns the severity, reason, and description of % any error that occurs when using other methods in this API. % % The format of the PixelIteratorGetException method is: % % char *PixelIteratorGetException(const Pixeliterator *iterator, % ExceptionType *severity) % % A description of each parameter follows: % % o iterator: the pixel iterator. % % o severity: the severity of the error is returned here. % */ WandExport char *PixelIteratorGetException(const PixelIterator *iterator, ExceptionType *severity) { return(PixelGetIteratorException(iterator,severity)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t P i x e l V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelViewIterator() iterates over the pixel view in parallel and calls % your set method for each scanline of the view. The pixel region is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension. 
The pixels are initially
%  undefined and any settings you make in the callback method are
%  automagically synced back to your image.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetPixelViewIterator method is:
%
%      MagickBooleanType SetPixelViewIterator(PixelView *destination,
%        SetPixelViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the pixel view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType SetPixelViewIterator(PixelView *destination,
  SetPixelViewMethod set,void *context)
{
#define SetPixelViewTag "PixelView/Set"

  ExceptionInfo
    *exception;

  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(destination != (PixelView *) NULL);
  assert(destination->signature == WandSignature);
  if (set == (SetPixelViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* Pixels are written directly, so the image must be DirectClass. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=destination->region.y; y < (ssize_t) destination->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    /* A failure in any iteration makes the remaining rows no-ops. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->region.x,
      y,destination->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* Invoke the user callback; it fills this thread's pixel wand row. */
    if (set(destination,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the callback's wand row back into the authentic pixels. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Progress callbacks are not thread-safe; serialize them. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_SetPixelViewIterator)
#endif
        proceed=SetImageProgress(destination_image,SetPixelViewTag,progress++,
          destination->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f e r P i x e l V i e w I t e r a t o r                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferPixelViewIterator() iterates over two pixel views in parallel and
%  calls your transfer method for each scanline of the view.  The source
%  pixel region is not confined to the image canvas-- that is you can include
%  negative offsets or widths or heights that exceed the image dimension.
%  However, the destination pixel view is confined to the image canvas-- that
%  is no negative offsets or widths or heights that exceed the image
%  dimension are permitted.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the TransferPixelViewIterator method is:
%
%      MagickBooleanType TransferPixelViewIterator(PixelView *source,
%        PixelView *destination,TransferPixelViewMethod transfer,
%        void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o destination: the destination pixel view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType TransferPixelViewIterator(PixelView *source,
  PixelView *destination,TransferPixelViewMethod transfer,void *context)
{
#define TransferPixelViewTag "PixelView/Transfer"

  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (TransferPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  /* The destination is written directly, so it must be DirectClass. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict pixels;

    register IndexPacket
      *restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict destination_pixels;

    /* A failure in any iteration makes the remaining rows no-ops. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    /* Seed this thread's source pixel wand row from the source scanline. */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->region.x,y,destination->region.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* NOTE(review): the destination wand row is seeded from the SOURCE
       scanline ('pixels'/'indexes'), not from 'destination_pixels' /
       'destination_indexes' — verify this pre-fill is intentional. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* Invoke the user callback to transform source wands into destination. */
    if (transfer(source,destination,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the callback's destination wand row back into the image. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /* NOTE(review): inherits the exception from source->view although the
           sync that failed was on destination->view — confirm intended. */
        InheritException(destination->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Progress callbacks are not thread-safe; serialize them. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_TransferPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,TransferPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U p d a t e P i x e l V i e w I t e r a t o r                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UpdatePixelViewIterator() iterates over the pixel view in parallel and
%  calls your update method for each scanline of the view.  The pixel region
%  is confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension are permitted.  Updates to pixels
%  in your callback are automagically synced back to the image.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback update method that must be
%  executed by a single thread at a time.
%
%  The format of the UpdatePixelViewIterator method is:
%
%      MagickBooleanType UpdatePixelViewIterator(PixelView *source,
%        UpdatePixelViewMethod update,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o update: the update callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType UpdatePixelViewIterator(PixelView *source,
  UpdatePixelViewMethod update,void *context)
{
#define UpdatePixelViewTag  "PixelView/Update"

  ExceptionInfo
    *exception;

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (update == (UpdatePixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  /* authentic (writable) pixels require DirectClass storage */
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    /* once any scanline fails, remaining iterations become no-ops */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->region.x,y,
      source->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(source->view);
    /* load this scanline into the per-thread row of pixel wands */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* user callback mutates the wands in place */
    if (update(source,context) == MagickFalse)
      status=MagickFalse;
    /* write the wands back and sync the cache view to the image */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        SetPixelIndex(indexes+x,PixelGetBlackQuantum(
          source->pixel_wands[id][x]));
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress counter is shared across threads; serialize the update */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_UpdatePixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,UpdatePixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
#endif
denseBlocksJacobi.h
// // Created by mbarb on 23/01/2018. // #ifndef PARALLELITERATIVE_DENSEBLOCKSJACOBI_H #define PARALLELITERATIVE_DENSEBLOCKSJACOBI_H #include "Eigen" #include "utils.h" #include "denseParallelJacobi.h" namespace Iterative { template <typename Scalar, long long SIZE> class denseBlocksJacobi : public denseParallelJacobi<Scalar, SIZE> { public: /** * * @param A linear system matrix * @param b known term vector * @param iterations max number of iterations * @param tolerance min error tolerated * @param workers number of threads * @param blockSize size of the block */ explicit denseBlocksJacobi( const Eigen::Matrix<Scalar, SIZE, SIZE>& A, const Eigen::ColumnVector<Scalar, SIZE>& b, const ulonglong iterations, const Scalar tolerance, const ulong workers = 0L, const ulonglong blockSize = 0L) : denseParallelJacobi<Scalar, SIZE>::denseParallelJacobi(A, b, iterations, tolerance, workers) { this->blockSize = blockSize; if (blockSize == 0) this->blockSize = std::max(ulong(this->A.cols() / workers), (ulong)1L); splitter(); } /** * * @return */ const Eigen::ColumnVector<Scalar, SIZE> solve() { Eigen::ColumnVector<Scalar, SIZE> oldSolution(this->solution); std::vector<Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>> inverses(blocks.size()); // compute the inverses of the blocks and memorize it #pragma omp parallel for for (int i = 0; i < blocks.size(); ++i) { inverses[i] = this->A.block(blocks[i].startCol, blocks[i].startRow, blocks[i].cols, blocks[i].rows).inverse(); } // start iterations std::vector<int> index; for (this->iteration=0L; this->iteration < this->iterations; ++this->iteration) { #pragma omp parallel for firstprivate(oldSolution) schedule(dynamic) for (int i = 0; i < inverses.size(); ++i) { // set zero the components of the solution b that corresponds to the inverse Eigen::ColumnVector<Scalar, Eigen::Dynamic> oldBlock = oldSolution.segment(blocks[i].startCol, blocks[i].cols); auto zeroBlock = oldSolution.segment(blocks[i].startCol, blocks[i].cols); 
zeroBlock.setZero(); // the segment of the solution b that this inverse approximates auto block = this->solution.segment(blocks[i].startCol, blocks[i].cols); // approximate the solution using the inverse and the solution at the previous iteration block = inverses[i] * (this->b - (this->A * oldSolution)).segment(blocks[i].startCol, blocks[i].cols); zeroBlock = block; if ((oldBlock - block).template lpNorm<1>() <= this->tolerance*block.size()) { #pragma omp critical index.emplace_back(i); } } if (!index.empty()) { std::sort(index.rbegin(), index.rend()); for (auto i : index) { blocks.erase(blocks.begin() + i); inverses.erase(inverses.begin() + i); } index.clear(); if (inverses.empty()) break; } std::swap(this->solution, oldSolution); } std::cout << this->iteration << std::endl; return this->solution; } protected: ulonglong blockSize; std::vector<Index> blocks; void splitter() { for (ulonglong i = 0; i < this->A.cols(); i += blockSize) { blocks.emplace_back(Index(i, std::min(blockSize, (ulonglong)this->A.cols() - i), i, std::min(blockSize, (ulonglong)this->A.rows() - i))); } } private: }; } #endif //PARALLELITERATIVE_BLOCKSJACOBI_H
for-loop.c
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <omp.h>
#include <sys/time.h>
#include <time.h>

/* Naive exponential-time Fibonacci; used purely as a tunable CPU-bound
 * workload for each loop iteration. */
int fib_seq (int n)
{/*{{{*/
    if (n<2) return n;
    else return fib_seq(n-1) + fib_seq(n-2);
}/*}}}*/

/* Wall-clock time in microseconds since the epoch. */
long get_usecs(void)
{/*{{{*/
    struct timeval t;
    gettimeofday(&t, ((void *) 0));
    return t.tv_sec * 1000000 + t.tv_usec;
}/*}}}*/

/*
 * Benchmark driver: runs 'num_loops' dynamically-scheduled OpenMP parallel
 * loops of up to 'num_iter' iterations, each iteration computing
 * fib_seq(work_int).  With 'randomize', the per-loop iteration count is
 * drawn uniformly from [1, num_iter].  Timing is reported on stderr.
 * Note: extra arguments only print the usage line; execution continues
 * with whatever arguments were parsed.
 */
int main(int argc, char **argv)
{/*{{{*/
    if (argc > 5)
        fprintf(stderr, "Usage: %s num_iterations work_intensity num_loops randomize\n", argv[0]);
    /* defaults, overridden positionally below */
    int num_iter = 512;
    int work_int = 32;
    int num_loops = 1;
    int randomize = 0;
    if(argc > 1)
        num_iter = atoi(argv[1]);
    if(argc > 2)
        work_int = atoi(argv[2]);
    if(argc > 3)
        num_loops = atoi(argv[3]);
    if(argc > 4)
        randomize = 1;

    if(randomize)
        srand(time(NULL)); /*srand(42)*/;

    fprintf(stderr, "Running %s %d %d %d %d ...\n", argv[0], num_iter, work_int, num_loops, randomize);

    long par_time_start = get_usecs();
    for (int j = 0; j < num_loops; j++)
    {
        int k = num_iter;
        if(randomize)
            k = ( rand() % num_iter ) + 1;  /* at least one iteration */
        fprintf(stderr, "Starting loop %d with %d iterations ...\n", j, k);
        /* dynamic schedule: iterations are uniform here, but this mirrors
         * irregular-workload experiments */
        #pragma omp parallel for schedule(dynamic)
        for (int i = 0; i < k; i++) {
            int result = fib_seq(work_int);
            fprintf(stderr, "iteration %d thread %d result = %d\n", i, omp_get_thread_num(), result);
        }
    }
    long par_time_end = get_usecs();
    double par_time = (double)( par_time_end - par_time_start) / 1000000;
    fprintf(stderr, "Execution time = %f s\n", par_time);

#ifdef CHECK_RESULT
    fprintf(stderr, "%s(%d,%d,%d,%d), check result = %s\n", argv[0], num_iter, work_int, num_loops, randomize, "NOT PERFORMED");
#endif
    return 0;
}/*}}}*/
adatm_cpd.c
/******************************************************************************
 * INCLUDES
 *****************************************************************************/

#include "base.h"
#include "matrix.h"
#include "mttkrp.h"
#include "timer.h"
#include "thd_info.h"
#include "util.h"
#include "cpd.h"

#include "adatm_base.h"
#include "adatm_cpd.h"

#include <math.h>
#include <omp.h>


/******************************************************************************
 * SPLATT PRIVATE FUNCTIONS
 *****************************************************************************/

/**
* @brief Resets serial and MPI timers that were activated during some CPD
*        pre-processing.
*
* @param rinfo MPI rank information.
*/
static void p_reset_cpd_timers(
  rank_info const * const rinfo)
{
  timer_reset(&timers[TIMER_ATA]);
#ifdef SPLATT_USE_MPI
  timer_reset(&timers[TIMER_MPI]);
  timer_reset(&timers[TIMER_MPI_IDLE]);
  timer_reset(&timers[TIMER_MPI_COMM]);
  timer_reset(&timers[TIMER_MPI_ATA]);
  timer_reset(&timers[TIMER_MPI_REDUCE]);
  timer_reset(&timers[TIMER_MPI_NORM]);
  timer_reset(&timers[TIMER_MPI_UPDATE]);
  timer_reset(&timers[TIMER_MPI_FIT]);
  MPI_Barrier(rinfo->comm_3d);
#endif
}


/**
* @brief Find the Frobenius norm squared of a Kruskal tensor. This is
*        equivalent to computing <X,X>, the inner product of X with itself.
*        We find this via \lambda^T (AtA * BtB * ...) \lambda, where * is the
*        Hadamard product.
*
* @param nmodes The number of modes in the tensor.
* @param lambda The vector of column norms.
* @param aTa An array of Gram Matrices (AtA, BtB, ...).
*
* @return The Frobenius norm of X, squared.
*/
static val_t p_kruskal_norm(
  idx_t const nmodes,
  val_t const * const restrict lambda,
  matrix_t ** aTa)
{
  idx_t const rank = aTa[0]->J;
  val_t * const restrict av = aTa[MAX_NMODES]->vals;

  val_t norm_mats = 0;

  /* use aTa[MAX_NMODES] as scratch space */
  for(idx_t x=0; x < rank*rank; ++x) {
    av[x] = 1.;
  }

  /* aTa[MAX_NMODES] = hada(aTa) */
  for(idx_t m=0; m < nmodes; ++m) {
    val_t const * const restrict atavals = aTa[m]->vals;
    for(idx_t x=0; x < rank*rank; ++x) {
      av[x] *= atavals[x];
    }
  }

  /* now compute lambda^T * aTa[MAX_NMODES] * lambda */
  for(idx_t i=0; i < rank; ++i) {
    for(idx_t j=0; j < rank; ++j) {
      norm_mats += av[j+(i*rank)] * lambda[i] * lambda[j];
    }
  }

  return fabs(norm_mats);
}


/**
* @brief Compute the inner product of a Kruskal tensor and an unfactored
*        tensor. Assumes that 'm1' contains the MTTKRP result along the last
*        mode of the two input tensors. This naturally follows the end of a
*        CPD iteration.
*
* @param nmodes The number of modes in the input tensors.
* @param rinfo MPI rank information.
* @param thds OpenMP thread data structures.
* @param lambda The vector of column norms.
* @param mats The Kruskal-tensor matrices.
* @param m1 The result of doing MTTKRP along the last mode.
*
* @return The inner product of the two tensors, computed via:
*         1^T hadamard(mats[nmodes-1], m1) \lambda.
*/
static val_t p_tt_kruskal_inner(
  idx_t const nmodes,
  rank_info * const rinfo,
  thd_info * const thds,
  val_t const * const restrict lambda,
  matrix_t ** mats,
  matrix_t const * const m1)
{
  idx_t const rank = mats[0]->J;
  idx_t const lastm = nmodes - 1;
  idx_t const dim = m1->I;

  val_t const * const m0 = mats[lastm]->vals;
  val_t const * const mv = m1->vals;

  val_t myinner = 0;
  #pragma omp parallel reduction(+:myinner)
  {
    int const tid = omp_get_thread_num();
    /* per-thread scratch accumulator, one entry per rank */
    val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];

    for(idx_t r=0; r < rank; ++r) {
      accumF[r] = 0.;
    }

    #pragma omp for
    for(idx_t i=0; i < dim; ++i) {
      for(idx_t r=0; r < rank; ++r) {
        accumF[r] += m0[r+(i*rank)] * mv[r+(i*rank)];
      }
    }

    /* accumulate everything into 'myinner' */
    for(idx_t r=0; r < rank; ++r) {
      myinner += accumF[r] * lambda[r];
    }
  }
  val_t inner = 0.;
#ifdef SPLATT_USE_MPI
  timer_start(&timers[TIMER_MPI_FIT]);
  timer_start(&timers[TIMER_MPI_IDLE]);
  MPI_Barrier(rinfo->comm_3d);
  timer_stop(&timers[TIMER_MPI_IDLE]);
  MPI_Allreduce(&myinner, &inner, 1, SPLATT_MPI_VAL, MPI_SUM, rinfo->comm_3d);
  timer_stop(&timers[TIMER_MPI_FIT]);
#else
  inner = myinner;
#endif
  return inner;
}


/**
* @brief Compute the fit of a Kruskal tensor, Z, to an input tensor, X. This
*        is computed via 1 - [sqrt(<X,X> + <Z,Z> - 2<X,Z>) / sqrt(<X,X>)].
*
* @param nmodes The number of modes in the input tensors.
* @param rinfo MPI rank information.
* @param thds OpenMP thread data structures.
* @param ttnormsq The norm (squared) of the original input tensor, <X,X>.
* @param lambda The vector of column norms.
* @param mats The Kruskal-tensor matrices.
* @param m1 The result of doing MTTKRP along the last mode.
* @param aTa An array of matrices (length MAX_NMODES) containing BtB, CtC, etc.
*
* @return The fit of the factorization in [0, 1].
*/
static val_t p_calc_fit(
  idx_t const nmodes,
  rank_info * const rinfo,
  thd_info * const thds,
  val_t const ttnormsq,
  val_t const * const restrict lambda,
  matrix_t ** mats,
  matrix_t const * const m1,
  matrix_t ** aTa)
{
  timer_start(&timers[TIMER_FIT]);

  /* First get norm of new model: lambda^T * (hada aTa) * lambda. */
  val_t const norm_mats = p_kruskal_norm(nmodes, lambda, aTa);

  /* Compute inner product of tensor with new model */
  val_t const inner = p_tt_kruskal_inner(nmodes, rinfo, thds, lambda, mats,m1);

  val_t const residual = sqrt(ttnormsq + norm_mats - (2 * inner));
  timer_stop(&timers[TIMER_FIT]);
  return 1 - (residual / sqrt(ttnormsq));
}


/******************************************************************************
 * AdaTM PUBLIC FUNCTIONS
 *****************************************************************************/

/* Allocate factor matrices, run adaptive CPD-ALS, and package the result
 * into a splatt_kruskal. Returns SPLATT_SUCCESS. */
int splatt_cpd_als_adaptive(
    splatt_csf const * const tensors,
    rcsf_seq_adaptive * const rs_seq,
    splatt_idx_t const n_csf,
    group_properties const * const grp_prop,
    splatt_idx_t const n_grp,
    splatt_idx_t const * const use_csfs,
    splatt_idx_t const * const use_tags,
    splatt_idx_t const nfactors,
    double const * const options,
    splatt_kruskal * factored)
{
  matrix_t * mats[MAX_NMODES+1];

  idx_t nmodes = tensors->nmodes;
  rank_info rinfo;
  rinfo.rank = 0;

  // jli: allocate the maximum space for each update factor matrix.
  /* allocate factor matrices */
  idx_t maxdim = tensors->dims[argmax_elem(tensors->dims, nmodes)];
  for(idx_t m=0; m < nmodes; ++m) {
    mats[m] = (matrix_t *) mat_rand(tensors[0].dims[m], nfactors);
  }
  /* mats[MAX_NMODES] doubles as MTTKRP output scratch */
  mats[MAX_NMODES] = mat_alloc(maxdim, nfactors);

  val_t * lambda = (val_t *) splatt_malloc(nfactors * sizeof(val_t));

  /* do the factorization!
   */
  factored->fit = cpd_als_iterate_adaptive(tensors, rs_seq, n_csf, grp_prop,
     n_grp, use_csfs, use_tags, mats, lambda, nfactors, &rinfo, options);

  /* store output */
  factored->rank = nfactors;
  factored->nmodes = nmodes;
  factored->lambda = lambda;
  for(idx_t m=0; m < nmodes; ++m) {
    factored->dims[m] = tensors->dims[m];
    factored->factors[m] = mats[m]->vals;
  }

  /* clean up */
  mat_free(mats[MAX_NMODES]);
  for(idx_t m=0; m < nmodes; ++m) {
    free(mats[m]); /* just the matrix_t ptr, data is safely in factored */
  }
  return SPLATT_SUCCESS;
}


/* Core adaptive CPD-ALS loop: per mode, MTTKRP + normal equations solve,
 * column normalization, and Gram-matrix update; stops on fit convergence.
 * Returns the final fit. */
double cpd_als_iterate_adaptive(
  splatt_csf const * const tensors,
  rcsf_seq_adaptive * const rs_seq,
  splatt_idx_t const n_csf,
  group_properties const * const grp_prop,
  splatt_idx_t const n_grp,
  splatt_idx_t const * const use_csfs,
  splatt_idx_t const * const use_tags,
  matrix_t ** mats,
  val_t * const lambda,
  idx_t const nfactors,
  rank_info * const rinfo,
  double const * const opts)
{
  idx_t const nmodes = tensors[0].nmodes;
  idx_t const nthreads = (idx_t) opts[SPLATT_OPTION_NTHREADS];

  /* Setup thread structures. + 64 bytes is to avoid false sharing.
   * TODO make this better */
  omp_set_num_threads(nthreads);
  thd_info * thds =  thd_init(nthreads, 3,
    (nfactors * nfactors * sizeof(val_t)) + 64,
    0,
    (nmodes * nfactors * sizeof(val_t)) + 64);

  matrix_t * m1 = mats[MAX_NMODES];

  /* Initialize first A^T * A mats. We redundantly do the first because it
   * makes communication easier.
   */
  matrix_t * aTa[MAX_NMODES+1];
  for(idx_t m=0; m < nmodes; ++m) {
    aTa[m] = mat_alloc(nfactors, nfactors);
    mat_aTa(mats[m], aTa[m], rinfo, thds, nthreads);
  }
  /* used as buffer space */
  aTa[MAX_NMODES] = mat_alloc(nfactors, nfactors);

  /* Compute input tensor norm */
  double oldfit = 0;
  double fit = 0;
  val_t ttnormsq = csf_frobsq(tensors);

  /* setup timers */
  p_reset_cpd_timers(rinfo);
  sp_timer_t itertime;
  sp_timer_t modetime[MAX_NMODES];
  timer_start(&timers[TIMER_CPD]);

  idx_t const niters = (idx_t) opts[SPLATT_OPTION_NITER];
  for(idx_t it=0; it < niters; ++it) {
    timer_fstart(&itertime);
    // for(idx_t m=0; m < nmodes; ++m) {
    for(idx_t m=0; m < nmodes; ++m) {
      timer_fstart(&modetime[m]);
      /* m1 borrows the dimensions of the current mode's factor */
      mats[MAX_NMODES]->I = tensors[0].dims[m];
      m1->I = mats[m]->I;

      /* M1 = X * (C o B) */
      timer_start(&timers[TIMER_MTTKRP]);
      mttkrp_csf_adaptive(tensors, rs_seq, n_csf, mats, m, thds, grp_prop,
          n_grp, use_csfs[m], use_tags[m], opts);
      timer_stop(&timers[TIMER_MTTKRP]);

      /* M2 = (CtC .* BtB .* ...)^-1 */
      calc_gram_inv(m, nmodes, aTa);

      /* A = M1 * M2 */
      memset(mats[m]->vals, 0, mats[m]->I * nfactors * sizeof(val_t));
      mat_matmul(m1, aTa[MAX_NMODES], mats[m]);

      /* normalize columns and extract lambda */
      if(it == 0) {
        mat_normalize(mats[m], lambda, MAT_NORM_2, rinfo, thds, nthreads);
      } else {
        mat_normalize(mats[m], lambda, MAT_NORM_MAX, rinfo, thds,nthreads);
      }

      /* update A^T*A */
      mat_aTa(mats[m], aTa[m], rinfo, thds, nthreads);
      timer_stop(&modetime[m]);
    } /* foreach mode */

    fit = p_calc_fit(nmodes, rinfo, thds, ttnormsq, lambda, mats, m1, aTa);
    timer_stop(&itertime);

    if(rinfo->rank == 0 &&
        opts[SPLATT_OPTION_VERBOSITY] > SPLATT_VERBOSITY_NONE) {
      printf("  its = %3"SPLATT_PF_IDX" (%0.3fs)  fit = %0.5f  delta = %+0.4e\n",
          it+1, itertime.seconds, fit, fit - oldfit);
      printf("    mttkrp = %0.3fs\n", timers[TIMER_MTTKRP].seconds);
      if(opts[SPLATT_OPTION_VERBOSITY] > SPLATT_VERBOSITY_LOW) {
        for(idx_t m=0; m < nmodes; ++m) {
          printf("     mode = %1"SPLATT_PF_IDX" (%0.3fs)\n", m+1,
              modetime[m].seconds);
        }
      }
    }
    /* converged when the fit stops improving beyond tolerance */
    if(it > 0 && fabs(fit - oldfit) < opts[SPLATT_OPTION_TOLERANCE]) {
      break;
    }
    oldfit = fit;
  }
  timer_stop(&timers[TIMER_CPD]);
  printf("CPD = %0.3fs\n", timers[TIMER_CPD].seconds);

  cpd_post_process(nfactors, nmodes, mats, lambda, thds, nthreads, rinfo);

  /* CLEAN UP */
  for(idx_t m=0; m < nmodes; ++m) {
    mat_free(aTa[m]);
  }
  mat_free(aTa[MAX_NMODES]);

  thd_free(thds, nthreads);
  return fit;
}
tp1.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>

#define N_MAX 1000
#define N_THREADS 24
#define MAT_SIZE 1024
/* FIX: parenthesize the expansion. Without parentheses,
 * sizeof(int *) * M_SIZE expanded to (sizeof(int *) * MAT_SIZE) + 2,
 * so every malloc below under-allocated and rows were written out of
 * bounds. */
#define M_SIZE (MAT_SIZE + 2)

/*
 * Jacobi-style heat diffusion on a MAT_SIZE x MAT_SIZE grid with a 1-cell
 * halo. The first column is held at maximum heat (0xffffff); N_MAX
 * iterations of a 5-point stencil are run with N_THREADS OpenMP threads,
 * then the final grid is written to result.txt (one row per line).
 */
int main()
{
    FILE *file = fopen("result.txt", "w+");
    if (file == NULL)
    {
        perror("result.txt");
        return EXIT_FAILURE;
    }

    int **G1, **G2;
    G1 = (int **)malloc(sizeof(int *) * M_SIZE);
    G2 = (int **)malloc(sizeof(int *) * M_SIZE);
    for (int i = 0; i < M_SIZE; i++)
    {
        G1[i] = (int *)malloc(sizeof(int) * M_SIZE);
        G2[i] = (int *)malloc(sizeof(int) * M_SIZE);
    }
    for (int i = 0; i < M_SIZE; i++)
    {
        for (int j = 0; j < M_SIZE; j++)
        {
            G1[i][j] = 0;
            G2[i][j] = 0;
        }
    }

    //Filling the first column of the matrix with the highest heat
    for (int i = 0; i < M_SIZE; i++)
    {
        G1[i][0] = 0xffffff; //Hexcode ffffff
    }
    //------------------------------------------------------------------------------------------
    double start_time = omp_get_wtime();

    //Iterações sobre a difusão de calor (heat-diffusion iterations)
    for (int it = 0; it < N_MAX; it++)
    {
        /* One parallel region per iteration containing both worksharing
         * loops. FIX: the copy loop's "#pragma omp for" used to sit
         * outside any parallel region (it ran serially), and both inner
         * loops carried a stray extra j++ that skipped every other
         * column. The implicit barrier after the first loop keeps the
         * stencil and copy phases ordered. */
        #pragma omp parallel num_threads(N_THREADS)
        {
            #pragma omp for schedule(static)
            for (int i = 1; i < M_SIZE - 1; i++)
            {
                for (int j = 1; j < M_SIZE - 1; j++)
                {
                    G2[i][j] = (G1[i - 1][j] + G1[i + 1][j] + G1[i][j - 1] +
                                G1[i][j + 1] + G1[i][j]) / 5;
                }
            }

            //Copiar G2 para G1 (copy G2 back into G1)
            #pragma omp for schedule(static)
            for (int i = 1; i < M_SIZE - 1; i++)
            {
                for (int j = 1; j < M_SIZE - 1; j++)
                {
                    G1[i][j] = G2[i][j];
                }
            }
        }
    }

    double end_time = omp_get_wtime();
    printf("Time running: %lf\n", end_time - start_time);

    //Prints results to a file, one grid row per line
    //FIX: the newline used to be emitted once after all rows.
    for (int i = 0; i < M_SIZE; i++)
    {
        for (int j = 0; j < M_SIZE; j++)
            fprintf(file, "%d|", G1[i][j]);
        fprintf(file, "\n");
    }

    /* FIX: release resources (file handle and heap rows were leaked). */
    fclose(file);
    for (int i = 0; i < M_SIZE; i++)
    {
        free(G1[i]);
        free(G2[i]);
    }
    free(G1);
    free(G2);
    return 0;
}
core_dunpack_blasfeo.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from core_blas/core_zlacpy.c, normal z -> d, Thu Aug  8 10:20:04 2019
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

#ifdef HAVE_BLASFEO_API
#include "blasfeo_d_aux.h"
#endif

/***************************************************************************//**
 *
 * @ingroup core_lacpy
 *
 * Copies all or part of a two-dimensional matrix A to another matrix B.
 * BLASFEO-unpack variant of lacpy: when HAVE_BLASFEO_API is defined, the
 * no-transpose general copy is delegated to blasfeo_unpack_dmat (A is then
 * interpreted as a BLASFEO panel-major dmat with padded leading dimension).
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaGeneral: entire A,
 *          - PlasmaUpper:   upper triangle,
 *          - PlasmaLower:   lower triangle.
 *
 * @param[in] transa
 *          - PlasmaNoTrans:   A is not transposed,
 *          - PlasmaTrans:     A is transposed,
 *          - PlasmaConjTrans: A is conjugate transposed.
 *
 * @param[in] m
 *          The number of rows of the matrices A and B.
 *          m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrices A and B.
 *          n >= 0.
 *
 * @param[in] A
 *          The m-by-n matrix to copy.
 *
 * @param[in] lda
 *          The leading dimension of the array A.
 *          lda >= max(1,m).
 *
 * @param[out] B
 *          The m-by-n copy of the matrix A.
 *          On exit, B = A ONLY in the locations specified by uplo.
 *
 * @param[in] ldb
 *          The leading dimension of the array B.
 *          ldb >= max(1,m).
 *
 ******************************************************************************/
__attribute__((weak))
void plasma_core_dunpack_blasfeo(plasma_enum_t uplo, plasma_enum_t transa,
                 int m, int n,
                 const double *A, int lda,
                       double *B, int ldb)
{
    struct blasfeo_dmat sA;

    if (transa == PlasmaNoTrans) {
#ifdef HAVE_BLASFEO_API
        // TODO assume double precision !!!
        // NOTE(review): blasfeo_create_dmat takes a non-const memory pointer;
        // passing the const A relies on an implicit cast -- confirm the call
        // does not write through it.
        blasfeo_create_dmat(m, n, &sA, A);
        sA.cn = lda; // override padded leading dimension with caller's lda
        blasfeo_unpack_dmat(m, n, &sA, 0, 0, B, ldb);
#else
        LAPACKE_dlacpy_work(LAPACK_COL_MAJOR,
                            lapack_const(uplo),
                            m, n,
                            A, lda,
                            B, ldb);
#endif
    }
    else if (transa == PlasmaTrans) {
        switch (uplo) {
        case PlasmaUpper:
            // B = A^T restricted to the upper triangle of A
            for (int i = 0; i < imin(m, n); i++)
                for (int j = i; j < n; j++)
                    B[j + i*ldb] = A[i + j*lda];
            break;
        case PlasmaLower:
            for (int i = 0; i < m; i++)
                for (int j = 0; j <= imin(i, n); j++)
                    B[j + i*ldb] = A[i + j*lda];
            break;
        case PlasmaGeneral:
#ifdef HAVE_BLASFEO_API
            // TODO assume double precision !!!
            // NOTE(review): this branch is a stub -- with BLASFEO enabled the
            // general transposed copy silently does NOTHING (B is untouched).
            // blasfeo_create_dmat(m, n, &sB, B);
            // blasfeo_pack_tran_dmat(m, n, A, lda, &sB, 0, 0);
#else
            for (int i = 0; i < m; i++)
                for (int j = 0; j < n; j++)
                    B[j + i*ldb] = A[i + j*lda];
#endif
            break;
        }
    }
    else {
        // PlasmaConjTrans: identical to PlasmaTrans for real (double) data;
        // the conj() wrappers were dropped by the z -> d code generation.
        switch (uplo) {
        case PlasmaUpper:
            for (int i = 0; i < imin(m, n); i++)
                for (int j = i; j < n; j++)
                    B[j + i*ldb] = (A[i + j*lda]);
            break;
        case PlasmaLower:
            for (int i = 0; i < m; i++)
                for (int j = 0; j <= imin(i, n); j++)
                    B[j + i*ldb] = (A[i + j*lda]);
            break;
        case PlasmaGeneral:
#ifdef HAVE_BLASFEO_API
            // TODO
            // NOTE(review): stub -- no-op when BLASFEO is enabled.
#else
            for (int i = 0; i < m; i++)
                for (int j = 0; j < n; j++)
                    B[j + i*ldb] = (A[i + j*lda]);
#endif
            break;
        }
    }
}

/******************************************************************************/
/* OpenMP-task wrapper: registers dataflow dependences on A (in) and B (out)
 * and runs the copy only while the sequence is still successful. */
void plasma_core_omp_dunpack_blasfeo(plasma_enum_t uplo, plasma_enum_t transa,
                     int m, int n,
                     const double *A, int lda,
                           double *B, int ldb,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(in:A[0:lda*n]) \
                     depend(out:B[0:ldb*n])
    {
        if (sequence->status == PlasmaSuccess)
            plasma_core_dunpack_blasfeo(uplo, transa,
                        m, n,
                        A, lda,
                        B, ldb);
    }
}
lock.c
// RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
#include <omp.h>

/* OMPT lock-callback test: exercises the full simple-lock lifecycle
 * (init / set / unset / destroy) and verifies via FileCheck that each OMPT
 * mutex event fires with the lock's address as the wait_id and a plausible
 * codeptr_ra. The CHECK lines below are FileCheck directives and must not
 * be edited. */
int main()
{
  //need to use an OpenMP construct so that OMPT will be initialized
  #pragma omp parallel num_threads(1)
    print_ids(0);

  omp_lock_t lock;
  printf("%" PRIu64 ": &lock: %" PRIu64 "\n", ompt_get_thread_data()->value,
         (ompt_wait_id_t)(uintptr_t) &lock);
  omp_init_lock(&lock);
  print_fuzzy_address(1);
  omp_set_lock(&lock);
  print_fuzzy_address(2);
  omp_unset_lock(&lock);
  print_fuzzy_address(3);
  omp_destroy_lock(&lock);
  print_fuzzy_address(4);

  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_nest_lock'

  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]

  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: &lock: [[WAIT_ID:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_init_lock: wait_id=[[WAIT_ID]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
  // CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_lock: wait_id=[[WAIT_ID]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_lock: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
  // CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_lock: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
  // CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_destroy_lock: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
  // CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]

  return 0;
}
ZFC_VGG16_CPU.c
/* Pretrained VGG16 convolutional neural network in C language GitHUB Page: https://github.com/ZFTurbo/VGG16-Pretrained-C Author: ZFTurbo Compilation: gcc -O3 -fopenmp -lm ZFC_VGG16_CPU.c -o ZFC_VGG16_CPU.exe Usage: ZFC_VGG16_CPU.exe <weights_path> <file_with_list_of_images> <output file> <output convolution features (optional)> Example: ZFC_VGG16_CPU.exe "weights.txt" "image_list.txt" "results.txt" 1 */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <time.h> #include <omp.h> //################################################################ // Start of PAPI related variables //################################################################ #include <papi.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include "log_data.h" #include <pthread.h> extern int retval; extern int EventSet; extern int EventCode; extern int skip_papi_cntr; extern int skip_papi_cntr_threshold; extern char EventCodeStr[PAPI_MAX_STR_LEN]; extern long_long values[35]; extern struct timespec begin; extern struct timespec current; extern long long start; extern long long elapsed; extern long long microseconds; extern long long previoustime; extern pthread_mutex_t mutex_papi; int retval; int EventSet = PAPI_NULL; int EventCode; int skip_papi_cntr = 0; int skip_papi_cntr_threshold = 1; char EventCodeStr[PAPI_MAX_STR_LEN]; long_long values[35]; struct timespec begin; struct timespec current; long long start; long long elapsed; long long microseconds; long long previoustime = 0; static int NUM_THREADS = -1; pthread_mutex_t mutex_papi = PTHREAD_MUTEX_INITIALIZER; unsigned long omp_get_thread_num_wrapper(void) { return (unsigned long) omp_get_thread_num(); } //################################################################ // End of PAPI related variables //################################################################ #define NANOS 1000000000LL #ifdef _WIN32 #define _CRT_SECURE_NO_WARNINGS 1 void gettimeofday(time_t *tp, char *_) { *tp = clock(); 
return; } double get_seconds(time_t timeStart, time_t timeEnd) { return (double)(timeEnd - timeStart) / CLOCKS_PER_SEC; } #else double get_seconds(struct timeval timeStart, struct timeval timeEnd) { return ((timeEnd.tv_sec - timeStart.tv_sec) * 1000000 + timeEnd.tv_usec - timeStart.tv_usec) / 1.e6; } #endif #define SIZE 224 #define CONV_SIZE 3 int numthreads; // Weights and image block START float ***image; int cshape[13][4] = { { 64, 3, CONV_SIZE, CONV_SIZE }, { 64, 64, CONV_SIZE, CONV_SIZE }, { 128, 64, CONV_SIZE, CONV_SIZE }, { 128, 128, CONV_SIZE, CONV_SIZE }, { 256, 128, CONV_SIZE, CONV_SIZE }, { 256, 256, CONV_SIZE, CONV_SIZE }, { 256, 256, CONV_SIZE, CONV_SIZE }, { 512, 256, CONV_SIZE, CONV_SIZE }, { 512, 512, CONV_SIZE, CONV_SIZE }, { 512, 512, CONV_SIZE, CONV_SIZE }, { 512, 512, CONV_SIZE, CONV_SIZE }, { 512, 512, CONV_SIZE, CONV_SIZE }, { 512, 512, CONV_SIZE, CONV_SIZE } }; float *****wc; float **bc; int dshape[3][2] = { { 25088, 4096 }, { 4096, 4096 }, { 4096, 1000 } }; float ***wd; float **bd; // Blocks for intermediate convolutions int mem_block_shape[3] = {512, SIZE, SIZE}; float ***mem_block1; float ***mem_block2; // Blocks for dense flatten layers int mem_block_dense_shape = { 512 * 7 * 7 }; float *mem_block1_dense; float *mem_block2_dense; // Weights and image block END void reset_mem_block(float ***mem) { int i, j, k; for (i = 0; i < mem_block_shape[0]; i++) { for (j = 0; j < mem_block_shape[1]; j++) { for (k = 0; k < mem_block_shape[2]; k++) { mem[i][j][k] = 0.0; } } } } void reset_mem_block_dense(float *mem) { int i; for (i = 0; i < mem_block_dense_shape; i++) { mem[i] = 0.0; } } void init_memory() { int i, j, k, l; // Init image memory image = malloc(3 * sizeof(float**)); for (i = 0; i < 3; i++) { image[i] = malloc(SIZE * sizeof(float*)); for (j = 0; j < SIZE; j++) { image[i][j] = malloc(SIZE * sizeof(float)); } } // Init convolution weights wc = malloc(13 * sizeof(float****)); bc = malloc(13 * sizeof(float*)); for (l = 0; l < 13; l++) { wc[l] 
= malloc(cshape[l][0] * sizeof(float***)); for (i = 0; i < cshape[l][0]; i++) { wc[l][i] = malloc(cshape[l][1] * sizeof(float**)); for (j = 0; j < cshape[l][1]; j++) { wc[l][i][j] = malloc(cshape[l][2] * sizeof(float*)); for (k = 0; k < cshape[l][2]; k++) { wc[l][i][j][k] = malloc(cshape[l][3] * sizeof(float)); } } } bc[l] = malloc(cshape[l][0] * sizeof(float)); } // Init dense weights wd = malloc(3 * sizeof(float**)); bd = malloc(3 * sizeof(float*)); for (l = 0; l < 3; l++) { wd[l] = malloc(dshape[l][0] * sizeof(float*)); for (i = 0; i < dshape[l][0]; i++) { wd[l][i] = malloc(dshape[l][1] * sizeof(float)); } bd[l] = malloc(dshape[l][1] * sizeof(float)); } // Init mem_blocks mem_block1 = malloc(mem_block_shape[0] * sizeof(float**)); mem_block2 = malloc(mem_block_shape[0] * sizeof(float**)); for (i = 0; i < mem_block_shape[0]; i++) { mem_block1[i] = malloc(mem_block_shape[1] * sizeof(float*)); mem_block2[i] = malloc(mem_block_shape[1] * sizeof(float*)); for (j = 0; j < mem_block_shape[1]; j++) { mem_block1[i][j] = malloc(mem_block_shape[2] * sizeof(float)); mem_block2[i][j] = malloc(mem_block_shape[2] * sizeof(float)); } } reset_mem_block(mem_block1); reset_mem_block(mem_block2); // Init mem blocks dense mem_block1_dense = calloc(mem_block_dense_shape, sizeof(float)); mem_block2_dense = calloc(mem_block_dense_shape, sizeof(float)); } void free_memory() { int i, j, k, l; // Free image memory for (i = 0; i < 3; i++) { for (j = 0; j < SIZE; j++) { free(image[i][j]); } free(image[i]); } free(image); // Free convolution weights for (l = 0; l < 13; l++) { for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { free(wc[l][i][j][k]); } free(wc[l][i][j]); } free(wc[l][i]); } free(wc[l]); free(bc[l]); } free(wc); free(bc); // Free dense weights for (l = 0; l < 3; l++) { for (i = 0; i < dshape[l][0]; i++) { free(wd[l][i]); } free(wd[l]); free(bd[l]); } free(wd); free(bd); // Free memblocks for (i = 0; i < 
mem_block_shape[0]; i++) { for (j = 0; j < mem_block_shape[1]; j++) { free(mem_block1[i][j]); free(mem_block2[i][j]); } free(mem_block1[i]); free(mem_block2[i]); } free(mem_block1); free(mem_block2); free(mem_block1_dense); free(mem_block2_dense); } void read_weights(char *in_file, int lvls) { float dval; int i, j, k, l, z; //FILE *iin; int total_lvls_read = 0; /*iin = fopen(in_file, "r"); if (iin == NULL) { printf("File %s absent\n", in_file); exit(1); } */ // Reading convolution weights (store them flipped from begining) for (z = 0; z < 13; z++) { if (total_lvls_read >= lvls && lvls != -1) break; printf("Read conv block %d weights\n", z); for (i = 0; i < cshape[z][0]; i++) { for (j = 0; j < cshape[z][1]; j++) { for (k = 0; k < cshape[z][2]; k++) { for (l = 0; l < cshape[z][3]; l++) { //fscanf(iin, "%f", &dval); dval = rand() / RAND_MAX; wc[z][i][j][CONV_SIZE - k - 1][CONV_SIZE - l - 1] = dval; } } } } for (i = 0; i < cshape[z][0]; i++) { //fscanf(iin, "%f", &dval); dval = rand() / RAND_MAX; bc[z][i] = dval; } total_lvls_read += 1; } // Reading dense weights for (z = 0; z < 3; z++) { if (total_lvls_read >= lvls && lvls != -1) break; printf("Read dense block %d weights\n", z); for (i = 0; i < dshape[z][0]; i++) { for (j = 0; j < dshape[z][1]; j++) { dval = rand() / RAND_MAX; wd[z][i][j] = dval; } } for (i = 0; i < dshape[z][1]; i++) { dval = rand() / RAND_MAX; bd[z][i] = dval; } total_lvls_read += 1; } //fclose(iin); } void read_image(char *in_file) { int i, j, l; //FILE *iin; float dval; /*iin = fopen(in_file, "r"); if (iin == NULL) { printf("File %s absent\n", in_file); exit(1); } */ /* Reading image */ for (i = 0; i < SIZE; i++) { for (j = 0; j < SIZE; j++) { for (l = 0; l < 3; l++) { dval = rand() / RAND_MAX; //fscanf(iin, "%f", &dval); image[l][i][j] = dval; } } } // fclose(iin); } void normalize_image() { int i, j, l; float coef[3] = { 103.939, 116.779, 123.68 }; for (l = 0; l < 3; l++) { for (i = 0; i < SIZE; i++) { for (j = 0; j < SIZE; j++) { 
image[l][i][j] -= coef[l]; } } } } void convolution_3_x_3(float **matrix, float **kernel, float **out, int size) { int i, j; float sum; float zeropad[SIZE + 2][SIZE + 2] = { 0.0 }; for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { zeropad[i + 1][j + 1] = matrix[i][j]; } } for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { sum = zeropad[i][j] * kernel[0][0] + zeropad[i + 1][j] * kernel[1][0] + zeropad[i + 2][j] * kernel[2][0] + zeropad[i][j + 1] * kernel[0][1] + zeropad[i + 1][j + 1] * kernel[1][1] + zeropad[i + 2][j + 1] * kernel[2][1] + zeropad[i][j + 2] * kernel[0][2] + zeropad[i + 1][j + 2] * kernel[1][2] + zeropad[i + 2][j + 2] * kernel[2][2]; out[i][j] += sum; } } } void add_bias_and_relu(float **out, float bs, int size) { int i, j; for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { out[i][j] += bs; if (out[i][j] < 0) out[i][j] = 0.0; // printf("%.12lf\n", out[i][j]); } } } void add_bias_and_relu_flatten(float *out, float *bs, int size, int relu) { int i; for (i = 0; i < size; i++) { out[i] += bs[i]; if (relu == 1) { if (out[i] < 0) out[i] = 0.0; } } } float max_of_4(float a, float b, float c, float d) { if (a >= b && a >= c && a >= d) { return a; } if (b >= c && b >= d) { return b; } if (c >= d) { return c; } return d; } void maxpooling(float **out, int size) { int i, j; for (i = 0; i < size; i+=2) { for (j = 0; j < size; j+=2) { out[i / 2][j / 2] = max_of_4(out[i][j], out[i + 1][j], out[i][j + 1], out[i + 1][j + 1]); } } } void flatten(float ***in, float *out, int sh0, int sh1, int sh2) { int i, j, k, total = 0; for (i = 0; i < sh0; i++) { for (j = 0; j < sh1; j++) { for (k = 0; k < sh2; k++) { out[total] = in[i][j][k]; total += 1; } } } } void dense(float *in, float **weights, float *out, int sh_in, int sh_out) { int i, j; #pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < sh_out; i++) { float sum = 0.0; for (j = 0; j < sh_in; j++) { sum += in[j] * weights[j][i]; } out[i] = sum; } } void 
softmax(float *out, int sh_out) { int i; float max_val, sum; max_val = out[0]; for (i = 1; i < sh_out; i++) { if (out[i] > max_val) max_val = out[i]; } sum = 0.0; for (i = 0; i < sh_out; i++) { out[i] = exp(out[i] - max_val); sum += out[i]; } for (i = 0; i < sh_out; i++) { out[i] /= sum; } } void dump_memory_structure_conv(float ***mem, int sh0, int sh1, int sh2) { int i, j, k; for (i = 0; i < sh0; i++) { for (j = 0; j < sh1; j++) { for (k = 0; k < sh2; k++) { printf("%.12lf\n", mem[i][j][k]); } } } } void dump_memory_structure_conv_to_file(float ***mem, int sh0, int sh1, int sh2) { FILE *out; int i, j, k; out = fopen("debug_c.txt", "w"); for (i = 0; i < sh0; i++) { for (j = 0; j < sh1; j++) { for (k = 0; k < sh2; k++) { fprintf(out, "%.12lf\n", mem[i][j][k]); } } } fclose(out); } void dump_memory_structure_dense(float *mem, int sh0) { int i; for (i = 0; i < sh0; i++) { printf("%.12lf\n", mem[i]); } } void dump_memory_structure_dense_to_file(float *mem, int sh0) { FILE *out; int i; out = fopen("debug_c.txt", "w"); for (i = 0; i < sh0; i++) { fprintf(out, "%.12lf\n", mem[i]); } fclose(out); } void dump_image() { int i, j, k; for (i = 0; i < 3; i++) { for (j = 0; j < SIZE; j++) { for (k = 0; k < SIZE; k++) { printf("%.12lf\n", image[i][j][k]); } } } } void get_VGG16_predict(int only_convolution) { int i, j; int level, cur_size; // Init intermediate memory reset_mem_block(mem_block1); reset_mem_block(mem_block2); reset_mem_block_dense(mem_block1_dense); reset_mem_block_dense(mem_block2_dense); waca_papi_read("Hello"); // Layer 1 (Convolution 3 -> 64) level = 0; cur_size = SIZE; #pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { convolution_3_x_3(image[j], wc[level][i][j], mem_block1[i], cur_size); } add_bias_and_relu(mem_block1[i], bc[level][i], cur_size); } waca_papi_read("Hello"); // Layer 2 (Convolution 64 -> 64) level = 1; #pragma omp parallel for 
private(j) schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], cur_size); } add_bias_and_relu(mem_block2[i], bc[level][i], cur_size); } reset_mem_block(mem_block1); waca_papi_read("Hello"); // Layer 3 (MaxPooling) #pragma omp parallel for schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < cshape[level][0]; i++) { maxpooling(mem_block2[i], cur_size); } cur_size /= 2; waca_papi_read("Hello"); // Layer 4 (Convolution 64 -> 128) level = 2; #pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { convolution_3_x_3(mem_block2[j], wc[level][i][j], mem_block1[i], cur_size); } add_bias_and_relu(mem_block1[i], bc[level][i], cur_size); } reset_mem_block(mem_block2); waca_papi_read("Hello"); // Layer 5 (Convolution 128 -> 128) level = 3; #pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], cur_size); } add_bias_and_relu(mem_block2[i], bc[level][i], cur_size); } reset_mem_block(mem_block1); waca_papi_read("Hello"); // Layer 6 (MaxPooling) #pragma omp parallel for schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < cshape[level][0]; i++) { maxpooling(mem_block2[i], cur_size); } cur_size /= 2; waca_papi_read("Hello"); // Layer 7 (Convolution 128 -> 256) level = 4; #pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { convolution_3_x_3(mem_block2[j], wc[level][i][j], mem_block1[i], cur_size); } add_bias_and_relu(mem_block1[i], bc[level][i], cur_size); } reset_mem_block(mem_block2); waca_papi_read("Hello"); // Layer 8 (Convolution 256 -> 256) level 
= 5; #pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], cur_size); } add_bias_and_relu(mem_block2[i], bc[level][i], cur_size); } reset_mem_block(mem_block1); waca_papi_read("Hello"); // Layer 9 (Convolution 256 -> 256) level = 6; #pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { convolution_3_x_3(mem_block2[j], wc[level][i][j], mem_block1[i], cur_size); } add_bias_and_relu(mem_block1[i], bc[level][i], cur_size); } reset_mem_block(mem_block2); waca_papi_read("Hello"); // Layer 10 (MaxPooling) #pragma omp parallel for schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < cshape[level][0]; i++) { maxpooling(mem_block1[i], cur_size); } cur_size /= 2; waca_papi_read("Hello"); // Layer 11 (Convolution 256 -> 512) level = 7; #pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], cur_size); } add_bias_and_relu(mem_block2[i], bc[level][i], cur_size); } reset_mem_block(mem_block1); waca_papi_read("Hello"); // Layer 12 (Convolution 512 -> 512) level = 8; #pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { convolution_3_x_3(mem_block2[j], wc[level][i][j], mem_block1[i], cur_size); } add_bias_and_relu(mem_block1[i], bc[level][i], cur_size); } reset_mem_block(mem_block2); waca_papi_read("Hello"); // Layer 13 (Convolution 512 -> 512) level = 9; #pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { 
convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], cur_size); } add_bias_and_relu(mem_block2[i], bc[level][i], cur_size); } reset_mem_block(mem_block1); waca_papi_read("Hello"); // Layer 14 (MaxPooling) #pragma omp parallel for schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < cshape[level][0]; i++) { maxpooling(mem_block2[i], cur_size); } cur_size /= 2; waca_papi_read("Hello"); // Layer 15 (Convolution 512 -> 512) level = 10; #pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { convolution_3_x_3(mem_block2[j], wc[level][i][j], mem_block1[i], cur_size); } add_bias_and_relu(mem_block1[i], bc[level][i], cur_size); } reset_mem_block(mem_block2); waca_papi_read("Hello"); // Layer 16 (Convolution 512 -> 512) level = 11; #pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], cur_size); } add_bias_and_relu(mem_block2[i], bc[level][i], cur_size); } reset_mem_block(mem_block1); waca_papi_read("Hello"); // Layer 17 (Convolution 512 -> 512) level = 12; #pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { convolution_3_x_3(mem_block2[j], wc[level][i][j], mem_block1[i], cur_size); } add_bias_and_relu(mem_block1[i], bc[level][i], cur_size); } reset_mem_block(mem_block2); waca_papi_read("Hello"); // Layer 18 (MaxPooling) #pragma omp parallel for schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < cshape[level][0]; i++) { maxpooling(mem_block1[i], cur_size); } cur_size /= 2; waca_papi_read("Hello"); // Layer 19 (Flatten) flatten(mem_block1, mem_block1_dense, cshape[level][0], cur_size, cur_size); if (only_convolution == 1) { return; } waca_papi_read("Hello"); // 
Layer 20 (Dense) level = 0; dense(mem_block1_dense, wd[level], mem_block2_dense, dshape[level][0], dshape[level][1]); add_bias_and_relu_flatten(mem_block2_dense, bd[level], dshape[level][1], 1); reset_mem_block_dense(mem_block1_dense); waca_papi_read("Hello"); // Layer 21 (Dense) level = 1; dense(mem_block2_dense, wd[level], mem_block1_dense, dshape[level][0], dshape[level][1]); add_bias_and_relu_flatten(mem_block1_dense, bd[level], dshape[level][1], 1); reset_mem_block_dense(mem_block2_dense); waca_papi_read("Hello"); // Layer 22 (Dense) level = 2; dense(mem_block1_dense, wd[level], mem_block2_dense, dshape[level][0], dshape[level][1]); add_bias_and_relu_flatten(mem_block2_dense, bd[level], dshape[level][1], 1); softmax(mem_block2_dense, dshape[level][1]); // dump_memory_structure_dense_to_file(mem_block2_dense, dshape[level][1]); waca_papi_read("Hello"); return; } void output_predictions(FILE *out, int only_convolution) { int i; if (only_convolution == 1) { for (i = 0; i < 512*7*7; i++) { fprintf(out, "%g ", mem_block1_dense[i]); } } else { for (i = 0; i < dshape[2][1]; i++) { fprintf(out, "%g ", mem_block2_dense[i]); } } fprintf(out, "\n"); } char *trimwhitespace(char *str) { char *end; // Trim leading space while (isspace((unsigned char)*str)) str++; if (*str == 0) // All spaces? 
return str; // Trim trailing space end = str + strlen(str) - 1; while (end > str && isspace((unsigned char)*end)) end--; // Write new null terminator *(end + 1) = 0; return str; } int main(int argc, char *argv[]) { FILE *file_list, *results; char buf[1024]; #ifndef _WIN32 struct timeval timeStart, timeEnd; #else time_t timeStart, timeEnd; #endif double deltaTime; //char *weights_file; char *image_list_file; char *output_file; int lvls = -1; int only_convolution = 0; #ifdef _OPENMP numthreads = omp_get_num_procs() - 1; #endif /*if (numthreads < 1) numthreads = 1; numthreads = 1; */ if (argc != 4 && argc != 5) { printf("Usage: <program.exe> <number of threads> <images list file> <output file> <only_convolution [optional]>\n"); return 0; } //weights_file = argv[1]; numthreads = atoi(argv[1]); printf("Using %d threads\n", numthreads); image_list_file = argv[2]; output_file = argv[3]; if (argc == 5) { lvls = 13; only_convolution = 1; } // ------ PAPI ------- papi_init(); // Start the timer if (clock_gettime(CLOCK_MONOTONIC , &begin)) { exit(EXIT_FAILURE); } // Start time in nanoseconds start = begin.tv_sec*NANOS + begin.tv_nsec; // ------ PAPI ------- init_memory(); file_list = fopen(image_list_file, "r"); if (file_list == NULL) { printf("Check file list location: %s", image_list_file); return 1; } results = fopen(output_file, "w"); if (results == NULL) { printf("Couldn't open file for writing: %s", output_file); return 1; } gettimeofday(&timeStart, NULL); //read_weights(weights_file, lvls); gettimeofday(&timeEnd, NULL); deltaTime = get_seconds(timeStart, timeEnd); printf("Reading weights: %.3lf sec\n", deltaTime); // while (!feof(file_list)) { gettimeofday(&timeStart, NULL); fgets(buf, 1024, file_list); // if (strlen(buf) == 0) { // break; // } printf("%d\n", strlen(buf)); read_image(trimwhitespace(buf)); normalize_image(); // dump_image(); waca_papi_read("Hello"); get_VGG16_predict(only_convolution); output_predictions(results, only_convolution); 
waca_papi_read("Hello"); gettimeofday(&timeEnd, NULL); deltaTime = get_seconds(timeStart, timeEnd); printf("Infer image %s: %.3lf sec\n", buf, deltaTime); // } free_memory(); fclose(file_list); return 0; }
selu_kernel_arm.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2020, OPEN AI LAB * Author: haitao@openailab.com */ #include <math.h> #include <arm_neon.h> #include "neon_mathfun.h" #include "selu_kernel_arm.h" void selu_kernel(int i, int id, void* data, const float* input, float* output, float alpha, float lambda) { float alpha_lambda = alpha * lambda; int step = (( int* )data)[0]; float32x4_t _one = vdupq_n_f32(1.f); float32x4_t _zero = vdupq_n_f32(0.f); float32x4_t _alpha_lambda = vdupq_n_f32(alpha_lambda); float32x4_t _lambda = vdupq_n_f32(lambda); const float* cur_input = input + id * step; float* cur_output = output + id * step; for (int i = 0; i < (step & -4); i += 4) { float32x4_t _p = vld1q_f32(cur_input); uint32x4_t _lemask = vcleq_f32(_p, _zero); float32x4_t _nps = exp_ps(_p); _nps = vsubq_f32(_nps, _one); _nps = vmulq_f32(_nps, _alpha_lambda); _p = vmulq_f32(_p, _lambda); _p = vbslq_f32(_lemask, _nps, _p); vst1q_f32(cur_output, _p); cur_input += 4; cur_output += 4; } for (int i = step & ~3; i < step; i++) { if (cur_input[0] < 0.f) cur_output[0] = (exp(cur_input[0]) - 1.f) * alpha_lambda; else cur_output[0] = cur_input[0] * lambda; cur_input++; cur_output++; } } int selu_run(struct ir_tensor* output_tensor, struct ir_tensor* 
input_tensor, struct selu_param* selu_param, int num_thread) { float* data = ( float* )input_tensor->data; float* out_data = ( float* )output_tensor->data; float alpha = selu_param->alpha; float lambda = selu_param->lambda; int chan_num = input_tensor->dims[0] * input_tensor->dims[1]; int chan_size = input_tensor->dims[2] * input_tensor->dims[3]; #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < chan_num; i++) { int offset = i * chan_size; selu_kernel(0, 0, &chan_size, data + offset, out_data + offset, alpha, lambda); } return 0; }
hoPartialDerivativeOperator.h
/** \file hoPartialDerivativeOperator.h \brief Partial derivative regularization operator, CPU based. */ #pragma once #include "partialDerivativeOperator.h" #include "hoNDArray_math.h" #include "vector_td_utilities.h" #ifdef USE_OMP #include <omp.h> #endif namespace Gadgetron{ /** \class hoPartialDerivativeOperator \brief CPU implementation of device dependent portions of the partialDerivative operator. */ template <class T, unsigned int D> class hoPartialDerivativeOperator : public partialDerivativeOperator<D, hoNDArray<T> > { public: hoPartialDerivativeOperator() : partialDerivativeOperator< D, hoNDArray<T> >(0) {} hoPartialDerivativeOperator( size_t dimension ) : partialDerivativeOperator<D, hoNDArray<T> >( dimension ) {} virtual ~hoPartialDerivativeOperator() {} virtual void compute_partial_derivative( typename int64d<D>::Type stride, hoNDArray<T> *in, hoNDArray<T> *out, bool accumulate ) { if( !in || !out || in->get_number_of_elements() != out->get_number_of_elements() ){ throw std::runtime_error( "hoPartialDerivativeOperator::compute_partial_derivative : array dimensions mismatch."); } if( in->get_number_of_dimensions() != D || out->get_number_of_dimensions() != D ){ throw std::runtime_error("hoPartialDerivativeOperator::compute_partial_derivative : dimensionality mismatch"); } typename int64d<D>::Type dims = vector_td<long long,D>( from_std_vector<size_t,D>( *(in->get_dimensions().get()) )); #ifdef USE_OMP #pragma omp parallel for #endif for( long long idx=0; idx<in->get_number_of_elements(); idx++ ) { T valN, valC; typename int64d<D>::Type co = idx_to_co(idx, dims); typename int64d<D>::Type coN = (co+dims+stride)%dims; valN = in->get_data_ptr()[co_to_idx(coN, dims)]; valC = in->get_data_ptr()[co_to_idx(co, dims)]; T val = valN-valC; if( accumulate ) out->get_data_ptr()[idx] += val; else out->get_data_ptr()[idx] = val; } } virtual void compute_second_order_partial_derivative( typename int64d<D>::Type forwards_stride, typename int64d<D>::Type adjoint_stride, 
hoNDArray<T> *in, hoNDArray<T> *out, bool accumulate ) { if( !in || !out || in->get_number_of_elements() != out->get_number_of_elements() ){ throw std::runtime_error( "hoPartialDerivativeOperator::compute_second_order_partial_derivative : array dimensions mismatch."); } if( in->get_number_of_dimensions() != D || out->get_number_of_dimensions() != D ){ throw std::runtime_error( "hoPartialDerivativeOperator::compute_second_order_partial_derivative : dimensionality mismatch"); } typename int64d<D>::Type dims = vector_td<long long,D>( from_std_vector<size_t,D>( *(in->get_dimensions().get()) )); #ifdef USE_OMP #pragma omp parallel for #endif for( long long idx=0; idx<in->get_number_of_elements(); idx++ ) { T valN1, valN2, valC; typename int64d<D>::Type co = idx_to_co(idx, dims); typename int64d<D>::Type coN1 = (co+dims+forwards_stride)%dims; typename int64d<D>::Type coN2 = (co+dims+adjoint_stride)%dims; valN1 = in->get_data_ptr()[co_to_idx(coN1, dims)]; valN2 = in->get_data_ptr()[co_to_idx(coN2, dims)]; valC = in->get_data_ptr()[co_to_idx(co, dims)]; T val = valC+valC-valN1-valN2; if( accumulate ) out->get_data_ptr()[idx] += val; else out->get_data_ptr()[idx] = val; } } }; }
Layer_Im2Mat.h
/* * Layers.h * rl * * Created by Guido Novati on 11.02.16. * Copyright 2016 ETH Zurich. All rights reserved. * */ #pragma once #include "Layers.h" // Im2MatLayer gets as input an image of sizes InX * InY * InC // and prepares the output for convolution with a filter of size KnY * KnX * KnC // and output an image of size OpY * OpX * KnC template <int InX, int InY, int InC, // input image: x:width, y:height, c:color channels int KnX, int KnY, int KnC, // filter: x:width, y:height, c:color channels int Sx, int Sy, // stride x/y int Px, int Py, // padding x/y int OpX, int OpY // output img: x:width, y:height, same color channels as KnC > struct Im2MatLayer : public Layer { // Im2ColLayer has no parameters: Params *allocate_params() const override { return nullptr; } Im2MatLayer(const int _ID) : Layer(OpY * OpX * KnY * KnX * InC, _ID) { static_assert(Sx > 0 && Sy > 0, "Invalid stride"); static_assert(Px >= 0 && Py >= 0, "Invalid kernel"); print(); } void print() { printf("(%d) Im2Col transform Img:[%d %d %d] to Mat:[%d %d %d %d %d] ", ID, InY, InX, InC, OpY, OpX, KnY, KnX, InC); printf("with Stride:[%d %d] and Padding:[%d %d]\n", Sx, Sy, Px, Py); } void forward(const std::vector<Activation *> &act, const std::vector<Params *> &param) const override { const int batchSize = act[ID]->batchSize; assert(act[ID - 1]->layersSize == InX * InY * InC); assert(act[ID]->layersSize == OpY * OpX * KnY * KnX * InC); Im2Mat(batchSize, act[ID - 1]->output, act[ID]->output); } void bckward(const std::vector<Activation *> &act, const std::vector<Params *> &param, const std::vector<Params *> &grad) const override { const int batchSize = act[ID]->batchSize; assert(act[ID - 1]->layersSize == InX * InY * InC); assert(act[ID]->layersSize == OpY * OpX * KnY * KnX * InC); Mat2Im(batchSize, act[ID]->dError_dOutput, act[ID - 1]->dError_dOutput); } void Im2Mat(const int BS, const Real *const lin_inp, Real *const lin_out) const { using InputImages = Real[][InY][InX][InC]; using OutputMatrices = 
Real[][OpY][OpX][KnY][KnX][InC]; // Convert pointers to a reference to multi dim arrays for easy access: // 1) INP is a reference: i'm not creating new data // 2) The type of INP is an array of sizes [???][InY][InX][InC] // 3) The first dimension is the batchsize and is not known at compile time // 4) Because it's the slowest index the compiler does not complain // 5) The conversion should be read from right to left: (A) convert lin_inp // to pointer to a static multi-array of size [???][InY][InX][InC] // (B) Return the reference of the memory space pointed at by a. const InputImages &INP = *(InputImages *)lin_inp; // (B)( A ) OutputMatrices &OUT = *(OutputMatrices *)lin_out; // clean up memory space of lin_out. Why? Because padding, that's why. #if 1 for (int bc = 0; bc < BS; bc++) for (int oy = 0; oy < OpY; oy++) for (int ox = 0; ox < OpX; ox++) for (int fy = 0; fy < KnY; fy++) for (int fx = 0; fx < KnX; fx++) for (int ic = 0; ic < InC; ic++) OUT[bc][oy][ox][fy][fx][ic] = 0; #else memset(lin_out, 0, BS * OpY * OpX * KnY * KnX * InC * sizeof(Real)); #endif #pragma omp parallel for collapse(5) for (int bc = 0; bc < BS; ++bc) { for (int oy = 0; oy < OpY; ++oy) { for (int ox = 0; ox < OpX; ++ox) { for (int fy = 0; fy < KnY; ++fy) { for (int fx = 0; fx < KnX; ++fx) { // index along input map of the convolution op const int ix = ox * Sx - Px + fx; const int iy = oy * Sy - Py + fy; // padding: skip addition if outside input boundaries if (ix < 0 || ix >= InX || iy < 0 || iy >= InY) { continue; } for (int ic = 0; ic < InC; ++ic) { OUT[bc][oy][ox][fy][fx][ic] = INP[bc][iy][ix][ic]; } } } } } } } void Mat2Im(const int BS, const Real *const lin_inp, Real *const lin_out) const { using InputImages = Real[][InY][InX][InC]; using OutputMatrices = Real[][OpY][OpX][KnY][KnX][InC]; // Output is d Loss d Input, same size as INP before: InputImages &dLdINP = *(InputImages *)lin_out; // Input is d Loss d Output, same size as OUT before: const OutputMatrices &dLdOUT = *(OutputMatrices 
*)lin_inp; // Mat2Im accesses memory with plus equal: reset field #if 1 for (int bc = 0; bc < BS; bc++) for (int iy = 0; iy < InY; iy++) for (int ix = 0; ix < InX; ix++) for (int ic = 0; ic < InC; ic++) dLdINP[bc][iy][ix][ic] = 0; #else memset(lin_out, 0, BS * InY * InX * InC * sizeof(Real)); #endif #pragma omp parallel for collapse(5) for (int bc = 0; bc < BS; ++bc) { for (int oy = 0; oy < OpY; ++oy) { for (int ox = 0; ox < OpX; ++ox) { for (int fy = 0; fy < KnY; ++fy) { for (int fx = 0; fx < KnX; ++fx) { // index along input map of the convolution op const int ix = ox * Sx - Px + fx; const int iy = oy * Sy - Py + fy; // padding: skip addition if outside input boundaries if (ix < 0 || ix >= InX || iy < 0 || iy >= InY) { continue; } for (int ic = 0; ic < InC; ++ic) { dLdINP[bc][iy][ix][ic] += dLdOUT[bc][oy][ox][fy][fx][ic]; } } } } } } } void init(std::mt19937 &G, const std::vector<Params *> &P) const override {} };