source
stringlengths
3
92
c
stringlengths
26
2.25M
GB_binop__minus_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__minus_fc64) // A.*B function (eWiseMult): GB (_AemultB_08__minus_fc64) // A.*B function (eWiseMult): GB (_AemultB_02__minus_fc64) // A.*B function (eWiseMult): GB (_AemultB_04__minus_fc64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_fc64) // A*D function (colscale): GB (_AxD__minus_fc64) // D*A function (rowscale): GB (_DxB__minus_fc64) // C+=B function (dense accum): GB (_Cdense_accumB__minus_fc64) // C+=b function (dense accum): GB (_Cdense_accumb__minus_fc64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_fc64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_fc64) // C=scalar+B GB (_bind1st__minus_fc64) // C=scalar+B' GB (_bind1st_tran__minus_fc64) // C=A+scalar GB (_bind2nd__minus_fc64) // C=A'+scalar GB (_bind2nd_tran__minus_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // B,b type: GxB_FC64_t // BinaryOp: cij = GB_FC64_minus (aij, bij) #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define 
GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_FC64_minus (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_FC64 || GxB_NO_MINUS_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__minus_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__minus_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__minus_fc64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__minus_fc64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__minus_fc64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool 
D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__minus_fc64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__minus_fc64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__minus_fc64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool 
Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__minus_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__minus_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__minus_fc64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__minus_fc64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for 
num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_FC64_minus (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__minus_fc64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_FC64_minus (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC64_minus (x, aij) ; \ } GrB_Info GB (_bind1st_tran__minus_fc64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC64_minus (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__minus_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unaryop__lnot_fp32_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_fp32_int8 // op(A') function: GB_tran__lnot_fp32_int8 // C type: float // A type: int8_t // cast: float cij = (float) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int8_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ float z = (float) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_fp32_int8 ( float *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_fp32_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
pf_fold.c
/* * partiton function for single RNA secondary structures * * Simplified interfaces and backward compatibility * wrappers * * Ivo L Hofacker + Ronny Lorenz * Vienna RNA package */ #ifdef HAVE_CONFIG_H #include "config.h" #endif /*###########################################*/ /*# deprecated functions below #*/ /*###########################################*/ #ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <float.h> /* #defines FLT_MAX ... */ #include <limits.h> #include "ViennaRNA/utils/basic.h" #include "ViennaRNA/params/default.h" #include "ViennaRNA/fold_vars.h" #include "ViennaRNA/loops/all.h" #include "ViennaRNA/gquad.h" #include "ViennaRNA/constraints/hard.h" #include "ViennaRNA/constraints/soft.h" #include "ViennaRNA/mfe.h" #include "ViennaRNA/part_func.h" #ifdef _OPENMP #include <omp.h> #endif /* ################################# # GLOBAL VARIABLES # ################################# */ PUBLIC int st_back = 0; /* ################################# # PRIVATE VARIABLES # ################################# */ /* some backward compatibility stuff */ PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL; PRIVATE int backward_compat = 0; #ifdef _OPENMP #pragma omp threadprivate(backward_compat_compound, backward_compat) #endif /* ################################# # PRIVATE FUNCTION DECLARATIONS # ################################# */ PRIVATE float wrap_pf_fold(const char *sequence, char *structure, vrna_exp_param_t *parameters, int calculate_bppm, int is_constrained, int is_circular); PRIVATE double wrap_mean_bp_distance(FLT_OR_DBL *p, int length, int *index, int turn); /* ################################# # BEGIN OF FUNCTION DEFINITIONS # ################################# */ PRIVATE double wrap_mean_bp_distance(FLT_OR_DBL *p, int length, int *index, int turn) { int i, j; double d = 0.; /* compute the mean base pair distance in the thermodynamic ensemble */ /* <d> = \sum_{a,b} 
p_a p_b d(S_a,S_b) * this can be computed from the pair probs p_ij as * <d> = \sum_{ij} p_{ij}(1-p_{ij}) */ for (i = 1; i <= length; i++) for (j = i + turn + 1; j <= length; j++) d += p[index[i] - j] * (1 - p[index[i] - j]); return 2 * d; } PRIVATE float wrap_pf_fold(const char *sequence, char *structure, vrna_exp_param_t *parameters, int calculate_bppm, int is_constrained, int is_circular) { vrna_fold_compound_t *vc; vrna_md_t md; vc = NULL; /* we need vrna_exp_param_t datastructure to correctly init default hard constraints */ if (parameters) md = parameters->model_details; else set_model_details(&md); /* get global default parameters */ md.circ = is_circular; md.compute_bpp = calculate_bppm; vc = vrna_fold_compound(sequence, &md, VRNA_OPTION_DEFAULT); /* prepare exp_params and set global pf_scale */ vc->exp_params = vrna_exp_params(&(vc->params->model_details)); vc->exp_params->pf_scale = pf_scale; if (is_constrained && structure) { unsigned int constraint_options = 0; constraint_options |= VRNA_CONSTRAINT_DB | VRNA_CONSTRAINT_DB_PIPE | VRNA_CONSTRAINT_DB_DOT | VRNA_CONSTRAINT_DB_X | VRNA_CONSTRAINT_DB_ANG_BRACK | VRNA_CONSTRAINT_DB_RND_BRACK; vrna_constraints_add(vc, (const char *)structure, constraint_options); } if (backward_compat_compound && backward_compat) vrna_fold_compound_free(backward_compat_compound); backward_compat_compound = vc; backward_compat = 1; iindx = backward_compat_compound->iindx; return vrna_pf(vc, structure); } PUBLIC vrna_ep_t * stackProb(double cutoff) { if (!(backward_compat_compound && backward_compat)) { vrna_message_warning("stackProb: " "run pf_fold() first!"); return NULL; } else if (!backward_compat_compound->exp_matrices->probs) { vrna_message_warning("stackProb: " "probs == NULL!"); return NULL; } return vrna_stack_prob(backward_compat_compound, cutoff); } PUBLIC char * centroid(int length, double *dist) { if (pr == NULL) { vrna_message_warning("centroid: " "pr == NULL. 
You need to call pf_fold() before centroid()"); return NULL; } return vrna_centroid_from_probs(length, dist, pr); } PUBLIC double mean_bp_dist(int length) { /* compute the mean base pair distance in the thermodynamic ensemble */ /* <d> = \sum_{a,b} p_a p_b d(S_a,S_b) * this can be computed from the pair probs p_ij as * <d> = \sum_{ij} p_{ij}(1-p_{ij}) */ int i, j, *my_iindx; double d = 0; if (pr == NULL) { vrna_message_warning("mean_bp_dist: " "pr == NULL. You need to call pf_fold() before mean_bp_dist()"); return d; } my_iindx = vrna_idx_row_wise(length); for (i = 1; i <= length; i++) for (j = i + TURN + 1; j <= length; j++) d += pr[my_iindx[i] - j] * (1 - pr[my_iindx[i] - j]); free(my_iindx); return 2 * d; } /* get the free energy of a subsequence from the q[] array */ PUBLIC double get_subseq_F(int i, int j) { if (backward_compat_compound) if (backward_compat_compound->exp_matrices) if (backward_compat_compound->exp_matrices->q) { int *my_iindx = backward_compat_compound->iindx; vrna_exp_param_t *pf_params = backward_compat_compound->exp_params; FLT_OR_DBL *q = backward_compat_compound->exp_matrices->q; return (-log(q[my_iindx[i] - j]) - (j - i + 1) * log(pf_params->pf_scale)) * pf_params->kT / 1000.0; } vrna_message_warning("get_subseq_F: " "call pf_fold() to fill q[] array before calling get_subseq_F()"); return 0.; /* we will never get to this point */ } /*----------------------------------------------------------------------*/ PUBLIC double expHairpinEnergy(int u, int type, short si1, short sj1, const char *string) { /* compute Boltzmann weight of a hairpin loop, multiply by scale[u+2] */ vrna_exp_param_t *pf_params = backward_compat_compound->exp_params; double q, kT; kT = pf_params->kT; /* kT in cal/mol */ if (u <= 30) q = pf_params->exphairpin[u]; else q = pf_params->exphairpin[30] * exp(-(pf_params->lxc * log(u / 30.)) * 10. 
/ kT); if ((tetra_loop) && (u == 4)) { char tl[7] = { 0 }, *ts; strncpy(tl, string, 6); if ((ts = strstr(pf_params->Tetraloops, tl))) return pf_params->exptetra[(ts - pf_params->Tetraloops) / 7]; } if ((tetra_loop) && (u == 6)) { char tl[9] = { 0 }, *ts; strncpy(tl, string, 6); if ((ts = strstr(pf_params->Hexaloops, tl))) return pf_params->exphex[(ts - pf_params->Hexaloops) / 9]; } if (u == 3) { char tl[6] = { 0 }, *ts; strncpy(tl, string, 5); if ((ts = strstr(pf_params->Triloops, tl))) return pf_params->exptri[(ts - pf_params->Triloops) / 6]; if (type > 2) q *= pf_params->expTermAU; } else { /* no mismatches for tri-loops */ q *= pf_params->expmismatchH[type][si1][sj1]; } return q; } PUBLIC double expLoopEnergy(int u1, int u2, int type, int type2, short si1, short sj1, short sp1, short sq1) { /* compute Boltzmann weight of interior loop, * multiply by scale[u1+u2+2] for scaling */ double z = 0; int no_close = 0; vrna_exp_param_t *pf_params = backward_compat_compound->exp_params; if ((no_closingGU) && ((type2 == 3) || (type2 == 4) || (type == 2) || (type == 4))) no_close = 1; if ((u1 == 0) && (u2 == 0)) { /* stack */ z = pf_params->expstack[type][type2]; } else if (no_close == 0) { if ((u1 == 0) || (u2 == 0)) { /* bulge */ int u; u = (u1 == 0) ? 
u2 : u1; z = pf_params->expbulge[u]; if (u2 + u1 == 1) { z *= pf_params->expstack[type][type2]; } else { if (type > 2) z *= pf_params->expTermAU; if (type2 > 2) z *= pf_params->expTermAU; } } else { /* interior loop */ if (u1 + u2 == 2) { /* size 2 is special */ z = pf_params->expint11[type][type2][si1][sj1]; } else if ((u1 == 1) && (u2 == 2)) { z = pf_params->expint21[type][type2][si1][sq1][sj1]; } else if ((u1 == 2) && (u2 == 1)) { z = pf_params->expint21[type2][type][sq1][si1][sp1]; } else if ((u1 == 2) && (u2 == 2)) { z = pf_params->expint22[type][type2][si1][sp1][sq1][sj1]; } else if (((u1 == 2) && (u2 == 3)) || ((u1 == 3) && (u2 == 2))) { /*2-3 is special*/ z = pf_params->expinternal[5] * pf_params->expmismatch23I[type][si1][sj1] * pf_params->expmismatch23I[type2][sq1][sp1]; z *= pf_params->expninio[2][1]; } else if ((u1 == 1) || (u2 == 1)) { /*1-n is special*/ z = pf_params->expinternal[u1 + u2] * pf_params->expmismatch1nI[type][si1][sj1] * pf_params->expmismatch1nI[type2][sq1][sp1]; z *= pf_params->expninio[2][abs(u1 - u2)]; } else { z = pf_params->expinternal[u1 + u2] * pf_params->expmismatchI[type][si1][sj1] * pf_params->expmismatchI[type2][sq1][sp1]; z *= pf_params->expninio[2][abs(u1 - u2)]; } } } return z; } PUBLIC void init_pf_circ_fold(int length) { /* DO NOTHING */ } PUBLIC void init_pf_fold(int length) { /* DO NOTHING */ } /** *** Allocate memory for all matrices and other stuff **/ PUBLIC void free_pf_arrays(void) { if (backward_compat_compound && backward_compat) { vrna_fold_compound_free(backward_compat_compound); backward_compat_compound = NULL; backward_compat = 0; iindx = NULL; } } PUBLIC FLT_OR_DBL * export_bppm(void) { if (backward_compat_compound) if (backward_compat_compound->exp_matrices) if (backward_compat_compound->exp_matrices->probs) return backward_compat_compound->exp_matrices->probs; return NULL; } /*-------------------------------------------------------------------------*/ /* make arrays used for pf_fold available to other 
routines */ PUBLIC int get_pf_arrays(short **S_p, short **S1_p, char **ptype_p, FLT_OR_DBL **qb_p, FLT_OR_DBL **qm_p, FLT_OR_DBL **q1k_p, FLT_OR_DBL **qln_p) { if (backward_compat_compound) { if (backward_compat_compound->exp_matrices) if (backward_compat_compound->exp_matrices->qb) { *S_p = backward_compat_compound->sequence_encoding2; *S1_p = backward_compat_compound->sequence_encoding; *ptype_p = backward_compat_compound->ptype_pf_compat; *qb_p = backward_compat_compound->exp_matrices->qb; *qm_p = backward_compat_compound->exp_matrices->qm; *q1k_p = backward_compat_compound->exp_matrices->q1k; *qln_p = backward_compat_compound->exp_matrices->qln; return 1; } } return 0; } /*-----------------------------------------------------------------*/ PUBLIC float pf_fold(const char *sequence, char *structure) { return wrap_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained, 0); } PUBLIC float pf_circ_fold(const char *sequence, char *structure) { return wrap_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained, 1); } PUBLIC float pf_fold_par(const char *sequence, char *structure, vrna_exp_param_t *parameters, int calculate_bppm, int is_constrained, int is_circular) { return wrap_pf_fold(sequence, structure, parameters, calculate_bppm, is_constrained, is_circular); } PUBLIC char * pbacktrack(char *seq) { int n = (int)strlen(seq); return vrna_pbacktrack5(backward_compat_compound, n); } PUBLIC char * pbacktrack5(char *seq, int length) { /* the seq parameter must no differ to the one stored globally anyway, so we just ignore it */ return vrna_pbacktrack5(backward_compat_compound, length); } PUBLIC char * pbacktrack_circ(char *seq) { char *structure; vrna_md_t *md; structure = NULL; if (backward_compat_compound) { md = &(backward_compat_compound->exp_params->model_details); if (md->circ && backward_compat_compound->exp_matrices->qm2) structure = vrna_pbacktrack(backward_compat_compound); } return structure; } PUBLIC void update_pf_params(int length) 
{ if (backward_compat_compound && backward_compat) { vrna_md_t md; set_model_details(&md); vrna_exp_params_reset(backward_compat_compound, &md); /* compatibility with RNAup, may be removed sometime */ pf_scale = backward_compat_compound->exp_params->pf_scale; } } PUBLIC void update_pf_params_par(int length, vrna_exp_param_t *parameters) { if (backward_compat_compound && backward_compat) { vrna_md_t md; if (parameters) { vrna_exp_params_subst(backward_compat_compound, parameters); } else { set_model_details(&md); vrna_exp_params_reset(backward_compat_compound, &md); } /* compatibility with RNAup, may be removed sometime */ pf_scale = backward_compat_compound->exp_params->pf_scale; } } PUBLIC char * get_centroid_struct_gquad_pr(int length, double *dist) { return vrna_centroid(backward_compat_compound, dist); } PUBLIC void assign_plist_gquad_from_pr(vrna_ep_t **pl, int length, /* ignored */ double cut_off) { if (!backward_compat_compound) *pl = NULL; else if (!backward_compat_compound->exp_matrices->probs) *pl = NULL; else *pl = vrna_plist_from_probs(backward_compat_compound, cut_off); } PUBLIC double mean_bp_distance(int length) { if (backward_compat_compound) if (backward_compat_compound->exp_matrices) if (backward_compat_compound->exp_matrices->probs) return vrna_mean_bp_distance(backward_compat_compound); vrna_message_warning("mean_bp_distance: " "you need to call vrna_pf_fold first"); return 0.; /* we will never get to this point */ } PUBLIC double mean_bp_distance_pr(int length, FLT_OR_DBL *p) { double d = 0; int *index = vrna_idx_row_wise((unsigned int)length); if (p == NULL) { vrna_message_warning("mean_bp_distance_pr: " "p == NULL. You need to supply a valid probability matrix for mean_bp_distance_pr()"); return d; } d = wrap_mean_bp_distance(p, length, index, TURN); free(index); return d; } #endif
exchange_boundary.c
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
// perform a (intra-level) ghost zone exchange on vector id
//  NOTE exchange_boundary() only exchanges the boundary.
//  It will not enforce any boundary conditions
//  BC's are either the responsibility of a separate function or should be fused into the stencil
//  The argument shape indicates which of faces, edges, and corners on each box must be exchanged
//  If the specified shape exceeds the range of defined shapes, the code will default to STENCIL_SHAPE_BOX (i.e. exchange faces, edges, and corners)
//
//  Structure (order matters for communication/computation overlap):
//    1. prepost all MPI_Irecv's
//    2. pack MPI send buffers (blocks[0])
//    3. post all MPI_Isend's
//    4. perform the box-to-box copies local to this process (blocks[1]) to hide Isend/Irecv latency
//    5. MPI_Waitall on all outstanding requests
//    6. unpack MPI receive buffers (blocks[2])
//  Each stage is individually timed into level->timers.ghostZone_*.
void exchange_boundary(level_type * level, int id, int shape){
  double _timeCommunicationStart = getTime();
  double _timeStart,_timeEnd;

  if(shape>=STENCIL_MAX_SHAPES)shape=STENCIL_SHAPE_BOX;  // shape must be < STENCIL_MAX_SHAPES in order to safely index into exchange_ghosts[]
  // tag encodes both the level and the shape so messages from different exchanges cannot be confused
  int my_tag = (level->tag<<4) | shape;
  int buffer=0;
  int n;

  #ifdef USE_MPI
  int nMessages = level->exchange_ghosts[shape].num_recvs + level->exchange_ghosts[shape].num_sends;
  // recv and send requests share one requests[] array: recvs first, then sends
  MPI_Request *recv_requests = level->exchange_ghosts[shape].requests;
  MPI_Request *send_requests = level->exchange_ghosts[shape].requests + level->exchange_ghosts[shape].num_recvs;

  // loop through packed list of MPI receives and prepost Irecv's...
  if(level->exchange_ghosts[shape].num_recvs>0){
    _timeStart = getTime();
    #ifdef USE_MPI_THREAD_MULTIPLE
    #pragma omp parallel for schedule(dynamic,1)
    #endif
    for(n=0;n<level->exchange_ghosts[shape].num_recvs;n++){
      MPI_Irecv(level->exchange_ghosts[shape].recv_buffers[n],
                level->exchange_ghosts[shape].recv_sizes[n],
                MPI_DOUBLE,
                level->exchange_ghosts[shape].recv_ranks[n],
                my_tag,
                MPI_COMM_WORLD,
                &recv_requests[n]
      );
    }
    _timeEnd = getTime();
    level->timers.ghostZone_recv += (_timeEnd-_timeStart);
  }

  // pack MPI send buffers...
  if(level->exchange_ghosts[shape].num_blocks[0]){
    _timeStart = getTime();
    PRAGMA_THREAD_ACROSS_BLOCKS(level,buffer,level->exchange_ghosts[shape].num_blocks[0])
    for(buffer=0;buffer<level->exchange_ghosts[shape].num_blocks[0];buffer++){
      CopyBlock(level,id,&level->exchange_ghosts[shape].blocks[0][buffer]);
    }
    _timeEnd = getTime();
    level->timers.ghostZone_pack += (_timeEnd-_timeStart);
  }

  // loop through MPI send buffers and post Isend's...
  if(level->exchange_ghosts[shape].num_sends>0){
    _timeStart = getTime();
    #ifdef USE_MPI_THREAD_MULTIPLE
    #pragma omp parallel for schedule(dynamic,1)
    #endif
    for(n=0;n<level->exchange_ghosts[shape].num_sends;n++){
      MPI_Isend(level->exchange_ghosts[shape].send_buffers[n],
                level->exchange_ghosts[shape].send_sizes[n],
                MPI_DOUBLE,
                level->exchange_ghosts[shape].send_ranks[n],
                my_tag,
                MPI_COMM_WORLD,
                &send_requests[n]
      );
    }
    _timeEnd = getTime();
    level->timers.ghostZone_send += (_timeEnd-_timeStart);
  }
  #endif

  // exchange locally... try and hide within Isend latency...
  if(level->exchange_ghosts[shape].num_blocks[1]){
    _timeStart = getTime();
    PRAGMA_THREAD_ACROSS_BLOCKS(level,buffer,level->exchange_ghosts[shape].num_blocks[1])
    for(buffer=0;buffer<level->exchange_ghosts[shape].num_blocks[1];buffer++){
      CopyBlock(level,id,&level->exchange_ghosts[shape].blocks[1][buffer]);
    }
    _timeEnd = getTime();
    level->timers.ghostZone_local += (_timeEnd-_timeStart);
  }

  // wait for MPI to finish...
  #ifdef USE_MPI
  if(nMessages){
    _timeStart = getTime();
    MPI_Waitall(nMessages,level->exchange_ghosts[shape].requests,level->exchange_ghosts[shape].status);
    _timeEnd = getTime();
    level->timers.ghostZone_wait += (_timeEnd-_timeStart);
  }

  // unpack MPI receive buffers
  if(level->exchange_ghosts[shape].num_blocks[2]){
    _timeStart = getTime();
    PRAGMA_THREAD_ACROSS_BLOCKS(level,buffer,level->exchange_ghosts[shape].num_blocks[2])
    for(buffer=0;buffer<level->exchange_ghosts[shape].num_blocks[2];buffer++){
      CopyBlock(level,id,&level->exchange_ghosts[shape].blocks[2][buffer]);
    }
    _timeEnd = getTime();
    level->timers.ghostZone_unpack += (_timeEnd-_timeStart);
  }
  #endif

  level->timers.ghostZone_total += (double)(getTime()-_timeCommunicationStart);
}
GB_subref_template.c
//------------------------------------------------------------------------------
// GB_subref_template: C = A(I,J)
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// GB_subref_template extracts a submatrix, C = A(I,J).  The method is done in
// two phases.  Phase 1 just counts the entries in C, and phase 2 constructs
// the pattern and values of C.  There are 3 kinds of subref:
//
//      symbolic: C(i,j) is the position of A(I(i),J(j)) in the matrix A
//      iso: C = A(I,J), extracting the pattern only, not the values
//      numeric: C = A(I,J), extracting the pattern and values
//
// This file is a template: it is #include'd with GB_SYMBOLIC, GB_ISO_SUBREF,
// GB_PHASE_1_OF_2 / GB_PHASE_2_OF_2, and the GB_COPY_* macros defined by the
// including file (they are #undef'd at the end).

#if defined ( GB_SYMBOLIC )
    // symbolic method must tolerate zombies
    #define GB_Ai(p) GBI_UNFLIP (Ai, p, avlen)
#else
    // iso and non-iso numeric methods will not see any zombies
    #define GB_Ai(p) GBI (Ai, p, avlen)
#endif

// to iterate across all entries in a bucket:
// Mark [i] - 1 is the first index inew with I [inew] == i (so Mark [i] == 0
// means bucket i is empty), and Inext [inew] chains any duplicates, ending
// with a negative value.
#define GB_for_each_index_in_bucket(inew,i) \
    for (int64_t inew = Mark [i] - 1 ; inew >= 0 ; inew = Inext [inew])

{

    //--------------------------------------------------------------------------
    // get A and I
    //--------------------------------------------------------------------------

    const int64_t *restrict Ai = A->i ;
    const int64_t avlen = A->vlen ;

    // these values are ignored if Ikind == GB_LIST
    int64_t ibegin = Icolon [GxB_BEGIN] ;
    int64_t iinc   = Icolon [GxB_INC  ] ;
    // inc = |iinc|, used by case 8 (negative stride)
    int64_t inc    = (iinc < 0) ? (-iinc) : iinc ;
    #ifdef GB_DEBUG
    int64_t iend = Icolon [GxB_END ] ;
    #endif

    //--------------------------------------------------------------------------
    // phase1: count entries in each C(:,kC); phase2: compute C
    //--------------------------------------------------------------------------

    int taskid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        int64_t kfirst = TaskList [taskid].kfirst ;
        int64_t klast  = TaskList [taskid].klast ;
        // a negative klast flags a fine task (one slice of a single vector)
        bool fine_task = (klast < 0) ;
        if (fine_task)
        {
            // a fine task operates on a slice of a single vector
            klast = kfirst ;
        }

        // a coarse task accesses all of I for all its vectors
        int64_t pI     = 0 ;
        int64_t pI_end = nI ;
        int64_t ilen   = nI ;

        ASSERT (0 <= kfirst && kfirst <= klast && klast < Cnvec) ;

        //----------------------------------------------------------------------
        // compute all vectors C(:,kfirst:klast) for this task
        //----------------------------------------------------------------------

        for (int64_t kC = kfirst ; kC <= klast ; kC++)
        {

            //------------------------------------------------------------------
            // get C(:,kC)
            //------------------------------------------------------------------

            #if defined ( GB_PHASE_1_OF_2 )
            // phase1 simply counts the # of entries in C(*,kC).
            int64_t clen = 0 ;
            #else
            // This task computes all or part of C(:,kC), which are the entries
            // in Ci,Cx [pC:pC_end-1].
            int64_t pC, pC_end ;
            if (fine_task)
            {
                // A fine task computes a slice of C(:,kC)
                pC     = TaskList [taskid  ].pC ;
                pC_end = TaskList [taskid+1].pC ;
                ASSERT (Cp [kC] <= pC && pC <= pC_end && pC_end <= Cp [kC+1]) ;
            }
            else
            {
                // The vectors of C are never sliced for a coarse task, so this
                // task computes all of C(:,kC).
                pC     = Cp [kC] ;
                pC_end = Cp [kC+1] ;
            }
            int64_t clen = pC_end - pC ;
            if (clen == 0) continue ;
            #endif

            //------------------------------------------------------------------
            // get A(:,kA)
            //------------------------------------------------------------------

            int64_t pA, pA_end ;
            if (fine_task)
            {
                // a fine task computes a slice of a single vector C(:,kC).
                // The task accesses Ai,Ax [pA:pA_end-1], which holds either
                // the entire vector A(imin:imax,kA) for method 6, the entire
                // dense A(:,kA) for methods 1 and 2, or a slice of the
                // A(imin:max,kA) vector for all other methods.
                pA     = TaskList [taskid].pA ;
                pA_end = TaskList [taskid].pA_end ;
            }
            else
            {
                // a coarse task computes the entire vector C(:,kC).  The task
                // accesses all of A(imin:imax,kA), for most methods, or all of
                // A(:,kA) for methods 1 and 2.  The vector A(*,kA) appears in
                // Ai,Ax [pA:pA_end-1].
                pA     = Ap_start [kC] ;
                pA_end = Ap_end   [kC] ;
            }
            int64_t alen = pA_end - pA ;
            if (alen == 0) continue ;

            //------------------------------------------------------------------
            // get I
            //------------------------------------------------------------------

            if (fine_task)
            {
                // A fine task accesses I [pI:pI_end-1].  For methods 2 and 6,
                // pI:pI_end is a subset of the entire 0:nI-1 list.  For all
                // other methods, pI = 0 and pI_end = nI, and the task can
                // access all of I.
                pI     = TaskList [taskid].pB ;
                pI_end = TaskList [taskid].pB_end ;
                ilen   = pI_end - pI ;
            }

            //------------------------------------------------------------------
            // determine the method to use
            //------------------------------------------------------------------

            int method ;
            if (fine_task)
            {
                // The method that the fine task uses for its slice of A(*,kA)
                // and C(*,kC) has already been determined by GB_subref_slice.
                // It was stashed (negated) in klast by the task builder.
                method = (int) (-TaskList [taskid].klast) ;
            }
            else
            {
                // determine the method based on A(*,kA) and I
                method = GB_subref_method (NULL, NULL, alen, avlen, Ikind, nI,
                    (Mark != NULL), need_qsort, iinc, nduplicates) ;
            }

            //------------------------------------------------------------------
            // extract C (:,kC) = A (I,kA): consider all cases
            //------------------------------------------------------------------

            switch (method)
            {

                //--------------------------------------------------------------
                case 1 : // C(:,kC) = A(:,kA) where A(:,kA) is dense
                //--------------------------------------------------------------

                    // A (:,kA) has not been sliced
                    ASSERT (Ikind == GB_ALL) ;
                    ASSERT (pA     == Ap_start [kC]) ;
                    ASSERT (pA_end == Ap_end   [kC]) ;
                    // copy the entire vector and construct indices
                    #if defined ( GB_PHASE_1_OF_2 )
                    clen = ilen ;
                    #else
                    for (int64_t k = 0 ; k < ilen ; k++)
                    {
                        int64_t inew = k + pI ;
                        ASSERT (inew == GB_ijlist (I, inew, Ikind, Icolon)) ;
                        ASSERT (inew == GB_Ai (pA + inew)) ;
                        Ci [pC + k] = inew ;
                    }
                    GB_COPY_RANGE (pC, pA + pI, ilen) ;
                    #endif
                    break ;

                //--------------------------------------------------------------
                case 2 : // C(:,kC) = A(I,kA) where A(I,kA) is dense
                //--------------------------------------------------------------

                    // This method handles any kind of list I, but A(:,kA)
                    // must be dense.  A(:,kA) has not been sliced.
                    ASSERT (pA     == Ap_start [kC]) ;
                    ASSERT (pA_end == Ap_end   [kC]) ;
                    // scan I and get the entry in A(:,kA) via direct lookup
                    #if defined ( GB_PHASE_1_OF_2 )
                    clen = ilen ;
                    #else
                    for (int64_t k = 0 ; k < ilen ; k++)
                    {
                        // C(inew,kC) = A(i,kA), and it always exists.
                        int64_t inew = k + pI ;
                        int64_t i = GB_ijlist (I, inew, Ikind, Icolon) ;
                        ASSERT (i == GB_Ai (pA + i)) ;
                        Ci [pC + k] = inew ;
                        GB_COPY_ENTRY (pC + k, pA + i) ;
                    }
                    #endif
                    break ;

                //--------------------------------------------------------------
                case 3 : // the list I has a single index, ibegin
                //--------------------------------------------------------------

                    // binary search in GB_subref_phase0 has already found it.
                    // This can be any Ikind with nI=1: GB_ALL with A->vlen=1,
                    // GB_RANGE with ibegin==iend, GB_STRIDE such as 0:-1:0
                    // (with length 1), or a GB_LIST with ni=1.

                    // Time: 50x faster
                    ASSERT (!fine_task) ;
                    ASSERT (alen == 1) ;
                    ASSERT (nI == 1) ;
                    ASSERT (GB_Ai (pA) == GB_ijlist (I, 0, Ikind, Icolon)) ;
                    #if defined ( GB_PHASE_1_OF_2 )
                    clen = 1 ;
                    #else
                    Ci [pC] = 0 ;
                    GB_COPY_ENTRY (pC, pA) ;
                    #endif
                    break ;

                //--------------------------------------------------------------
                case 4 : // Ikind is ":", thus C(:,kC) = A (:,kA)
                //--------------------------------------------------------------

                    // Time: 1x faster but low speedup on the Mac.  Why?
                    // Probably memory bound since it is just memcpy's.
                    ASSERT (Ikind == GB_ALL && ibegin == 0) ;
                    #if defined ( GB_PHASE_1_OF_2 )
                    clen = alen ;
                    #else
                    #if defined ( GB_SYMBOLIC )
                    if (nzombies == 0)
                    {
                        memcpy (Ci + pC, Ai + pA, alen * sizeof (int64_t)) ;
                    }
                    else
                    {
                        // with zombies: GB_Ai unflips any zombie index
                        for (int64_t k = 0 ; k < alen ; k++)
                        {
                            // symbolic C(:,kC) = A(:,kA) where A has zombies
                            int64_t i = GB_Ai (pA + k) ;
                            ASSERT (i == GB_ijlist (I, i, Ikind, Icolon)) ;
                            Ci [pC + k] = i ;
                        }
                    }
                    #else
                    memcpy (Ci + pC, Ai + pA, alen * sizeof (int64_t)) ;
                    #endif
                    GB_COPY_RANGE (pC, pA, alen) ;
                    #endif
                    break ;

                //--------------------------------------------------------------
                case 5 : // Ikind is GB_RANGE = ibegin:iend
                //--------------------------------------------------------------

                    // Time: much faster.  Good speedup too.
                    ASSERT (Ikind == GB_RANGE) ;
                    #if defined ( GB_PHASE_1_OF_2 )
                    clen = alen ;
                    #else
                    for (int64_t k = 0 ; k < alen ; k++)
                    {
                        // new index is just the old one shifted by ibegin
                        int64_t i = GB_Ai (pA + k) ;
                        int64_t inew = i - ibegin ;
                        ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ;
                        Ci [pC + k] = inew ;
                    }
                    GB_COPY_RANGE (pC, pA, alen) ;
                    #endif
                    break ;

                //--------------------------------------------------------------
                case 6 : // I is short vs nnz (A (:,kA)), use binary search
                //--------------------------------------------------------------

                    // Time: very slow unless I is very short and A(:,kA) is
                    // very long.

                    // This case can handle any kind of I, and A(:,kA) of any
                    // properties.  For a fine task, A(:,kA) has not been
                    // sliced; I has been sliced instead.

                    // If the I bucket inverse has not been created, this
                    // method is the only option.  Alternatively, if nI =
                    // length (I) is << nnz (A (:,kA)), then scanning I and
                    // doing a binary search of A (:,kA) is faster than doing a
                    // linear-time search of A(:,kA) and a lookup into the I
                    // bucket inverse.

                    // The vector of C is constructed in sorted order, so no
                    // sort is needed.

                    // A(:,kA) has not been sliced.
                    ASSERT (pA     == Ap_start [kC]) ;
                    ASSERT (pA_end == Ap_end   [kC]) ;

                    // scan I, in order, and search for the entry in A(:,kA)
                    for (int64_t k = 0 ; k < ilen ; k++)
                    {
                        // C(inew,kC) = A (i,kA), if it exists.
                        // i = I [inew] ; or from a colon expression
                        int64_t inew = k + pI ;
                        int64_t i = GB_ijlist (I, inew, Ikind, Icolon) ;
                        bool found ;
                        int64_t pleft = pA ;
                        int64_t pright = pA_end - 1 ;
                        #if defined ( GB_SYMBOLIC )
                        bool is_zombie ;
                        GB_BINARY_SEARCH_ZOMBIE (i, Ai, pleft, pright, found,
                            nzombies, is_zombie) ;
                        #else
                        GB_BINARY_SEARCH (i, Ai, pleft, pright, found) ;
                        #endif
                        if (found)
                        {
                            ASSERT (i == GB_Ai (pleft)) ;
                            #if defined ( GB_PHASE_1_OF_2 )
                            clen++ ;
                            #else
                            ASSERT (pC < pC_end) ;
                            Ci [pC] = inew ;
                            GB_COPY_ENTRY (pC, pleft) ;
                            pC++ ;
                            #endif
                        }
                    }
                    #if defined ( GB_PHASE_2_OF_2 )
                    ASSERT (pC == pC_end) ;
                    #endif
                    break ;

                //--------------------------------------------------------------
                case 7 : // I is ibegin:iinc:iend with iinc > 1
                //--------------------------------------------------------------

                    // Time: 1 thread: C=A(1:2:n,:) is 3x slower
                    // but has good speedup.  About as fast with
                    // enough threads.
                    ASSERT (Ikind == GB_STRIDE && iinc > 1) ;
                    for (int64_t k = 0 ; k < alen ; k++)
                    {
                        // A(i,kA) present; see if it is in ibegin:iinc:iend
                        int64_t i = GB_Ai (pA + k) ;
                        ASSERT (ibegin <= i && i <= iend) ;
                        i = i - ibegin ;
                        if (i % iinc == 0)
                        {
                            // i is in the sequence ibegin:iinc:iend
                            #if defined ( GB_PHASE_1_OF_2 )
                            clen++ ;
                            #else
                            int64_t inew = i / iinc ;
                            ASSERT (pC < pC_end) ;
                            Ci [pC] = inew ;
                            GB_COPY_ENTRY (pC, pA + k) ;
                            pC++ ;
                            #endif
                        }
                    }
                    #if defined ( GB_PHASE_2_OF_2 )
                    ASSERT (pC == pC_end) ;
                    #endif
                    break ;

                //----------------------------------------------------------
                case 8 : // I = ibegin:(-iinc):iend, with iinc < -1
                //----------------------------------------------------------

                    // Time: 2x slower for iinc = -2 or -8.
                    // Good speedup though.  Faster for
                    // large values (iinc = -128).

                    // Iterate A(:,kA) backwards so C(:,kC) is built in
                    // ascending order of inew.
                    ASSERT (Ikind == GB_STRIDE && iinc < -1) ;
                    for (int64_t k = alen - 1 ; k >= 0 ; k--)
                    {
                        // A(i,kA) present; see if it is in ibegin:iinc:iend
                        int64_t i = GB_Ai (pA + k) ;
                        ASSERT (iend <= i && i <= ibegin) ;
                        i = ibegin - i ;
                        if (i % inc == 0)
                        {
                            // i is in the sequence ibegin:iinc:iend
                            #if defined ( GB_PHASE_1_OF_2 )
                            clen++ ;
                            #else
                            int64_t inew = i / inc ;
                            ASSERT (pC < pC_end) ;
                            Ci [pC] = inew ;
                            GB_COPY_ENTRY (pC, pA + k) ;
                            pC++ ;
                            #endif
                        }
                    }
                    #if defined ( GB_PHASE_2_OF_2 )
                    ASSERT (pC == pC_end) ;
                    #endif
                    break ;

                //----------------------------------------------------------
                case 9 : // I = ibegin:(-1):iend
                //----------------------------------------------------------

                    // Time: much faster.  Good speedup.

                    // Every entry of A(:,kA) is kept; reverse iteration keeps
                    // C(:,kC) sorted.
                    ASSERT (Ikind == GB_STRIDE && iinc == -1) ;
                    #if defined ( GB_PHASE_1_OF_2 )
                    clen = alen ;
                    #else
                    for (int64_t k = alen - 1 ; k >= 0 ; k--)
                    {
                        // A(i,kA) is present
                        int64_t i = GB_Ai (pA + k) ;
                        int64_t inew = (ibegin - i) ;
                        ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ;
                        Ci [pC] = inew ;
                        GB_COPY_ENTRY (pC, pA + k) ;
                        pC++ ;
                    }
                    #endif
                    break ;

                //--------------------------------------------------------------
                case 10 : // I unsorted, and C needs qsort, duplicates OK
                //--------------------------------------------------------------

                    // Time: with one thread: 2x slower, probably
                    // because of the qsort.  Good speedup however.  This used
                    // if qsort is needed but ndupl == 0.  Try a method that
                    // needs qsort, but no duplicates?

                    // Case 10 works well when I has many entries and A(:,kA)
                    // has few entries.  C(:,kC) must be sorted after this pass.

                    ASSERT (Ikind == GB_LIST) ;
                    for (int64_t k = 0 ; k < alen ; k++)
                    {
                        // A(i,kA) present, look it up in the I inverse buckets
                        int64_t i = GB_Ai (pA + k) ;
                        // traverse bucket i for all indices inew where
                        // i == I [inew] or where i is from a colon expression
                        GB_for_each_index_in_bucket (inew, i)
                        {
                            ASSERT (inew >= 0 && inew < nI) ;
                            ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ;
                            #if defined ( GB_PHASE_1_OF_2 )
                            clen++ ;
                            #else
                            Ci [pC] = inew ;
                            GB_COPY_ENTRY (pC, pA + k) ;
                            pC++ ;
                            #endif
                        }
                    }

                    // TODO: skip the sort if C is allowed to be jumbled on
                    // output.  Flag C as jumbled instead.

                    #if defined ( GB_PHASE_2_OF_2 )
                    ASSERT (pC == pC_end) ;
                    if (!fine_task)
                    {
                        // a coarse task owns this entire C(:,kC) vector, so
                        // the sort can be done now.  The sort for vectors
                        // handled by multiple fine tasks must wait until all
                        // task are completed, below in the post sort.
                        pC = Cp [kC] ;
                        #if defined ( GB_ISO_SUBREF )
                        // iso numeric subref C=A(I,J)
                        // just sort the pattern of C(:,kC)
                        GB_qsort_1 (Ci + pC, clen) ;
                        #else
                        // sort the pattern of C(:,kC), and the values
                        GB_qsort_1b (Ci + pC, (GB_void *) (Cx + pC*GB_CSIZE1),
                            GB_CSIZE2, clen) ;
                        #endif
                    }
                    #endif
                    break ;

                //--------------------------------------------------------------
                case 11 : // I not contiguous, with duplicates.  No qsort needed
                //--------------------------------------------------------------

                    // Case 11 works well when I has many entries and A(:,kA)
                    // has few entries.  It requires that I be sorted on input,
                    // so that no sort is required for C(:,kC).  It is
                    // otherwise identical to Case 10.

                    ASSERT (Ikind == GB_LIST) ;
                    for (int64_t k = 0 ; k < alen ; k++)
                    {
                        // A(i,kA) present, look it up in the I inverse buckets
                        int64_t i = GB_Ai (pA + k) ;
                        // traverse bucket i for all indices inew where
                        // i == I [inew] or where i is from a colon expression
                        GB_for_each_index_in_bucket (inew, i)
                        {
                            ASSERT (inew >= 0 && inew < nI) ;
                            ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ;
                            #if defined ( GB_PHASE_1_OF_2 )
                            clen++ ;
                            #else
                            Ci [pC] = inew ;
                            GB_COPY_ENTRY (pC, pA + k) ;
                            pC++ ;
                            #endif
                        }
                    }
                    #if defined ( GB_PHASE_2_OF_2 )
                    ASSERT (pC == pC_end) ;
                    #endif
                    break ;

                //--------------------------------------------------------------
                case 12 : // I not contiguous, no duplicates.  No qsort needed.
                //--------------------------------------------------------------

                    // Identical to Case 11, except GB_for_each_index_in_bucket
                    // just needs to iterate 0 or 1 times.  Works well when I
                    // has many entries and A(:,kA) has few entries.

                    ASSERT (Ikind == GB_LIST && nduplicates == 0) ;
                    for (int64_t k = 0 ; k < alen ; k++)
                    {
                        // A(i,kA) present, look it up in the I inverse buckets
                        int64_t i = GB_Ai (pA + k) ;
                        // bucket i has at most one index inew such that
                        // i == I [inew]
                        int64_t inew = Mark [i] - 1 ;
                        if (inew >= 0)
                        {
                            ASSERT (inew >= 0 && inew < nI) ;
                            ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ;
                            #if defined ( GB_PHASE_1_OF_2 )
                            clen++ ;
                            #else
                            Ci [pC] = inew ;
                            GB_COPY_ENTRY (pC, pA + k) ;
                            pC++ ;
                            #endif
                        }
                    }
                    #if defined ( GB_PHASE_2_OF_2 )
                    ASSERT (pC == pC_end) ;
                    #endif
                    break ;

                //--------------------------------------------------------------
                default: ;
                //--------------------------------------------------------------
            }

            //------------------------------------------------------------------
            // final count of nnz (C (:,j))
            //------------------------------------------------------------------

            #if defined ( GB_PHASE_1_OF_2 )
            if (fine_task)
            {
                TaskList [taskid].pC = clen ;
            }
            else
            {
                Cp [kC] = clen ;
            }
            #endif
        }
    }

    //--------------------------------------------------------------------------
    // phase2: post sort for any vectors handled by fine tasks with method 10
    //--------------------------------------------------------------------------

    #if defined ( GB_PHASE_2_OF_2 )
    {
        if (post_sort)
        {
            int taskid ;
            #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
            for (taskid = 0 ; taskid < ntasks ; taskid++)
            {
                int64_t kC = TaskList [taskid].kfirst ;
                bool do_post_sort = (TaskList [taskid].len != 0) ;
                if (do_post_sort)
                {
                    // This is the first fine task with method 10 for C(:,kC).
                    // The vector C(:,kC) must be sorted, since method 10 left
                    // it with unsorted indices.
                    int64_t pC = Cp [kC] ;
                    int64_t clen = Cp [kC+1] - pC ;
                    #if defined ( GB_ISO_SUBREF )
                    {
                        // iso numeric subref C=A(I,J)
                        // just sort the pattern of C(:,kC)
                        GB_qsort_1 (Ci + pC, clen) ;
                    }
                    #else
                    {
                        // sort the pattern of C(:,kC), and the values
                        GB_qsort_1b (Ci + pC, (GB_void *) (Cx + pC*GB_CSIZE1),
                            GB_CSIZE2, clen) ;
                    }
                    #endif
                }
            }
        }
    }
    #endif
}

#undef GB_Ai
#undef GB_for_each_index_in_bucket
#undef GB_COPY_RANGE
#undef GB_COPY_ENTRY
#undef GB_CSIZE1
#undef GB_CSIZE2
#undef GB_SYMBOLIC
#undef GB_ISO_SUBREF
omp.c
// RUN: mlir-clang %s --function=* -fopenmp -S | FileCheck %s void square(double* x, int sstart, int send, int sinc) { #pragma omp parallel for for(int i=sstart; i < send; i+= sinc) { x[i] = i; } } // CHECK: func @square(%arg0: memref<?xf64>, %arg1: i32, %arg2: i32, %arg3: i32) attributes {llvm.linkage = #llvm.linkage<external>} { // CHECK-NEXT: %0 = arith.index_cast %arg1 : i32 to index // CHECK-NEXT: %1 = arith.index_cast %arg2 : i32 to index // CHECK-NEXT: %2 = arith.index_cast %arg3 : i32 to index // CHECK-NEXT: scf.parallel (%arg4) = (%0) to (%1) step (%2) { // CHECK-NEXT: %3 = arith.index_cast %arg4 : index to i32 // CHECK-NEXT: %4 = arith.sitofp %3 : i32 to f64 // CHECK-NEXT: memref.store %4, %arg0[%arg4] : memref<?xf64> // CHECK-NEXT: scf.yield // CHECK-NEXT: } // CHECK-NEXT: return // CHECK-NEXT: }
target-2.c
/* { dg-do run } */

#include <stdlib.h>

#define N 100000

/* Fill a1 with an alternating -1/+1 pattern and a2 with the
   (char-converted) index.  */
void
init (char *a1, char *a2)
{
  char sign = -1;

  for (int i = 0; i < N; i++)
    {
      a1[i] = sign;
      a2[i] = i;
      sign = -sign;
    }
}

/* Abort if the two arrays differ anywhere.  */
void
check (char *a, char *b)
{
  for (int i = 0; i < N; i++)
    if (a[i] != b[i])
      abort ();
}

/* Host-only reference: elementwise product of the two init'd arrays.  */
void
vec_mult_ref (char *p)
{
  char v1[N], v2[N];

  init (v1, v2);
  for (int i = 0; i < N; i++)
    p[i] = v1[i] * v2[i];
}

/* Same computation, offloaded: p is mapped "from" the target region.  */
void
vec_mult (char *p)
{
  char v1[N], v2[N];

  init (v1, v2);
  #pragma omp target map(from: p[0:N])
  #pragma omp parallel for
  for (int i = 0; i < N; i++)
    p[i] = v1[i] * v2[i];
}

int
main ()
{
  char p1[N], p2[N];
  char v1[N], v2[N];

  init (v1, v2);
  vec_mult_ref (p1);
  vec_mult (p2);
  check (p1, p2);
  return 0;
}
main.c
//
//  main.c
//  omp_for_reduction
//
//  Created by Vicente Cubells Nonell on 03/11/14.
//  Copyright (c) 2014 Vicente Cubells Nonell. All rights reserved.
//

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Fill an array of n ints with 1s in parallel, then sum it with an
 * OpenMP "+" reduction and print the total (expected: n).
 *
 * Fix: the original declared `int numeros[n]` with n = 1,000,000 — a
 * ~4 MB variable-length array on the stack, which risks stack overflow
 * (and VLA support is optional since C11).  The array is now allocated
 * on the heap, with the allocation checked and freed.
 */
int main(int argc, const char * argv[])
{
    int n = 1000000;
    int suma = 0;
    int i;

    int *numeros = malloc((size_t) n * sizeof *numeros);
    if (numeros == NULL)
    {
        fprintf(stderr, "Error: no se pudo reservar memoria\n");
        return EXIT_FAILURE;
    }

    /* Initialize every slot to 1 in parallel. */
    #pragma omp parallel for default(none) shared(n, numeros) private(i)
    for (i = 0; i < n; ++i)
    {
        numeros[i] = 1;
    }

    /* Each thread accumulates a private partial sum; the reduction
       clause combines them into suma at the end of the loop. */
    #pragma omp parallel for default(none) shared(n, numeros) private(i) reduction(+:suma)
    for (i = 0; i < n; ++i)
    {
        suma += numeros[i];
    }

    printf("La suma total es = %d\n", suma);

    free(numeros);
    return 0;
}
kmp_atomic.c
/* * kmp_atomic.c -- ATOMIC implementation routines */ /* <copyright> Copyright (c) 1997-2016 Intel Corporation. All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. </copyright> */ #include "kmp_atomic.h" #include "kmp.h" // TRUE, asm routines prototypes typedef unsigned char uchar; typedef unsigned short ushort; /*! @defgroup ATOMIC_OPS Atomic Operations These functions are used for implementing the many different varieties of atomic operations. The compiler is at liberty to inline atomic operations that are naturally supported by the target architecture. 
For instance on IA-32 architecture an atomic like this can be inlined @code static int s = 0; #pragma omp atomic s++; @endcode using the single instruction: `lock; incl s` However the runtime does provide entrypoints for these operations to support compilers that choose not to inline them. (For instance, `__kmpc_atomic_fixed4_add` could be used to perform the increment above.) The names of the functions are encoded by using the data type name and the operation name, as in these tables. Data Type | Data type encoding -----------|--------------- int8_t | `fixed1` uint8_t | `fixed1u` int16_t | `fixed2` uint16_t | `fixed2u` int32_t | `fixed4` uint32_t | `fixed4u` int32_t | `fixed8` uint32_t | `fixed8u` float | `float4` double | `float8` float 10 (8087 eighty bit float) | `float10` complex<float> | `cmplx4` complex<double> | `cmplx8` complex<float10> | `cmplx10` <br> Operation | Operation encoding ----------|------------------- + | add - | sub \* | mul / | div & | andb << | shl \>\> | shr \| | orb ^ | xor && | andl \|\| | orl maximum | max minimum | min .eqv. | eqv .neqv. | neqv <br> For non-commutative operations, `_rev` can also be added for the reversed operation. For the functions that capture the result, the suffix `_cpt` is added. Update Functions ================ The general form of an atomic function that just performs an update (without a `capture`) @code void __kmpc_atomic_<datatype>_<operation>( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ); @endcode @param ident_t a pointer to source location @param gtid the global thread id @param lhs a pointer to the left operand @param rhs the right operand `capture` functions =================== The capture functions perform an atomic update and return a result, which is either the value before the capture, or that after. They take an additional argument to determine which result is returned. 
Their general form is therefore @code TYPE __kmpc_atomic_<datatype>_<operation>_cpt( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, int flag ); @endcode @param ident_t a pointer to source location @param gtid the global thread id @param lhs a pointer to the left operand @param rhs the right operand @param flag one if the result is to be captured *after* the operation, zero if captured *before*. The one set of exceptions to this is the `complex<float>` type where the value is not returned, rather an extra argument pointer is passed. They look like @code void __kmpc_atomic_cmplx4_<op>_cpt( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs, kmp_cmplx32 * out, int flag ); @endcode Read and Write Operations ========================= The OpenMP<sup>*</sup> standard now supports atomic operations that simply ensure that the value is read or written atomically, with no modification performed. In many cases on IA-32 architecture these operations can be inlined since the architecture guarantees that no tearing occurs on aligned objects accessed with a single memory operation of up to 64 bits in size. The general form of the read operations is @code TYPE __kmpc_atomic_<type>_rd ( ident_t *id_ref, int gtid, TYPE * loc ); @endcode For the write operations the form is @code void __kmpc_atomic_<type>_wr ( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ); @endcode Full list of functions ====================== This leads to the generation of 376 atomic functions, as follows. Functons for integers --------------------- There are versions here for integers of size 1,2,4 and 8 bytes both signed and unsigned (where that matters). 
@code __kmpc_atomic_fixed1_add __kmpc_atomic_fixed1_add_cpt __kmpc_atomic_fixed1_add_fp __kmpc_atomic_fixed1_andb __kmpc_atomic_fixed1_andb_cpt __kmpc_atomic_fixed1_andl __kmpc_atomic_fixed1_andl_cpt __kmpc_atomic_fixed1_div __kmpc_atomic_fixed1_div_cpt __kmpc_atomic_fixed1_div_cpt_rev __kmpc_atomic_fixed1_div_float8 __kmpc_atomic_fixed1_div_fp __kmpc_atomic_fixed1_div_rev __kmpc_atomic_fixed1_eqv __kmpc_atomic_fixed1_eqv_cpt __kmpc_atomic_fixed1_max __kmpc_atomic_fixed1_max_cpt __kmpc_atomic_fixed1_min __kmpc_atomic_fixed1_min_cpt __kmpc_atomic_fixed1_mul __kmpc_atomic_fixed1_mul_cpt __kmpc_atomic_fixed1_mul_float8 __kmpc_atomic_fixed1_mul_fp __kmpc_atomic_fixed1_neqv __kmpc_atomic_fixed1_neqv_cpt __kmpc_atomic_fixed1_orb __kmpc_atomic_fixed1_orb_cpt __kmpc_atomic_fixed1_orl __kmpc_atomic_fixed1_orl_cpt __kmpc_atomic_fixed1_rd __kmpc_atomic_fixed1_shl __kmpc_atomic_fixed1_shl_cpt __kmpc_atomic_fixed1_shl_cpt_rev __kmpc_atomic_fixed1_shl_rev __kmpc_atomic_fixed1_shr __kmpc_atomic_fixed1_shr_cpt __kmpc_atomic_fixed1_shr_cpt_rev __kmpc_atomic_fixed1_shr_rev __kmpc_atomic_fixed1_sub __kmpc_atomic_fixed1_sub_cpt __kmpc_atomic_fixed1_sub_cpt_rev __kmpc_atomic_fixed1_sub_fp __kmpc_atomic_fixed1_sub_rev __kmpc_atomic_fixed1_swp __kmpc_atomic_fixed1_wr __kmpc_atomic_fixed1_xor __kmpc_atomic_fixed1_xor_cpt __kmpc_atomic_fixed1u_div __kmpc_atomic_fixed1u_div_cpt __kmpc_atomic_fixed1u_div_cpt_rev __kmpc_atomic_fixed1u_div_fp __kmpc_atomic_fixed1u_div_rev __kmpc_atomic_fixed1u_shr __kmpc_atomic_fixed1u_shr_cpt __kmpc_atomic_fixed1u_shr_cpt_rev __kmpc_atomic_fixed1u_shr_rev __kmpc_atomic_fixed2_add __kmpc_atomic_fixed2_add_cpt __kmpc_atomic_fixed2_add_fp __kmpc_atomic_fixed2_andb __kmpc_atomic_fixed2_andb_cpt __kmpc_atomic_fixed2_andl __kmpc_atomic_fixed2_andl_cpt __kmpc_atomic_fixed2_div __kmpc_atomic_fixed2_div_cpt __kmpc_atomic_fixed2_div_cpt_rev __kmpc_atomic_fixed2_div_float8 __kmpc_atomic_fixed2_div_fp __kmpc_atomic_fixed2_div_rev __kmpc_atomic_fixed2_eqv 
__kmpc_atomic_fixed2_eqv_cpt __kmpc_atomic_fixed2_max __kmpc_atomic_fixed2_max_cpt __kmpc_atomic_fixed2_min __kmpc_atomic_fixed2_min_cpt __kmpc_atomic_fixed2_mul __kmpc_atomic_fixed2_mul_cpt __kmpc_atomic_fixed2_mul_float8 __kmpc_atomic_fixed2_mul_fp __kmpc_atomic_fixed2_neqv __kmpc_atomic_fixed2_neqv_cpt __kmpc_atomic_fixed2_orb __kmpc_atomic_fixed2_orb_cpt __kmpc_atomic_fixed2_orl __kmpc_atomic_fixed2_orl_cpt __kmpc_atomic_fixed2_rd __kmpc_atomic_fixed2_shl __kmpc_atomic_fixed2_shl_cpt __kmpc_atomic_fixed2_shl_cpt_rev __kmpc_atomic_fixed2_shl_rev __kmpc_atomic_fixed2_shr __kmpc_atomic_fixed2_shr_cpt __kmpc_atomic_fixed2_shr_cpt_rev __kmpc_atomic_fixed2_shr_rev __kmpc_atomic_fixed2_sub __kmpc_atomic_fixed2_sub_cpt __kmpc_atomic_fixed2_sub_cpt_rev __kmpc_atomic_fixed2_sub_fp __kmpc_atomic_fixed2_sub_rev __kmpc_atomic_fixed2_swp __kmpc_atomic_fixed2_wr __kmpc_atomic_fixed2_xor __kmpc_atomic_fixed2_xor_cpt __kmpc_atomic_fixed2u_div __kmpc_atomic_fixed2u_div_cpt __kmpc_atomic_fixed2u_div_cpt_rev __kmpc_atomic_fixed2u_div_fp __kmpc_atomic_fixed2u_div_rev __kmpc_atomic_fixed2u_shr __kmpc_atomic_fixed2u_shr_cpt __kmpc_atomic_fixed2u_shr_cpt_rev __kmpc_atomic_fixed2u_shr_rev __kmpc_atomic_fixed4_add __kmpc_atomic_fixed4_add_cpt __kmpc_atomic_fixed4_add_fp __kmpc_atomic_fixed4_andb __kmpc_atomic_fixed4_andb_cpt __kmpc_atomic_fixed4_andl __kmpc_atomic_fixed4_andl_cpt __kmpc_atomic_fixed4_div __kmpc_atomic_fixed4_div_cpt __kmpc_atomic_fixed4_div_cpt_rev __kmpc_atomic_fixed4_div_float8 __kmpc_atomic_fixed4_div_fp __kmpc_atomic_fixed4_div_rev __kmpc_atomic_fixed4_eqv __kmpc_atomic_fixed4_eqv_cpt __kmpc_atomic_fixed4_max __kmpc_atomic_fixed4_max_cpt __kmpc_atomic_fixed4_min __kmpc_atomic_fixed4_min_cpt __kmpc_atomic_fixed4_mul __kmpc_atomic_fixed4_mul_cpt __kmpc_atomic_fixed4_mul_float8 __kmpc_atomic_fixed4_mul_fp __kmpc_atomic_fixed4_neqv __kmpc_atomic_fixed4_neqv_cpt __kmpc_atomic_fixed4_orb __kmpc_atomic_fixed4_orb_cpt __kmpc_atomic_fixed4_orl __kmpc_atomic_fixed4_orl_cpt 
__kmpc_atomic_fixed4_rd __kmpc_atomic_fixed4_shl __kmpc_atomic_fixed4_shl_cpt __kmpc_atomic_fixed4_shl_cpt_rev __kmpc_atomic_fixed4_shl_rev __kmpc_atomic_fixed4_shr __kmpc_atomic_fixed4_shr_cpt __kmpc_atomic_fixed4_shr_cpt_rev __kmpc_atomic_fixed4_shr_rev __kmpc_atomic_fixed4_sub __kmpc_atomic_fixed4_sub_cpt __kmpc_atomic_fixed4_sub_cpt_rev __kmpc_atomic_fixed4_sub_fp __kmpc_atomic_fixed4_sub_rev __kmpc_atomic_fixed4_swp __kmpc_atomic_fixed4_wr __kmpc_atomic_fixed4_xor __kmpc_atomic_fixed4_xor_cpt __kmpc_atomic_fixed4u_div __kmpc_atomic_fixed4u_div_cpt __kmpc_atomic_fixed4u_div_cpt_rev __kmpc_atomic_fixed4u_div_fp __kmpc_atomic_fixed4u_div_rev __kmpc_atomic_fixed4u_shr __kmpc_atomic_fixed4u_shr_cpt __kmpc_atomic_fixed4u_shr_cpt_rev __kmpc_atomic_fixed4u_shr_rev __kmpc_atomic_fixed8_add __kmpc_atomic_fixed8_add_cpt __kmpc_atomic_fixed8_add_fp __kmpc_atomic_fixed8_andb __kmpc_atomic_fixed8_andb_cpt __kmpc_atomic_fixed8_andl __kmpc_atomic_fixed8_andl_cpt __kmpc_atomic_fixed8_div __kmpc_atomic_fixed8_div_cpt __kmpc_atomic_fixed8_div_cpt_rev __kmpc_atomic_fixed8_div_float8 __kmpc_atomic_fixed8_div_fp __kmpc_atomic_fixed8_div_rev __kmpc_atomic_fixed8_eqv __kmpc_atomic_fixed8_eqv_cpt __kmpc_atomic_fixed8_max __kmpc_atomic_fixed8_max_cpt __kmpc_atomic_fixed8_min __kmpc_atomic_fixed8_min_cpt __kmpc_atomic_fixed8_mul __kmpc_atomic_fixed8_mul_cpt __kmpc_atomic_fixed8_mul_float8 __kmpc_atomic_fixed8_mul_fp __kmpc_atomic_fixed8_neqv __kmpc_atomic_fixed8_neqv_cpt __kmpc_atomic_fixed8_orb __kmpc_atomic_fixed8_orb_cpt __kmpc_atomic_fixed8_orl __kmpc_atomic_fixed8_orl_cpt __kmpc_atomic_fixed8_rd __kmpc_atomic_fixed8_shl __kmpc_atomic_fixed8_shl_cpt __kmpc_atomic_fixed8_shl_cpt_rev __kmpc_atomic_fixed8_shl_rev __kmpc_atomic_fixed8_shr __kmpc_atomic_fixed8_shr_cpt __kmpc_atomic_fixed8_shr_cpt_rev __kmpc_atomic_fixed8_shr_rev __kmpc_atomic_fixed8_sub __kmpc_atomic_fixed8_sub_cpt __kmpc_atomic_fixed8_sub_cpt_rev __kmpc_atomic_fixed8_sub_fp __kmpc_atomic_fixed8_sub_rev 
__kmpc_atomic_fixed8_swp __kmpc_atomic_fixed8_wr __kmpc_atomic_fixed8_xor __kmpc_atomic_fixed8_xor_cpt __kmpc_atomic_fixed8u_div __kmpc_atomic_fixed8u_div_cpt __kmpc_atomic_fixed8u_div_cpt_rev __kmpc_atomic_fixed8u_div_fp __kmpc_atomic_fixed8u_div_rev __kmpc_atomic_fixed8u_shr __kmpc_atomic_fixed8u_shr_cpt __kmpc_atomic_fixed8u_shr_cpt_rev __kmpc_atomic_fixed8u_shr_rev @endcode Functions for floating point ---------------------------- There are versions here for floating point numbers of size 4, 8, 10 and 16 bytes. (Ten byte floats are used by X87, but are now rare). @code __kmpc_atomic_float4_add __kmpc_atomic_float4_add_cpt __kmpc_atomic_float4_add_float8 __kmpc_atomic_float4_add_fp __kmpc_atomic_float4_div __kmpc_atomic_float4_div_cpt __kmpc_atomic_float4_div_cpt_rev __kmpc_atomic_float4_div_float8 __kmpc_atomic_float4_div_fp __kmpc_atomic_float4_div_rev __kmpc_atomic_float4_max __kmpc_atomic_float4_max_cpt __kmpc_atomic_float4_min __kmpc_atomic_float4_min_cpt __kmpc_atomic_float4_mul __kmpc_atomic_float4_mul_cpt __kmpc_atomic_float4_mul_float8 __kmpc_atomic_float4_mul_fp __kmpc_atomic_float4_rd __kmpc_atomic_float4_sub __kmpc_atomic_float4_sub_cpt __kmpc_atomic_float4_sub_cpt_rev __kmpc_atomic_float4_sub_float8 __kmpc_atomic_float4_sub_fp __kmpc_atomic_float4_sub_rev __kmpc_atomic_float4_swp __kmpc_atomic_float4_wr __kmpc_atomic_float8_add __kmpc_atomic_float8_add_cpt __kmpc_atomic_float8_add_fp __kmpc_atomic_float8_div __kmpc_atomic_float8_div_cpt __kmpc_atomic_float8_div_cpt_rev __kmpc_atomic_float8_div_fp __kmpc_atomic_float8_div_rev __kmpc_atomic_float8_max __kmpc_atomic_float8_max_cpt __kmpc_atomic_float8_min __kmpc_atomic_float8_min_cpt __kmpc_atomic_float8_mul __kmpc_atomic_float8_mul_cpt __kmpc_atomic_float8_mul_fp __kmpc_atomic_float8_rd __kmpc_atomic_float8_sub __kmpc_atomic_float8_sub_cpt __kmpc_atomic_float8_sub_cpt_rev __kmpc_atomic_float8_sub_fp __kmpc_atomic_float8_sub_rev __kmpc_atomic_float8_swp __kmpc_atomic_float8_wr __kmpc_atomic_float10_add 
__kmpc_atomic_float10_add_cpt __kmpc_atomic_float10_add_fp __kmpc_atomic_float10_div __kmpc_atomic_float10_div_cpt __kmpc_atomic_float10_div_cpt_rev __kmpc_atomic_float10_div_fp __kmpc_atomic_float10_div_rev __kmpc_atomic_float10_mul __kmpc_atomic_float10_mul_cpt __kmpc_atomic_float10_mul_fp __kmpc_atomic_float10_rd __kmpc_atomic_float10_sub __kmpc_atomic_float10_sub_cpt __kmpc_atomic_float10_sub_cpt_rev __kmpc_atomic_float10_sub_fp __kmpc_atomic_float10_sub_rev __kmpc_atomic_float10_swp __kmpc_atomic_float10_wr __kmpc_atomic_float16_add __kmpc_atomic_float16_add_cpt __kmpc_atomic_float16_div __kmpc_atomic_float16_div_cpt __kmpc_atomic_float16_div_cpt_rev __kmpc_atomic_float16_div_rev __kmpc_atomic_float16_max __kmpc_atomic_float16_max_cpt __kmpc_atomic_float16_min __kmpc_atomic_float16_min_cpt __kmpc_atomic_float16_mul __kmpc_atomic_float16_mul_cpt __kmpc_atomic_float16_rd __kmpc_atomic_float16_sub __kmpc_atomic_float16_sub_cpt __kmpc_atomic_float16_sub_cpt_rev __kmpc_atomic_float16_sub_rev __kmpc_atomic_float16_swp __kmpc_atomic_float16_wr @endcode Functions for Complex types --------------------------- Functions for complex types whose component floating point variables are of size 4,8,10 or 16 bytes. The names here are based on the size of the component float, *not* the size of the complex type. So `__kmpc_atomc_cmplx8_add` is an operation on a `complex<double>` or `complex(kind=8)`, *not* `complex<float>`. 
@code __kmpc_atomic_cmplx4_add __kmpc_atomic_cmplx4_add_cmplx8 __kmpc_atomic_cmplx4_add_cpt __kmpc_atomic_cmplx4_div __kmpc_atomic_cmplx4_div_cmplx8 __kmpc_atomic_cmplx4_div_cpt __kmpc_atomic_cmplx4_div_cpt_rev __kmpc_atomic_cmplx4_div_rev __kmpc_atomic_cmplx4_mul __kmpc_atomic_cmplx4_mul_cmplx8 __kmpc_atomic_cmplx4_mul_cpt __kmpc_atomic_cmplx4_rd __kmpc_atomic_cmplx4_sub __kmpc_atomic_cmplx4_sub_cmplx8 __kmpc_atomic_cmplx4_sub_cpt __kmpc_atomic_cmplx4_sub_cpt_rev __kmpc_atomic_cmplx4_sub_rev __kmpc_atomic_cmplx4_swp __kmpc_atomic_cmplx4_wr __kmpc_atomic_cmplx8_add __kmpc_atomic_cmplx8_add_cpt __kmpc_atomic_cmplx8_div __kmpc_atomic_cmplx8_div_cpt __kmpc_atomic_cmplx8_div_cpt_rev __kmpc_atomic_cmplx8_div_rev __kmpc_atomic_cmplx8_mul __kmpc_atomic_cmplx8_mul_cpt __kmpc_atomic_cmplx8_rd __kmpc_atomic_cmplx8_sub __kmpc_atomic_cmplx8_sub_cpt __kmpc_atomic_cmplx8_sub_cpt_rev __kmpc_atomic_cmplx8_sub_rev __kmpc_atomic_cmplx8_swp __kmpc_atomic_cmplx8_wr __kmpc_atomic_cmplx10_add __kmpc_atomic_cmplx10_add_cpt __kmpc_atomic_cmplx10_div __kmpc_atomic_cmplx10_div_cpt __kmpc_atomic_cmplx10_div_cpt_rev __kmpc_atomic_cmplx10_div_rev __kmpc_atomic_cmplx10_mul __kmpc_atomic_cmplx10_mul_cpt __kmpc_atomic_cmplx10_rd __kmpc_atomic_cmplx10_sub __kmpc_atomic_cmplx10_sub_cpt __kmpc_atomic_cmplx10_sub_cpt_rev __kmpc_atomic_cmplx10_sub_rev __kmpc_atomic_cmplx10_swp __kmpc_atomic_cmplx10_wr __kmpc_atomic_cmplx16_add __kmpc_atomic_cmplx16_add_cpt __kmpc_atomic_cmplx16_div __kmpc_atomic_cmplx16_div_cpt __kmpc_atomic_cmplx16_div_cpt_rev __kmpc_atomic_cmplx16_div_rev __kmpc_atomic_cmplx16_mul __kmpc_atomic_cmplx16_mul_cpt __kmpc_atomic_cmplx16_rd __kmpc_atomic_cmplx16_sub __kmpc_atomic_cmplx16_sub_cpt __kmpc_atomic_cmplx16_sub_cpt_rev __kmpc_atomic_cmplx16_swp __kmpc_atomic_cmplx16_wr @endcode */ /*! 
@ingroup ATOMIC_OPS @{ */ /* * Global vars */ #ifndef KMP_GOMP_COMPAT int __kmp_atomic_mode = 1; // Intel perf #else int __kmp_atomic_mode = 2; // GOMP compatibility #endif /* KMP_GOMP_COMPAT */ KMP_ALIGN(128) kmp_atomic_lock_t __kmp_atomic_lock; /* Control access to all user coded atomics in Gnu compat mode */ kmp_atomic_lock_t __kmp_atomic_lock_1i; /* Control access to all user coded atomics for 1-byte fixed data types */ kmp_atomic_lock_t __kmp_atomic_lock_2i; /* Control access to all user coded atomics for 2-byte fixed data types */ kmp_atomic_lock_t __kmp_atomic_lock_4i; /* Control access to all user coded atomics for 4-byte fixed data types */ kmp_atomic_lock_t __kmp_atomic_lock_4r; /* Control access to all user coded atomics for kmp_real32 data type */ kmp_atomic_lock_t __kmp_atomic_lock_8i; /* Control access to all user coded atomics for 8-byte fixed data types */ kmp_atomic_lock_t __kmp_atomic_lock_8r; /* Control access to all user coded atomics for kmp_real64 data type */ kmp_atomic_lock_t __kmp_atomic_lock_8c; /* Control access to all user coded atomics for complex byte data type */ kmp_atomic_lock_t __kmp_atomic_lock_10r; /* Control access to all user coded atomics for long double data type */ kmp_atomic_lock_t __kmp_atomic_lock_16r; /* Control access to all user coded atomics for _Quad data type */ kmp_atomic_lock_t __kmp_atomic_lock_16c; /* Control access to all user coded atomics for double complex data type*/ kmp_atomic_lock_t __kmp_atomic_lock_20c; /* Control access to all user coded atomics for long double complex type*/ kmp_atomic_lock_t __kmp_atomic_lock_32c; /* Control access to all user coded atomics for _Quad complex data type */ /* 2007-03-02: Without "volatile" specifier in OP_CMPXCHG and MIN_MAX_CMPXCHG we have a bug on *_32 and *_32e. This is just a temporary workaround for the problem. It seems the right solution is writing OP_CMPXCHG and MIN_MAX_CMPXCHG routines in assembler language. 
*/ #define KMP_ATOMIC_VOLATILE volatile #if ( KMP_ARCH_X86 ) && KMP_HAVE_QUAD static inline void operator +=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q += rhs.q; }; static inline void operator -=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q -= rhs.q; }; static inline void operator *=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q *= rhs.q; }; static inline void operator /=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q /= rhs.q; }; static inline bool operator < ( Quad_a4_t & lhs, Quad_a4_t & rhs ) { return lhs.q < rhs.q; } static inline bool operator > ( Quad_a4_t & lhs, Quad_a4_t & rhs ) { return lhs.q > rhs.q; } static inline void operator +=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q += rhs.q; }; static inline void operator -=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q -= rhs.q; }; static inline void operator *=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q *= rhs.q; }; static inline void operator /=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q /= rhs.q; }; static inline bool operator < ( Quad_a16_t & lhs, Quad_a16_t & rhs ) { return lhs.q < rhs.q; } static inline bool operator > ( Quad_a16_t & lhs, Quad_a16_t & rhs ) { return lhs.q > rhs.q; } static inline void operator +=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q += rhs.q; }; static inline void operator -=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q -= rhs.q; }; static inline void operator *=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q *= rhs.q; }; static inline void operator /=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q /= rhs.q; }; static inline void operator +=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q += rhs.q; }; static inline void operator -=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q -= rhs.q; }; static inline void operator *=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q *= rhs.q; }; static inline void operator /=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q /= 
rhs.q; }; #endif /* ------------------------------------------------------------------------ */ /* ATOMIC implementation routines */ /* one routine for each operation and operand type */ /* ------------------------------------------------------------------------ */ // All routines declarations looks like // void __kmpc_atomic_RTYPE_OP( ident_t*, int, TYPE *lhs, TYPE rhs ); // ------------------------------------------------------------------------ #define KMP_CHECK_GTID \ if ( gtid == KMP_GTID_UNKNOWN ) { \ gtid = __kmp_entry_gtid(); \ } // check and get gtid when needed // Beginning of a definition (provides name, parameters, gebug trace) // TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed) // OP_ID - operation identifier (add, sub, mul, ...) // TYPE - operands' type #define ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE, RET_TYPE) \ RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------ // Lock variables used for critical sections for various size operands #define ATOMIC_LOCK0 __kmp_atomic_lock // all types, for Gnu compat #define ATOMIC_LOCK1i __kmp_atomic_lock_1i // char #define ATOMIC_LOCK2i __kmp_atomic_lock_2i // short #define ATOMIC_LOCK4i __kmp_atomic_lock_4i // long int #define ATOMIC_LOCK4r __kmp_atomic_lock_4r // float #define ATOMIC_LOCK8i __kmp_atomic_lock_8i // long long int #define ATOMIC_LOCK8r __kmp_atomic_lock_8r // double #define ATOMIC_LOCK8c __kmp_atomic_lock_8c // float complex #define ATOMIC_LOCK10r __kmp_atomic_lock_10r // long double #define ATOMIC_LOCK16r __kmp_atomic_lock_16r // _Quad #define ATOMIC_LOCK16c __kmp_atomic_lock_16c // double complex #define ATOMIC_LOCK20c __kmp_atomic_lock_20c // long double complex #define ATOMIC_LOCK32c __kmp_atomic_lock_32c // _Quad complex // 
------------------------------------------------------------------------ // Operation on *lhs, rhs bound by critical section // OP - operator (it's supposed to contain an assignment) // LCK_ID - lock identifier // Note: don't check gtid as it should always be valid // 1, 2-byte - expect valid parameter, other - check before this macro #define OP_CRITICAL(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ (*lhs) OP (rhs); \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); // ------------------------------------------------------------------------ // For GNU compatibility, we may need to use a critical section, // even though it is not required by the ISA. // // On IA-32 architecture, all atomic operations except for fixed 4 byte add, // sub, and bitwise logical ops, and 1 & 2 byte logical ops use a common // critical section. On Intel(R) 64, all atomic operations are done with fetch // and add or compare and exchange. Therefore, the FLAG parameter to this // macro is either KMP_ARCH_X86 or 0 (or 1, for Intel-specific extension which // require a critical section, where we predict that they will be implemented // in the Gnu codegen by calling GOMP_atomic_start() / GOMP_atomic_end()). // // When the OP_GOMP_CRITICAL macro is used in a *CRITICAL* macro construct, // the FLAG parameter should always be 1. If we know that we will be using // a critical section, then we want to make certain that we use the generic // lock __kmp_atomic_lock to protect the atomic update, and not of of the // locks that are specialized based upon the size or type of the data. // // If FLAG is 0, then we are relying on dead code elimination by the build // compiler to get rid of the useless block of code, and save a needless // branch at runtime. 
// #ifdef KMP_GOMP_COMPAT # define OP_GOMP_CRITICAL(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL( OP, 0 ); \ return; \ } # else # define OP_GOMP_CRITICAL(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ #if KMP_MIC # define KMP_DO_PAUSE _mm_delay_32( 1 ) #else # define KMP_DO_PAUSE KMP_CPU_PAUSE() #endif /* KMP_MIC */ // ------------------------------------------------------------------------ // Operation on *lhs, rhs using "compare_and_store" routine // TYPE - operands' type // BITS - size in bits, used to distinguish low level calls // OP - operator #define OP_CMPXCHG(TYPE,BITS,OP) \ { \ TYPE old_value, new_value; \ old_value = *(TYPE volatile *)lhs; \ new_value = old_value OP rhs; \ while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ { \ KMP_DO_PAUSE; \ \ old_value = *(TYPE volatile *)lhs; \ new_value = old_value OP rhs; \ } \ } #if USE_CMPXCHG_FIX // 2007-06-25: // workaround for C78287 (complex(kind=4) data type) // lin_32, lin_32e, win_32 and win_32e are affected (I verified the asm) // Compiler ignores the volatile qualifier of the temp_val in the OP_CMPXCHG macro. // This is a problem of the compiler. // Related tracker is C76005, targeted to 11.0. // I verified the asm of the workaround. #define OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \ { \ struct _sss { \ TYPE cmp; \ kmp_int##BITS *vvv; \ }; \ struct _sss old_value, new_value; \ old_value.vvv = ( kmp_int##BITS * )&old_value.cmp; \ new_value.vvv = ( kmp_int##BITS * )&new_value.cmp; \ *old_value.vvv = * ( volatile kmp_int##BITS * ) lhs; \ new_value.cmp = old_value.cmp OP rhs; \ while ( ! 
KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) old_value.vvv, \ *VOLATILE_CAST(kmp_int##BITS *) new_value.vvv ) ) \ { \ KMP_DO_PAUSE; \ \ *old_value.vvv = * ( volatile kmp_int##BITS * ) lhs; \ new_value.cmp = old_value.cmp OP rhs; \ } \ } // end of the first part of the workaround for C78287 #endif // USE_CMPXCHG_FIX #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------ // X86 or X86_64: no alignment problems ==================================== #define ATOMIC_FIXED_ADD(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ /* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \ KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG(TYPE,BITS,OP) \ } #if USE_CMPXCHG_FIX // ------------------------------------------------------------------------- // workaround for C78287 (complex(kind=4) data type) #define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \ } // end of the second part of the workaround for C78287 #endif #else // ------------------------------------------------------------------------- // Code for other architectures that don't handle unaligned accesses. #define ATOMIC_FIXED_ADD(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! 
( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ /* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \ KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ } \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ } \ } #if USE_CMPXCHG_FIX // ------------------------------------------------------------------------- // workaround for C78287 (complex(kind=4) data type) #define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ } \ } // end of the second part of the workaround for C78287 #endif // USE_CMPXCHG_FIX #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ // Routines for ATOMIC 4-byte operands addition and subtraction ATOMIC_FIXED_ADD( fixed4, add, kmp_int32, 32, +, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add ATOMIC_FIXED_ADD( fixed4, sub, kmp_int32, 32, -, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub ATOMIC_CMPXCHG( float4, add, kmp_real32, 32, +, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add ATOMIC_CMPXCHG( float4, sub, kmp_real32, 32, -, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub // Routines for ATOMIC 8-byte operands addition and subtraction ATOMIC_FIXED_ADD( fixed8, add, kmp_int64, 64, +, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add ATOMIC_FIXED_ADD( fixed8, sub, kmp_int64, 64, -, 8i, 7, KMP_ARCH_X86 ) // 
__kmpc_atomic_fixed8_sub ATOMIC_CMPXCHG( float8, add, kmp_real64, 64, +, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add ATOMIC_CMPXCHG( float8, sub, kmp_real64, 64, -, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub // ------------------------------------------------------------------------ // Entries definition for integer operands // TYPE_ID - operands type and size (fixed4, float4) // OP_ID - operation identifier (add, sub, mul, ...) // TYPE - operand type // BITS - size in bits, used to distinguish low level calls // OP - operator (used in critical section) // LCK_ID - lock identifier, used to possibly distinguish lock variable // MASK - used for alignment check // TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,MASK,GOMP_FLAG // ------------------------------------------------------------------------ // Routines for ATOMIC integer operands, other operators // ------------------------------------------------------------------------ // TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG ATOMIC_CMPXCHG( fixed1, add, kmp_int8, 8, +, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add ATOMIC_CMPXCHG( fixed1, andb, kmp_int8, 8, &, 1i, 0, 0 ) // __kmpc_atomic_fixed1_andb ATOMIC_CMPXCHG( fixed1, div, kmp_int8, 8, /, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div ATOMIC_CMPXCHG( fixed1u, div, kmp_uint8, 8, /, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div ATOMIC_CMPXCHG( fixed1, mul, kmp_int8, 8, *, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul ATOMIC_CMPXCHG( fixed1, orb, kmp_int8, 8, |, 1i, 0, 0 ) // __kmpc_atomic_fixed1_orb ATOMIC_CMPXCHG( fixed1, shl, kmp_int8, 8, <<, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl ATOMIC_CMPXCHG( fixed1, shr, kmp_int8, 8, >>, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr ATOMIC_CMPXCHG( fixed1u, shr, kmp_uint8, 8, >>, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr ATOMIC_CMPXCHG( fixed1, sub, kmp_int8, 8, -, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub ATOMIC_CMPXCHG( fixed1, xor, kmp_int8, 8, ^, 1i, 0, 0 ) // 
__kmpc_atomic_fixed1_xor ATOMIC_CMPXCHG( fixed2, add, kmp_int16, 16, +, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add ATOMIC_CMPXCHG( fixed2, andb, kmp_int16, 16, &, 2i, 1, 0 ) // __kmpc_atomic_fixed2_andb ATOMIC_CMPXCHG( fixed2, div, kmp_int16, 16, /, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div ATOMIC_CMPXCHG( fixed2u, div, kmp_uint16, 16, /, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div ATOMIC_CMPXCHG( fixed2, mul, kmp_int16, 16, *, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul ATOMIC_CMPXCHG( fixed2, orb, kmp_int16, 16, |, 2i, 1, 0 ) // __kmpc_atomic_fixed2_orb ATOMIC_CMPXCHG( fixed2, shl, kmp_int16, 16, <<, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl ATOMIC_CMPXCHG( fixed2, shr, kmp_int16, 16, >>, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr ATOMIC_CMPXCHG( fixed2u, shr, kmp_uint16, 16, >>, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr ATOMIC_CMPXCHG( fixed2, sub, kmp_int16, 16, -, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub ATOMIC_CMPXCHG( fixed2, xor, kmp_int16, 16, ^, 2i, 1, 0 ) // __kmpc_atomic_fixed2_xor ATOMIC_CMPXCHG( fixed4, andb, kmp_int32, 32, &, 4i, 3, 0 ) // __kmpc_atomic_fixed4_andb ATOMIC_CMPXCHG( fixed4, div, kmp_int32, 32, /, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div ATOMIC_CMPXCHG( fixed4u, div, kmp_uint32, 32, /, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div ATOMIC_CMPXCHG( fixed4, mul, kmp_int32, 32, *, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_mul ATOMIC_CMPXCHG( fixed4, orb, kmp_int32, 32, |, 4i, 3, 0 ) // __kmpc_atomic_fixed4_orb ATOMIC_CMPXCHG( fixed4, shl, kmp_int32, 32, <<, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl ATOMIC_CMPXCHG( fixed4, shr, kmp_int32, 32, >>, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr ATOMIC_CMPXCHG( fixed4u, shr, kmp_uint32, 32, >>, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr ATOMIC_CMPXCHG( fixed4, xor, kmp_int32, 32, ^, 4i, 3, 0 ) // __kmpc_atomic_fixed4_xor ATOMIC_CMPXCHG( fixed8, andb, kmp_int64, 64, &, 8i, 7, KMP_ARCH_X86 ) // 
__kmpc_atomic_fixed8_andb ATOMIC_CMPXCHG( fixed8, div, kmp_int64, 64, /, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div ATOMIC_CMPXCHG( fixed8u, div, kmp_uint64, 64, /, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div ATOMIC_CMPXCHG( fixed8, mul, kmp_int64, 64, *, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul ATOMIC_CMPXCHG( fixed8, orb, kmp_int64, 64, |, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orb ATOMIC_CMPXCHG( fixed8, shl, kmp_int64, 64, <<, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl ATOMIC_CMPXCHG( fixed8, shr, kmp_int64, 64, >>, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr ATOMIC_CMPXCHG( fixed8u, shr, kmp_uint64, 64, >>, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr ATOMIC_CMPXCHG( fixed8, xor, kmp_int64, 64, ^, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_xor ATOMIC_CMPXCHG( float4, div, kmp_real32, 32, /, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div ATOMIC_CMPXCHG( float4, mul, kmp_real32, 32, *, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul ATOMIC_CMPXCHG( float8, div, kmp_real64, 64, /, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div ATOMIC_CMPXCHG( float8, mul, kmp_real64, 64, *, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul // TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG /* ------------------------------------------------------------------------ */ /* Routines for C/C++ Reduction operators && and || */ /* ------------------------------------------------------------------------ */ // ------------------------------------------------------------------------ // Need separate macros for &&, || because there is no combined assignment // TODO: eliminate ATOMIC_CRIT_{L,EQV} macros as not used #define ATOMIC_CRIT_L(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL( = *lhs OP, GOMP_FLAG ) \ OP_CRITICAL( = *lhs OP, LCK_ID ) \ } #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------ // X86 or X86_64: no alignment 
problems =================================== #define ATOMIC_CMPX_L(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL( = *lhs OP, GOMP_FLAG ) \ OP_CMPXCHG(TYPE,BITS,OP) \ } #else // ------------------------------------------------------------------------ // Code for other architectures that don't handle unaligned accesses. #define ATOMIC_CMPX_L(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(= *lhs OP,GOMP_FLAG) \ if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(= *lhs OP,LCK_ID) /* unaligned - use critical */ \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ ATOMIC_CMPX_L( fixed1, andl, char, 8, &&, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_andl ATOMIC_CMPX_L( fixed1, orl, char, 8, ||, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_orl ATOMIC_CMPX_L( fixed2, andl, short, 16, &&, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_andl ATOMIC_CMPX_L( fixed2, orl, short, 16, ||, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_orl ATOMIC_CMPX_L( fixed4, andl, kmp_int32, 32, &&, 4i, 3, 0 ) // __kmpc_atomic_fixed4_andl ATOMIC_CMPX_L( fixed4, orl, kmp_int32, 32, ||, 4i, 3, 0 ) // __kmpc_atomic_fixed4_orl ATOMIC_CMPX_L( fixed8, andl, kmp_int64, 64, &&, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andl ATOMIC_CMPX_L( fixed8, orl, kmp_int64, 64, ||, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orl /* ------------------------------------------------------------------------- */ /* Routines for Fortran operators that matched no one in C: */ /* MAX, MIN, .EQV., .NEQV. */ /* Operators .AND., .OR. 
are covered by __kmpc_atomic_*_{andl,orl} */ /* Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor} */ /* ------------------------------------------------------------------------- */ // ------------------------------------------------------------------------- // MIN and MAX need separate macros // OP - operator to check if we need any actions? #define MIN_MAX_CRITSECT(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if ( *lhs OP rhs ) { /* still need actions? */ \ *lhs = rhs; \ } \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); // ------------------------------------------------------------------------- #ifdef KMP_GOMP_COMPAT #define GOMP_MIN_MAX_CRITSECT(OP,FLAG) \ if (( FLAG ) && ( __kmp_atomic_mode == 2 )) { \ KMP_CHECK_GTID; \ MIN_MAX_CRITSECT( OP, 0 ); \ return; \ } #else #define GOMP_MIN_MAX_CRITSECT(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------- #define MIN_MAX_CMPXCHG(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value; \ temp_val = *lhs; \ old_value = temp_val; \ while ( old_value OP rhs && /* still need actions? */ \ ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &rhs ) ) \ { \ KMP_CPU_PAUSE(); \ temp_val = *lhs; \ old_value = temp_val; \ } \ } // ------------------------------------------------------------------------- // 1-byte, 2-byte operands - use critical section #define MIN_MAX_CRITICAL(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ if ( *lhs OP rhs ) { /* need actions? 
*/ \ GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \ MIN_MAX_CRITSECT(OP,LCK_ID) \ } \ } #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------- // X86 or X86_64: no alignment problems ==================================== #define MIN_MAX_COMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ if ( *lhs OP rhs ) { \ GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \ MIN_MAX_CMPXCHG(TYPE,BITS,OP) \ } \ } #else // ------------------------------------------------------------------------- // Code for other architectures that don't handle unaligned accesses. #define MIN_MAX_COMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ if ( *lhs OP rhs ) { \ GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \ if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ MIN_MAX_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ MIN_MAX_CRITSECT(OP,LCK_ID) /* unaligned address */ \ } \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ MIN_MAX_COMPXCHG( fixed1, max, char, 8, <, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_max MIN_MAX_COMPXCHG( fixed1, min, char, 8, >, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_min MIN_MAX_COMPXCHG( fixed2, max, short, 16, <, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_max MIN_MAX_COMPXCHG( fixed2, min, short, 16, >, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_min MIN_MAX_COMPXCHG( fixed4, max, kmp_int32, 32, <, 4i, 3, 0 ) // __kmpc_atomic_fixed4_max MIN_MAX_COMPXCHG( fixed4, min, kmp_int32, 32, >, 4i, 3, 0 ) // __kmpc_atomic_fixed4_min MIN_MAX_COMPXCHG( fixed8, max, kmp_int64, 64, <, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_max MIN_MAX_COMPXCHG( fixed8, min, kmp_int64, 64, >, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_min MIN_MAX_COMPXCHG( float4, max, kmp_real32, 32, <, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_max MIN_MAX_COMPXCHG( float4, min, kmp_real32, 32, >, 4r, 3, KMP_ARCH_X86 ) // 
__kmpc_atomic_float4_min MIN_MAX_COMPXCHG( float8, max, kmp_real64, 64, <, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_max MIN_MAX_COMPXCHG( float8, min, kmp_real64, 64, >, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_min #if KMP_HAVE_QUAD MIN_MAX_CRITICAL( float16, max, QUAD_LEGACY, <, 16r, 1 ) // __kmpc_atomic_float16_max MIN_MAX_CRITICAL( float16, min, QUAD_LEGACY, >, 16r, 1 ) // __kmpc_atomic_float16_min #if ( KMP_ARCH_X86 ) MIN_MAX_CRITICAL( float16, max_a16, Quad_a16_t, <, 16r, 1 ) // __kmpc_atomic_float16_max_a16 MIN_MAX_CRITICAL( float16, min_a16, Quad_a16_t, >, 16r, 1 ) // __kmpc_atomic_float16_min_a16 #endif #endif // ------------------------------------------------------------------------ // Need separate macros for .EQV. because of the need of complement (~) // OP ignored for critical sections, ^=~ used instead #define ATOMIC_CRIT_EQV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(^=~,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL(^=~,LCK_ID) /* send assignment and complement */ \ } // ------------------------------------------------------------------------ #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------ // X86 or X86_64: no alignment problems =================================== #define ATOMIC_CMPX_EQV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(^=~,GOMP_FLAG) /* send assignment */ \ OP_CMPXCHG(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------ #else // ------------------------------------------------------------------------ // Code for other architectures that don't handle unaligned accesses. #define ATOMIC_CMPX_EQV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(^=~,GOMP_FLAG) \ if ( ! 
( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(^=~,LCK_ID) /* unaligned address - use critical */ \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ ATOMIC_CMPXCHG( fixed1, neqv, kmp_int8, 8, ^, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_neqv ATOMIC_CMPXCHG( fixed2, neqv, kmp_int16, 16, ^, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_neqv ATOMIC_CMPXCHG( fixed4, neqv, kmp_int32, 32, ^, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_neqv ATOMIC_CMPXCHG( fixed8, neqv, kmp_int64, 64, ^, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_neqv ATOMIC_CMPX_EQV( fixed1, eqv, kmp_int8, 8, ^~, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_eqv ATOMIC_CMPX_EQV( fixed2, eqv, kmp_int16, 16, ^~, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_eqv ATOMIC_CMPX_EQV( fixed4, eqv, kmp_int32, 32, ^~, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_eqv ATOMIC_CMPX_EQV( fixed8, eqv, kmp_int64, 64, ^~, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_eqv // ------------------------------------------------------------------------ // Routines for Extended types: long double, _Quad, complex flavours (use critical section) // TYPE_ID, OP_ID, TYPE - detailed above // OP - operator // LCK_ID - lock identifier, used to possibly distinguish lock variable #define ATOMIC_CRITICAL(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL(OP##=,LCK_ID) /* send assignment */ \ } /* ------------------------------------------------------------------------- */ // routines for long double type ATOMIC_CRITICAL( float10, add, long double, +, 10r, 1 ) // __kmpc_atomic_float10_add ATOMIC_CRITICAL( float10, sub, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub ATOMIC_CRITICAL( float10, mul, long double, *, 10r, 1 ) // __kmpc_atomic_float10_mul ATOMIC_CRITICAL( float10, div, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div #if 
KMP_HAVE_QUAD // routines for _Quad type ATOMIC_CRITICAL( float16, add, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_add ATOMIC_CRITICAL( float16, sub, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub ATOMIC_CRITICAL( float16, mul, QUAD_LEGACY, *, 16r, 1 ) // __kmpc_atomic_float16_mul ATOMIC_CRITICAL( float16, div, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL( float16, add_a16, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_add_a16 ATOMIC_CRITICAL( float16, sub_a16, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16 ATOMIC_CRITICAL( float16, mul_a16, Quad_a16_t, *, 16r, 1 ) // __kmpc_atomic_float16_mul_a16 ATOMIC_CRITICAL( float16, div_a16, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16 #endif #endif // routines for complex types #if USE_CMPXCHG_FIX // workaround for C78287 (complex(kind=4) data type) ATOMIC_CMPXCHG_WORKAROUND( cmplx4, add, kmp_cmplx32, 64, +, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_add ATOMIC_CMPXCHG_WORKAROUND( cmplx4, sub, kmp_cmplx32, 64, -, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_sub ATOMIC_CMPXCHG_WORKAROUND( cmplx4, mul, kmp_cmplx32, 64, *, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_mul ATOMIC_CMPXCHG_WORKAROUND( cmplx4, div, kmp_cmplx32, 64, /, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_div // end of the workaround for C78287 #else ATOMIC_CRITICAL( cmplx4, add, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_add ATOMIC_CRITICAL( cmplx4, sub, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub ATOMIC_CRITICAL( cmplx4, mul, kmp_cmplx32, *, 8c, 1 ) // __kmpc_atomic_cmplx4_mul ATOMIC_CRITICAL( cmplx4, div, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div #endif // USE_CMPXCHG_FIX ATOMIC_CRITICAL( cmplx8, add, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_add ATOMIC_CRITICAL( cmplx8, sub, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub ATOMIC_CRITICAL( cmplx8, mul, kmp_cmplx64, *, 16c, 1 ) // __kmpc_atomic_cmplx8_mul ATOMIC_CRITICAL( cmplx8, div, kmp_cmplx64, /, 16c, 1 ) // 
__kmpc_atomic_cmplx8_div ATOMIC_CRITICAL( cmplx10, add, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_add ATOMIC_CRITICAL( cmplx10, sub, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub ATOMIC_CRITICAL( cmplx10, mul, kmp_cmplx80, *, 20c, 1 ) // __kmpc_atomic_cmplx10_mul ATOMIC_CRITICAL( cmplx10, div, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div #if KMP_HAVE_QUAD ATOMIC_CRITICAL( cmplx16, add, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add ATOMIC_CRITICAL( cmplx16, sub, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub ATOMIC_CRITICAL( cmplx16, mul, CPLX128_LEG, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul ATOMIC_CRITICAL( cmplx16, div, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL( cmplx16, add_a16, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_a16 ATOMIC_CRITICAL( cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16 ATOMIC_CRITICAL( cmplx16, mul_a16, kmp_cmplx128_a16_t, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_a16 ATOMIC_CRITICAL( cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16 #endif #endif #if OMP_40_ENABLED // OpenMP 4.0: x = expr binop x for non-commutative operations. 
// Supported only on IA-32 architecture and Intel(R) 64 #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------ // Operation on *lhs, rhs bound by critical section // OP - operator (it's supposed to contain an assignment) // LCK_ID - lock identifier // Note: don't check gtid as it should always be valid // 1, 2-byte - expect valid parameter, other - check before this macro #define OP_CRITICAL_REV(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ (*lhs) = (rhs) OP (*lhs); \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_REV(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_REV( OP, 0 ); \ return; \ } #else #define OP_GOMP_CRITICAL_REV(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // Beginning of a definition (provides name, parameters, gebug trace) // TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed) // OP_ID - operation identifier (add, sub, mul, ...) // TYPE - operands' type #define ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE, RET_TYPE) \ RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID##_rev( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_rev: T#%d\n", gtid )); // ------------------------------------------------------------------------ // Operation on *lhs, rhs using "compare_and_store" routine // TYPE - operands' type // BITS - size in bits, used to distinguish low level calls // OP - operator // Note: temp_val introduced in order to force the compiler to read // *lhs only once (w/o it the compiler reads *lhs twice) #define OP_CMPXCHG_REV(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value, new_value; \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs OP old_value; \ while ( ! 
KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ { \ KMP_DO_PAUSE; \ \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs OP old_value; \ } \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_REV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \ OP_CMPXCHG_REV(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------ // Entries definition for integer operands // TYPE_ID - operands type and size (fixed4, float4) // OP_ID - operation identifier (add, sub, mul, ...) // TYPE - operand type // BITS - size in bits, used to distinguish low level calls // OP - operator (used in critical section) // LCK_ID - lock identifier, used to possibly distinguish lock variable // TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,GOMP_FLAG // ------------------------------------------------------------------------ // Routines for ATOMIC integer operands, other operators // ------------------------------------------------------------------------ // TYPE_ID,OP_ID, TYPE, BITS, OP, LCK_ID, GOMP_FLAG ATOMIC_CMPXCHG_REV( fixed1, div, kmp_int8, 8, /, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_rev ATOMIC_CMPXCHG_REV( fixed1u, div, kmp_uint8, 8, /, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_rev ATOMIC_CMPXCHG_REV( fixed1, shl, kmp_int8, 8, <<, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_rev ATOMIC_CMPXCHG_REV( fixed1, shr, kmp_int8, 8, >>, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_rev ATOMIC_CMPXCHG_REV( fixed1u, shr, kmp_uint8, 8, >>, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_rev ATOMIC_CMPXCHG_REV( fixed1, sub, kmp_int8, 8, -, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_rev ATOMIC_CMPXCHG_REV( fixed2, div, kmp_int16, 16, /, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_rev ATOMIC_CMPXCHG_REV( fixed2u, div, 
kmp_uint16, 16, /, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_rev ATOMIC_CMPXCHG_REV( fixed2, shl, kmp_int16, 16, <<, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_rev ATOMIC_CMPXCHG_REV( fixed2, shr, kmp_int16, 16, >>, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_rev ATOMIC_CMPXCHG_REV( fixed2u, shr, kmp_uint16, 16, >>, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_rev ATOMIC_CMPXCHG_REV( fixed2, sub, kmp_int16, 16, -, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_rev ATOMIC_CMPXCHG_REV( fixed4, div, kmp_int32, 32, /, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_rev ATOMIC_CMPXCHG_REV( fixed4u, div, kmp_uint32, 32, /, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_rev ATOMIC_CMPXCHG_REV( fixed4, shl, kmp_int32, 32, <<, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_rev ATOMIC_CMPXCHG_REV( fixed4, shr, kmp_int32, 32, >>, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_rev ATOMIC_CMPXCHG_REV( fixed4u, shr, kmp_uint32, 32, >>, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_rev ATOMIC_CMPXCHG_REV( fixed4, sub, kmp_int32, 32, -, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_sub_rev ATOMIC_CMPXCHG_REV( fixed8, div, kmp_int64, 64, /, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_rev ATOMIC_CMPXCHG_REV( fixed8u, div, kmp_uint64, 64, /, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_rev ATOMIC_CMPXCHG_REV( fixed8, shl, kmp_int64, 64, <<, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_rev ATOMIC_CMPXCHG_REV( fixed8, shr, kmp_int64, 64, >>, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_rev ATOMIC_CMPXCHG_REV( fixed8u, shr, kmp_uint64, 64, >>, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_rev ATOMIC_CMPXCHG_REV( fixed8, sub, kmp_int64, 64, -, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_rev ATOMIC_CMPXCHG_REV( float4, div, kmp_real32, 32, /, 4r, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_rev ATOMIC_CMPXCHG_REV( float4, sub, kmp_real32, 32, -, 4r, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_rev ATOMIC_CMPXCHG_REV( float8, div, kmp_real64, 64, /, 8r, 
KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_rev ATOMIC_CMPXCHG_REV( float8, sub, kmp_real64, 64, -, 8r, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_rev // TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID, GOMP_FLAG // ------------------------------------------------------------------------ // Routines for Extended types: long double, _Quad, complex flavours (use critical section) // TYPE_ID, OP_ID, TYPE - detailed above // OP - operator // LCK_ID - lock identifier, used to possibly distinguish lock variable #define ATOMIC_CRITICAL_REV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \ OP_CRITICAL_REV(OP,LCK_ID) \ } /* ------------------------------------------------------------------------- */ // routines for long double type ATOMIC_CRITICAL_REV( float10, sub, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_rev ATOMIC_CRITICAL_REV( float10, div, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_rev #if KMP_HAVE_QUAD // routines for _Quad type ATOMIC_CRITICAL_REV( float16, sub, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_rev ATOMIC_CRITICAL_REV( float16, div, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_rev #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_REV( float16, sub_a16, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_rev ATOMIC_CRITICAL_REV( float16, div_a16, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_rev #endif #endif // routines for complex types ATOMIC_CRITICAL_REV( cmplx4, sub, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_rev ATOMIC_CRITICAL_REV( cmplx4, div, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_rev ATOMIC_CRITICAL_REV( cmplx8, sub, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_rev ATOMIC_CRITICAL_REV( cmplx8, div, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_rev ATOMIC_CRITICAL_REV( cmplx10, sub, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_rev ATOMIC_CRITICAL_REV( cmplx10, div, kmp_cmplx80, /, 20c, 1 ) // 
__kmpc_atomic_cmplx10_div_rev #if KMP_HAVE_QUAD ATOMIC_CRITICAL_REV( cmplx16, sub, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_rev ATOMIC_CRITICAL_REV( cmplx16, div, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_rev #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_REV( cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_rev ATOMIC_CRITICAL_REV( cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_rev #endif #endif #endif //KMP_ARCH_X86 || KMP_ARCH_X86_64 // End of OpenMP 4.0: x = expr binop x for non-commutative operations. #endif //OMP_40_ENABLED /* ------------------------------------------------------------------------ */ /* Routines for mixed types of LHS and RHS, when RHS is "larger" */ /* Note: in order to reduce the total number of types combinations */ /* it is supposed that compiler converts RHS to longest floating type,*/ /* that is _Quad, before call to any of these routines */ /* Conversion to _Quad will be done by the compiler during calculation, */ /* conversion back to TYPE - before the assignment, like: */ /* *lhs = (TYPE)( (_Quad)(*lhs) OP rhs ) */ /* Performance penalty expected because of SW emulation use */ /* ------------------------------------------------------------------------ */ #define ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ void __kmpc_atomic_##TYPE_ID##_##OP_ID##_##RTYPE_ID( ident_t *id_ref, int gtid, TYPE * lhs, RTYPE rhs ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_" #RTYPE_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------- #define ATOMIC_CRITICAL_FP(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL(OP##=,LCK_ID) /* send assignment */ \ } // ------------------------------------------------------------------------- #if 
KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------- // X86 or X86_64: no alignment problems ==================================== #define ATOMIC_CMPXCHG_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------- #else // ------------------------------------------------------------------------ // Code for other architectures that don't handle unaligned accesses. #define ATOMIC_CMPXCHG_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ // RHS=float8 ATOMIC_CMPXCHG_MIX( fixed1, char, mul, 8, *, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_float8 ATOMIC_CMPXCHG_MIX( fixed1, char, div, 8, /, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_float8 ATOMIC_CMPXCHG_MIX( fixed2, short, mul, 16, *, float8, kmp_real64, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_float8 ATOMIC_CMPXCHG_MIX( fixed2, short, div, 16, /, float8, kmp_real64, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_float8 ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, mul, 32, *, float8, kmp_real64, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_float8 ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, div, 32, /, float8, kmp_real64, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_float8 ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, mul, 64, *, float8, kmp_real64, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_float8 ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, div, 64, /, float8, kmp_real64, 8i, 
7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_float8 ATOMIC_CMPXCHG_MIX( float4, kmp_real32, add, 32, +, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_float8 ATOMIC_CMPXCHG_MIX( float4, kmp_real32, sub, 32, -, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_float8 ATOMIC_CMPXCHG_MIX( float4, kmp_real32, mul, 32, *, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_float8 ATOMIC_CMPXCHG_MIX( float4, kmp_real32, div, 32, /, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_float8 // RHS=float16 (deprecated, to be removed when we are sure the compiler does not use them) #if KMP_HAVE_QUAD ATOMIC_CMPXCHG_MIX( fixed1, char, add, 8, +, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_fp ATOMIC_CMPXCHG_MIX( fixed1, char, sub, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_fp ATOMIC_CMPXCHG_MIX( fixed1, char, mul, 8, *, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_fp ATOMIC_CMPXCHG_MIX( fixed1, char, div, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_fp ATOMIC_CMPXCHG_MIX( fixed1u, uchar, div, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_fp ATOMIC_CMPXCHG_MIX( fixed2, short, add, 16, +, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_fp ATOMIC_CMPXCHG_MIX( fixed2, short, sub, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_fp ATOMIC_CMPXCHG_MIX( fixed2, short, mul, 16, *, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_fp ATOMIC_CMPXCHG_MIX( fixed2, short, div, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_fp ATOMIC_CMPXCHG_MIX( fixed2u, ushort, div, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_fp ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, add, 32, +, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add_fp ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, sub, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub_fp 
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, mul, 32, *, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_fp ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, div, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_fp ATOMIC_CMPXCHG_MIX( fixed4u, kmp_uint32, div, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_div_fp ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, add, 64, +, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_fp ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, sub, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_fp ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, mul, 64, *, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_fp ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, div, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_fp ATOMIC_CMPXCHG_MIX( fixed8u, kmp_uint64, div, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_fp ATOMIC_CMPXCHG_MIX( float4, kmp_real32, add, 32, +, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_fp ATOMIC_CMPXCHG_MIX( float4, kmp_real32, sub, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_fp ATOMIC_CMPXCHG_MIX( float4, kmp_real32, mul, 32, *, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_fp ATOMIC_CMPXCHG_MIX( float4, kmp_real32, div, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_fp ATOMIC_CMPXCHG_MIX( float8, kmp_real64, add, 64, +, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_fp ATOMIC_CMPXCHG_MIX( float8, kmp_real64, sub, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_fp ATOMIC_CMPXCHG_MIX( float8, kmp_real64, mul, 64, *, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_fp ATOMIC_CMPXCHG_MIX( float8, kmp_real64, div, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_fp ATOMIC_CRITICAL_FP( float10, long double, add, +, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_add_fp ATOMIC_CRITICAL_FP( float10, long double, sub, -, fp, _Quad, 10r, 1 ) // 
__kmpc_atomic_float10_sub_fp ATOMIC_CRITICAL_FP( float10, long double, mul, *, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_mul_fp ATOMIC_CRITICAL_FP( float10, long double, div, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_fp #endif #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------ // X86 or X86_64: no alignment problems ==================================== #if USE_CMPXCHG_FIX // workaround for C78287 (complex(kind=4) data type) #define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \ } // end of the second part of the workaround for C78287 #else #define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG(TYPE,BITS,OP) \ } #endif // USE_CMPXCHG_FIX #else // ------------------------------------------------------------------------ // Code for other architectures that don't handle unaligned accesses. #define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! 
( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, add, 64, +, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_add_cmplx8 ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, sub, 64, -, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_sub_cmplx8 ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, mul, 64, *, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_mul_cmplx8 ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, div, 64, /, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_div_cmplx8 // READ, WRITE, CAPTURE are supported only on IA-32 architecture and Intel(R) 64 #if KMP_ARCH_X86 || KMP_ARCH_X86_64 ////////////////////////////////////////////////////////////////////////////////////////////////////// // ------------------------------------------------------------------------ // Atomic READ routines // ------------------------------------------------------------------------ // ------------------------------------------------------------------------ // Beginning of a definition (provides name, parameters, gebug trace) // TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed) // OP_ID - operation identifier (add, sub, mul, ...) 
//     TYPE    - operands' type
#define ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE, RET_TYPE)                                 \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * loc )     \
{                                                                                       \
    KMP_DEBUG_ASSERT( __kmp_init_serial );                                              \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));

// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store_ret" routine
//     TYPE - operands' type
//     BITS - size in bits, used to distinguish low level calls
//     OP   - operator
// Note: temp_val introduced in order to force the compiler to read
//       *lhs only once (w/o it the compiler reads *lhs twice)
// TODO: check if it is still necessary
// Return old value regardless of the result of "compare & swap" operation
#define OP_CMPXCHG_READ(TYPE,BITS,OP)                                             \
    {                                                                             \
        TYPE KMP_ATOMIC_VOLATILE temp_val;                                        \
        union f_i_union {                                                         \
            TYPE f_val;                                                           \
            kmp_int##BITS i_val;                                                  \
        };                                                                        \
        union f_i_union old_value;                                                \
        temp_val = *loc;                                                          \
        old_value.f_val = temp_val;                                               \
        old_value.i_val = KMP_COMPARE_AND_STORE_RET##BITS( (kmp_int##BITS *) loc, \
                              *VOLATILE_CAST(kmp_int##BITS *) &old_value.i_val,   \
                              *VOLATILE_CAST(kmp_int##BITS *) &old_value.i_val ); \
        new_value = old_value.f_val;                                              \
        return new_value;                                                         \
    }

// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
//     OP     - operator (it's supposed to contain an assignment)
//     LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
#define OP_CRITICAL_READ(OP,LCK_ID)                                       \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );             \
                                                                          \
    new_value = (*loc);                                                   \
                                                                          \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );

// -------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_READ(OP,FLAG)                                    \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) {                           \
        KMP_CHECK_GTID;                                                   \
        OP_CRITICAL_READ( OP, 0 );                                        \
        return new_value;                                                 \
    }
#else
#define OP_GOMP_CRITICAL_READ(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// -------------------------------------------------------------------------
#define ATOMIC_FIXED_READ(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG)           \
ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE)                                \
    TYPE new_value;                                                       \
    OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG)                                \
    new_value = KMP_TEST_THEN_ADD##BITS( loc, OP 0 );                     \
    return new_value;                                                     \
}
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_READ(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG)         \
ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE)                                \
    TYPE new_value;                                                       \
    OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG)                                \
    OP_CMPXCHG_READ(TYPE,BITS,OP)                                         \
}
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
//     TYPE_ID, OP_ID, TYPE - detailed above
//     OP      - operator
//     LCK_ID  - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL_READ(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG)      \
ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE)                                \
    TYPE new_value;                                                       \
    OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG)  /* send assignment */         \
    OP_CRITICAL_READ(OP,LCK_ID)             /* send assignment */         \
    return new_value;                                                     \
}

// ------------------------------------------------------------------------
// Fix for cmplx4 read (CQ220361) on Windows* OS. Regular routine with return value doesn't work.
// Let's return the read value through the additional parameter.
#if ( KMP_OS_WINDOWS ) #define OP_CRITICAL_READ_WRK(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ (*out) = (*loc); \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_READ_WRK(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_READ_WRK( OP, 0 ); \ } #else #define OP_GOMP_CRITICAL_READ_WRK(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ #define ATOMIC_BEGIN_READ_WRK(TYPE_ID,OP_ID,TYPE) \ void __kmpc_atomic_##TYPE_ID##_##OP_ID( TYPE * out, ident_t *id_ref, int gtid, TYPE * loc ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------ #define ATOMIC_CRITICAL_READ_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_READ_WRK(TYPE_ID,OP_ID,TYPE) \ OP_GOMP_CRITICAL_READ_WRK(OP##=,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL_READ_WRK(OP,LCK_ID) /* send assignment */ \ } #endif // KMP_OS_WINDOWS // ------------------------------------------------------------------------ // TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG ATOMIC_FIXED_READ( fixed4, rd, kmp_int32, 32, +, 0 ) // __kmpc_atomic_fixed4_rd ATOMIC_FIXED_READ( fixed8, rd, kmp_int64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_rd ATOMIC_CMPXCHG_READ( float4, rd, kmp_real32, 32, +, KMP_ARCH_X86 ) // __kmpc_atomic_float4_rd ATOMIC_CMPXCHG_READ( float8, rd, kmp_real64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_float8_rd // !!! 
TODO: Remove lock operations for "char" since it can't be non-atomic ATOMIC_CMPXCHG_READ( fixed1, rd, kmp_int8, 8, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_rd ATOMIC_CMPXCHG_READ( fixed2, rd, kmp_int16, 16, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_rd ATOMIC_CRITICAL_READ( float10, rd, long double, +, 10r, 1 ) // __kmpc_atomic_float10_rd #if KMP_HAVE_QUAD ATOMIC_CRITICAL_READ( float16, rd, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_rd #endif // KMP_HAVE_QUAD // Fix for CQ220361 on Windows* OS #if ( KMP_OS_WINDOWS ) ATOMIC_CRITICAL_READ_WRK( cmplx4, rd, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_rd #else ATOMIC_CRITICAL_READ( cmplx4, rd, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_rd #endif ATOMIC_CRITICAL_READ( cmplx8, rd, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_rd ATOMIC_CRITICAL_READ( cmplx10, rd, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_rd #if KMP_HAVE_QUAD ATOMIC_CRITICAL_READ( cmplx16, rd, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_rd #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_READ( float16, a16_rd, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_a16_rd ATOMIC_CRITICAL_READ( cmplx16, a16_rd, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_rd #endif #endif // ------------------------------------------------------------------------ // Atomic WRITE routines // ------------------------------------------------------------------------ #define ATOMIC_XCHG_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP,GOMP_FLAG) \ KMP_XCHG_FIXED##BITS( lhs, rhs ); \ } // ------------------------------------------------------------------------ #define ATOMIC_XCHG_FLOAT_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP,GOMP_FLAG) \ KMP_XCHG_REAL##BITS( lhs, rhs ); \ } // ------------------------------------------------------------------------ // Operation on *lhs, rhs using "compare_and_store" routine // TYPE - operands' type // 
BITS - size in bits, used to distinguish low level calls // OP - operator // Note: temp_val introduced in order to force the compiler to read // *lhs only once (w/o it the compiler reads *lhs twice) #define OP_CMPXCHG_WR(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value, new_value; \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs; \ while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ { \ KMP_CPU_PAUSE(); \ \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs; \ } \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP,GOMP_FLAG) \ OP_CMPXCHG_WR(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------ // Routines for Extended types: long double, _Quad, complex flavours (use critical section) // TYPE_ID, OP_ID, TYPE - detailed above // OP - operator // LCK_ID - lock identifier, used to possibly distinguish lock variable #define ATOMIC_CRITICAL_WR(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL(OP,LCK_ID) /* send assignment */ \ } // ------------------------------------------------------------------------- ATOMIC_XCHG_WR( fixed1, wr, kmp_int8, 8, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_wr ATOMIC_XCHG_WR( fixed2, wr, kmp_int16, 16, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_wr ATOMIC_XCHG_WR( fixed4, wr, kmp_int32, 32, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_wr #if ( KMP_ARCH_X86 ) ATOMIC_CMPXCHG_WR( fixed8, wr, kmp_int64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_wr #else ATOMIC_XCHG_WR( fixed8, wr, kmp_int64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_wr #endif ATOMIC_XCHG_FLOAT_WR( float4, wr, kmp_real32, 32, =, 
KMP_ARCH_X86 ) // __kmpc_atomic_float4_wr #if ( KMP_ARCH_X86 ) ATOMIC_CMPXCHG_WR( float8, wr, kmp_real64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_float8_wr #else ATOMIC_XCHG_FLOAT_WR( float8, wr, kmp_real64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_float8_wr #endif ATOMIC_CRITICAL_WR( float10, wr, long double, =, 10r, 1 ) // __kmpc_atomic_float10_wr #if KMP_HAVE_QUAD ATOMIC_CRITICAL_WR( float16, wr, QUAD_LEGACY, =, 16r, 1 ) // __kmpc_atomic_float16_wr #endif ATOMIC_CRITICAL_WR( cmplx4, wr, kmp_cmplx32, =, 8c, 1 ) // __kmpc_atomic_cmplx4_wr ATOMIC_CRITICAL_WR( cmplx8, wr, kmp_cmplx64, =, 16c, 1 ) // __kmpc_atomic_cmplx8_wr ATOMIC_CRITICAL_WR( cmplx10, wr, kmp_cmplx80, =, 20c, 1 ) // __kmpc_atomic_cmplx10_wr #if KMP_HAVE_QUAD ATOMIC_CRITICAL_WR( cmplx16, wr, CPLX128_LEG, =, 32c, 1 ) // __kmpc_atomic_cmplx16_wr #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_WR( float16, a16_wr, Quad_a16_t, =, 16r, 1 ) // __kmpc_atomic_float16_a16_wr ATOMIC_CRITICAL_WR( cmplx16, a16_wr, kmp_cmplx128_a16_t, =, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_wr #endif #endif // ------------------------------------------------------------------------ // Atomic CAPTURE routines // ------------------------------------------------------------------------ // Beginning of a definition (provides name, parameters, gebug trace) // TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed) // OP_ID - operation identifier (add, sub, mul, ...) 
//     TYPE    - operands' type
// The generated capture routine returns either the value of *lhs after the
// operation (flag != 0) or the value before it (flag == 0).
#define ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,RET_TYPE)                     \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, int flag ) \
{                                                                         \
    KMP_DEBUG_ASSERT( __kmp_init_serial );                                \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));

// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
//     OP     - operator (it's supposed to contain an assignment)
//     LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
#define OP_CRITICAL_CPT(OP,LCK_ID)                                        \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );             \
                                                                          \
    if( flag ) {                                                          \
        (*lhs) OP rhs;                                                    \
        new_value = (*lhs);                                               \
    } else {                                                              \
        new_value = (*lhs);                                               \
        (*lhs) OP rhs;                                                    \
    }                                                                     \
                                                                          \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );             \
    return new_value;

// ------------------------------------------------------------------------
// GOMP compatibility: in mode 2, fall back to the global critical section.
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_CPT(OP,FLAG)                                     \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) {                           \
        KMP_CHECK_GTID;                                                   \
        OP_CRITICAL_CPT( OP##=, 0 );                                      \
    }
#else
#define OP_GOMP_CRITICAL_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
//     TYPE    - operands' type
//     BITS    - size in bits, used to distinguish low level calls
//     OP      - operator
// Note: temp_val introduced in order to force the compiler to read
//       *lhs only once (w/o it the compiler reads *lhs twice)
#define OP_CMPXCHG_CPT(TYPE,BITS,OP)                                      \
    {                                                                     \
        TYPE KMP_ATOMIC_VOLATILE temp_val;                                \
        TYPE old_value, new_value;                                        \
        temp_val = *lhs;                                                  \
        old_value = temp_val;                                             \
        new_value = old_value OP rhs;                                     \
        while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value,         \
                      *VOLATILE_CAST(kmp_int##BITS *) &new_value ) )      \
        {                                                                 \
            KMP_CPU_PAUSE();                                              \
                                                                          \
            temp_val = *lhs;                                              \
            old_value = temp_val;                                         \
            new_value = old_value OP rhs;                                 \
        }                                                                 \
        if( flag ) {                                                      \
            return new_value;                                             \
        } else                                                            \
            return old_value;                                             \
    }

// -------------------------------------------------------------------------
// Generic CAS-based capture entry point.
#define ATOMIC_CMPXCHG_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG)          \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE)                                 \
    TYPE new_value;                                                       \
    OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG)                                    \
    OP_CMPXCHG_CPT(TYPE,BITS,OP)                                          \
}

// -------------------------------------------------------------------------
// add/sub capture for 4/8-byte integers: a single atomic fetch-and-add.
#define ATOMIC_FIXED_ADD_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG)        \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE)                                 \
    TYPE old_value, new_value;                                            \
    OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG)                                    \
    /* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */     \
    old_value = KMP_TEST_THEN_ADD##BITS( lhs, OP rhs );                   \
    if( flag ) {                                                          \
        return old_value OP rhs;                                          \
    } else                                                                \
        return old_value;                                                 \
}
// -------------------------------------------------------------------------

ATOMIC_FIXED_ADD_CPT( fixed4, add_cpt, kmp_int32, 32, +, 0 )             // __kmpc_atomic_fixed4_add_cpt
ATOMIC_FIXED_ADD_CPT( fixed4, sub_cpt, kmp_int32, 32, -, 0 )             // __kmpc_atomic_fixed4_sub_cpt
ATOMIC_FIXED_ADD_CPT( fixed8, add_cpt, kmp_int64, 64, +, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_add_cpt
ATOMIC_FIXED_ADD_CPT( fixed8, sub_cpt, kmp_int64, 64, -, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_sub_cpt

ATOMIC_CMPXCHG_CPT( float4, add_cpt, kmp_real32, 32, +, KMP_ARCH_X86 )   // __kmpc_atomic_float4_add_cpt
ATOMIC_CMPXCHG_CPT( float4, sub_cpt, kmp_real32, 32, -, KMP_ARCH_X86 )   // __kmpc_atomic_float4_sub_cpt
ATOMIC_CMPXCHG_CPT( float8, add_cpt, kmp_real64, 64, +, KMP_ARCH_X86 )   // __kmpc_atomic_float8_add_cpt
ATOMIC_CMPXCHG_CPT( float8, sub_cpt, kmp_real64, 64, -, KMP_ARCH_X86 )   // __kmpc_atomic_float8_sub_cpt

//
// ------------------------------------------------------------------------
// Entries definition for integer operands
//     TYPE_ID - operands type and size (fixed4, float4)
//     OP_ID   - operation identifier (add, sub, mul, ...)
//     TYPE    - operand type
//     BITS    - size in bits, used to distinguish low level calls
//     OP      - operator (used in critical section)
//               TYPE_ID,OP_ID, TYPE, BITS,OP,GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for ATOMIC integer operands, other operators
// ------------------------------------------------------------------------
//              TYPE_ID,OP_ID, TYPE,          OP,  GOMP_FLAG
ATOMIC_CMPXCHG_CPT( fixed1,  add_cpt,  kmp_int8,    8, +,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_add_cpt
ATOMIC_CMPXCHG_CPT( fixed1,  andb_cpt, kmp_int8,    8, &,  0            )  // __kmpc_atomic_fixed1_andb_cpt
ATOMIC_CMPXCHG_CPT( fixed1,  div_cpt,  kmp_int8,    8, /,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_div_cpt
ATOMIC_CMPXCHG_CPT( fixed1u, div_cpt,  kmp_uint8,   8, /,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed1u_div_cpt
ATOMIC_CMPXCHG_CPT( fixed1,  mul_cpt,  kmp_int8,    8, *,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_mul_cpt
ATOMIC_CMPXCHG_CPT( fixed1,  orb_cpt,  kmp_int8,    8, |,  0            )  // __kmpc_atomic_fixed1_orb_cpt
ATOMIC_CMPXCHG_CPT( fixed1,  shl_cpt,  kmp_int8,    8, <<, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_shl_cpt
ATOMIC_CMPXCHG_CPT( fixed1,  shr_cpt,  kmp_int8,    8, >>, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed1u, shr_cpt,  kmp_uint8,   8, >>, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1u_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed1,  sub_cpt,  kmp_int8,    8, -,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_sub_cpt
ATOMIC_CMPXCHG_CPT( fixed1,  xor_cpt,  kmp_int8,    8, ^,  0            )  // __kmpc_atomic_fixed1_xor_cpt
ATOMIC_CMPXCHG_CPT( fixed2,  add_cpt,  kmp_int16,  16, +,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_add_cpt
ATOMIC_CMPXCHG_CPT( fixed2,  andb_cpt, kmp_int16,  16, &,  0            )  // __kmpc_atomic_fixed2_andb_cpt
ATOMIC_CMPXCHG_CPT( fixed2,  div_cpt,  kmp_int16,  16, /,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_div_cpt
ATOMIC_CMPXCHG_CPT( fixed2u, div_cpt,  kmp_uint16, 16, /,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed2u_div_cpt
ATOMIC_CMPXCHG_CPT( fixed2,  mul_cpt,  kmp_int16,  16, *,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_mul_cpt
ATOMIC_CMPXCHG_CPT( fixed2,  orb_cpt,  kmp_int16,  16, |,  0            )  // __kmpc_atomic_fixed2_orb_cpt
ATOMIC_CMPXCHG_CPT( fixed2,  shl_cpt,  kmp_int16,  16, <<, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_shl_cpt
ATOMIC_CMPXCHG_CPT( fixed2,  shr_cpt,  kmp_int16,  16, >>, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed2u, shr_cpt,  kmp_uint16, 16, >>, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2u_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed2,  sub_cpt,  kmp_int16,  16, -,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_sub_cpt
ATOMIC_CMPXCHG_CPT( fixed2,  xor_cpt,  kmp_int16,  16, ^,  0            )  // __kmpc_atomic_fixed2_xor_cpt
ATOMIC_CMPXCHG_CPT( fixed4,  andb_cpt, kmp_int32,  32, &,  0            )  // __kmpc_atomic_fixed4_andb_cpt
ATOMIC_CMPXCHG_CPT( fixed4,  div_cpt,  kmp_int32,  32, /,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed4_div_cpt
ATOMIC_CMPXCHG_CPT( fixed4u, div_cpt,  kmp_uint32, 32, /,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed4u_div_cpt
ATOMIC_CMPXCHG_CPT( fixed4,  mul_cpt,  kmp_int32,  32, *,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed4_mul_cpt
ATOMIC_CMPXCHG_CPT( fixed4,  orb_cpt,  kmp_int32,  32, |,  0            )  // __kmpc_atomic_fixed4_orb_cpt
ATOMIC_CMPXCHG_CPT( fixed4,  shl_cpt,  kmp_int32,  32, <<, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4_shl_cpt
ATOMIC_CMPXCHG_CPT( fixed4,  shr_cpt,  kmp_int32,  32, >>, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed4u, shr_cpt,  kmp_uint32, 32, >>, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4u_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed4,  xor_cpt,  kmp_int32,  32, ^,  0            )  // __kmpc_atomic_fixed4_xor_cpt
ATOMIC_CMPXCHG_CPT( fixed8,  andb_cpt, kmp_int64,  64, &,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_andb_cpt
ATOMIC_CMPXCHG_CPT( fixed8,  div_cpt,  kmp_int64,  64, /,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_div_cpt
ATOMIC_CMPXCHG_CPT( fixed8u, div_cpt,  kmp_uint64, 64, /,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed8u_div_cpt
ATOMIC_CMPXCHG_CPT( fixed8,  mul_cpt,  kmp_int64,  64, *,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_mul_cpt
ATOMIC_CMPXCHG_CPT( fixed8,  orb_cpt,  kmp_int64,  64, |,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_orb_cpt
ATOMIC_CMPXCHG_CPT( fixed8,  shl_cpt,  kmp_int64,  64, <<, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_shl_cpt
ATOMIC_CMPXCHG_CPT( fixed8,  shr_cpt,  kmp_int64,  64, >>, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed8u, shr_cpt,  kmp_uint64, 64, >>, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8u_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed8,  xor_cpt,  kmp_int64,  64, ^,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_xor_cpt
ATOMIC_CMPXCHG_CPT( float4,  div_cpt,  kmp_real32, 32, /,  KMP_ARCH_X86 )  // __kmpc_atomic_float4_div_cpt
ATOMIC_CMPXCHG_CPT( float4,  mul_cpt,  kmp_real32, 32, *,  KMP_ARCH_X86 )  // __kmpc_atomic_float4_mul_cpt
ATOMIC_CMPXCHG_CPT( float8,  div_cpt,  kmp_real64, 64, /,  KMP_ARCH_X86 )  // __kmpc_atomic_float8_div_cpt
ATOMIC_CMPXCHG_CPT( float8,  mul_cpt,  kmp_real64, 64, *,  KMP_ARCH_X86 )  // __kmpc_atomic_float8_mul_cpt
//              TYPE_ID,OP_ID, TYPE,          OP,  GOMP_FLAG

// ------------------------------------------------------------------------
// Routines for C/C++ Reduction operators && and ||
// ------------------------------------------------------------------------

// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
//     OP     - operator (it's supposed to contain an assignment)
//     LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
#define OP_CRITICAL_L_CPT(OP,LCK_ID)                                      \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );             \
                                                                          \
    if( flag ) {                                                          \
        new_value OP rhs;                                                 \
    } else                                                                \
        new_value = (*lhs);                                               \
                                                                          \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );

// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_L_CPT(OP,FLAG)                                   \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) {                           \
        KMP_CHECK_GTID;                                                   \
        OP_CRITICAL_L_CPT( OP, 0 );                                       \
        return new_value;                                                 \
    }
#else
#define OP_GOMP_CRITICAL_L_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// ------------------------------------------------------------------------
// Need separate macros for &&, || because there is no combined assignment
#define ATOMIC_CMPX_L_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG)           \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE)                                 \
    TYPE new_value;                                                       \
    OP_GOMP_CRITICAL_L_CPT( = *lhs OP, GOMP_FLAG )                        \
    OP_CMPXCHG_CPT(TYPE,BITS,OP)                                          \
}

ATOMIC_CMPX_L_CPT( fixed1, andl_cpt, char,       8, &&, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_andl_cpt
ATOMIC_CMPX_L_CPT( fixed1, orl_cpt,  char,       8, ||, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_orl_cpt
ATOMIC_CMPX_L_CPT( fixed2, andl_cpt, short,     16, &&, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_andl_cpt
ATOMIC_CMPX_L_CPT( fixed2, orl_cpt,  short,     16, ||, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_orl_cpt
ATOMIC_CMPX_L_CPT( fixed4, andl_cpt, kmp_int32, 32, &&, 0 )             // __kmpc_atomic_fixed4_andl_cpt
ATOMIC_CMPX_L_CPT( fixed4, orl_cpt,  kmp_int32, 32, ||, 0 )             // __kmpc_atomic_fixed4_orl_cpt
ATOMIC_CMPX_L_CPT( fixed8, andl_cpt, kmp_int64, 64, &&, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_andl_cpt
ATOMIC_CMPX_L_CPT( fixed8, orl_cpt,  kmp_int64, 64, ||, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_orl_cpt

// -------------------------------------------------------------------------
// Routines for Fortran operators that matched no one in C:
// MAX, MIN, .EQV., .NEQV.
// Operators .AND., .OR. are covered by __kmpc_atomic_*_{andl,orl}_cpt
// Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor}_cpt
// -------------------------------------------------------------------------

// -------------------------------------------------------------------------
// MIN and MAX need separate macros
// OP - operator to check if we need any actions?
#define MIN_MAX_CRITSECT_CPT(OP,LCK_ID)                                   \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );             \
                                                                          \
    if ( *lhs OP rhs ) {                 /* still need actions? */        \
        old_value = *lhs;                                                 \
        *lhs = rhs;                                                       \
        if ( flag )                                                       \
            new_value = rhs;                                              \
        else                                                              \
            new_value = old_value;                                        \
    }                                                                     \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );             \
    return new_value;                                                     \

// -------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define GOMP_MIN_MAX_CRITSECT_CPT(OP,FLAG)                                \
    if (( FLAG ) && ( __kmp_atomic_mode == 2 )) {                         \
        KMP_CHECK_GTID;                                                   \
        MIN_MAX_CRITSECT_CPT( OP, 0 );                                    \
    }
#else
#define GOMP_MIN_MAX_CRITSECT_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// -------------------------------------------------------------------------
// CAS loop that only stores rhs while it is still "better" than *lhs
// (OP is < for max, > for min).
#define MIN_MAX_CMPXCHG_CPT(TYPE,BITS,OP)                                 \
    {                                                                     \
        TYPE KMP_ATOMIC_VOLATILE temp_val;                                \
        /*TYPE old_value; */                                              \
        temp_val = *lhs;                                                  \
        old_value = temp_val;                                             \
        while ( old_value OP rhs &&          /* still need actions? */    \
            ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs,     \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value,         \
                      *VOLATILE_CAST(kmp_int##BITS *) &rhs ) )            \
        {                                                                 \
            KMP_CPU_PAUSE();                                              \
            temp_val = *lhs;                                              \
            old_value = temp_val;                                         \
        }                                                                 \
        if( flag )                                                        \
            return rhs;                                                   \
        else                                                              \
            return old_value;                                             \
    }

// -------------------------------------------------------------------------
// 1-byte, 2-byte operands - use critical section
// NOTE(review): when the initial "*lhs OP rhs" test fails, the routine
// falls through to "return *lhs;" which reads *lhs outside any lock.
#define MIN_MAX_CRITICAL_CPT(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG)      \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE)                                 \
    TYPE new_value, old_value;                                            \
    if ( *lhs OP rhs ) {                 /* need actions? */              \
        GOMP_MIN_MAX_CRITSECT_CPT(OP,GOMP_FLAG)                           \
        MIN_MAX_CRITSECT_CPT(OP,LCK_ID)                                   \
    }                                                                     \
    return *lhs;                                                          \
}

#define MIN_MAX_COMPXCHG_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG)        \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE)                                 \
    TYPE new_value, old_value;                                            \
    if ( *lhs OP rhs ) {                                                  \
        GOMP_MIN_MAX_CRITSECT_CPT(OP,GOMP_FLAG)                           \
        MIN_MAX_CMPXCHG_CPT(TYPE,BITS,OP)                                 \
    }                                                                     \
    return *lhs;                                                          \
}

MIN_MAX_COMPXCHG_CPT( fixed1, max_cpt, char,        8, <, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed1, min_cpt, char,        8, >, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_min_cpt
MIN_MAX_COMPXCHG_CPT( fixed2, max_cpt, short,      16, <, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed2, min_cpt, short,      16, >, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_min_cpt
MIN_MAX_COMPXCHG_CPT( fixed4, max_cpt, kmp_int32,  32, <, 0 )             // __kmpc_atomic_fixed4_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed4, min_cpt, kmp_int32,  32, >, 0 )             // __kmpc_atomic_fixed4_min_cpt
MIN_MAX_COMPXCHG_CPT( fixed8, max_cpt, kmp_int64,  64, <, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed8, min_cpt, kmp_int64,  64, >, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_min_cpt
MIN_MAX_COMPXCHG_CPT( float4, max_cpt, kmp_real32, 32, <, KMP_ARCH_X86 )  // __kmpc_atomic_float4_max_cpt
MIN_MAX_COMPXCHG_CPT( float4, min_cpt, kmp_real32, 32, >, KMP_ARCH_X86 )  // __kmpc_atomic_float4_min_cpt
MIN_MAX_COMPXCHG_CPT( float8, max_cpt, kmp_real64, 64, <, KMP_ARCH_X86 )  // __kmpc_atomic_float8_max_cpt
MIN_MAX_COMPXCHG_CPT( float8, min_cpt, kmp_real64, 64, >, KMP_ARCH_X86 )  // __kmpc_atomic_float8_min_cpt
#if KMP_HAVE_QUAD
MIN_MAX_CRITICAL_CPT( float16, max_cpt, QUAD_LEGACY, <, 16r, 1 )          // __kmpc_atomic_float16_max_cpt
MIN_MAX_CRITICAL_CPT( float16, min_cpt, QUAD_LEGACY, >, 16r, 1 )          // __kmpc_atomic_float16_min_cpt
#if ( KMP_ARCH_X86 )
    MIN_MAX_CRITICAL_CPT( float16, max_a16_cpt, Quad_a16_t, <, 16r, 1 )   // __kmpc_atomic_float16_max_a16_cpt
    MIN_MAX_CRITICAL_CPT( float16, min_a16_cpt, Quad_a16_t, >, 16r, 1 )   // __kmpc_atomic_float16_min_a16_cpt
#endif
#endif

// ------------------------------------------------------------------------
// .EQV. (==) capture: implemented as xor-with-complement (^=~).
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_EQV_CPT(OP,FLAG)                                 \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) {                           \
        KMP_CHECK_GTID;                                                   \
        OP_CRITICAL_CPT( OP, 0 );                                         \
    }
#else
#define OP_GOMP_CRITICAL_EQV_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// ------------------------------------------------------------------------
#define ATOMIC_CMPX_EQV_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG)         \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE)                                 \
    TYPE new_value;                                                       \
    OP_GOMP_CRITICAL_EQV_CPT(^=~,GOMP_FLAG)  /* send assignment */        \
    OP_CMPXCHG_CPT(TYPE,BITS,OP)                                          \
}

// ------------------------------------------------------------------------

ATOMIC_CMPXCHG_CPT(  fixed1, neqv_cpt, kmp_int8,   8,  ^, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_neqv_cpt
ATOMIC_CMPXCHG_CPT(  fixed2, neqv_cpt, kmp_int16, 16,  ^, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_neqv_cpt
ATOMIC_CMPXCHG_CPT(  fixed4, neqv_cpt, kmp_int32, 32,  ^, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4_neqv_cpt
ATOMIC_CMPXCHG_CPT(  fixed8, neqv_cpt, kmp_int64, 64,  ^, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_neqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed1, eqv_cpt,  kmp_int8,   8, ^~, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_eqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed2, eqv_cpt,  kmp_int16, 16, ^~, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_eqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed4, eqv_cpt,  kmp_int32, 32, ^~, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4_eqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed8, eqv_cpt,  kmp_int64, 64, ^~, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_eqv_cpt

// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
//     TYPE_ID, OP_ID, TYPE - detailed above
//     OP - operator
//     LCK_ID - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL_CPT(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG)       \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE)                                 \
    TYPE new_value;                                                       \
    OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG)   /* send assignment */            \
    OP_CRITICAL_CPT(OP##=,LCK_ID)        /* send assignment */            \
}

// ------------------------------------------------------------------------
// Workaround for cmplx4. Regular routines with return value don't work
// on Win_32e. Let's return captured values through the additional parameter.
#define OP_CRITICAL_CPT_WRK(OP,LCK_ID)                                    \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );             \
                                                                          \
    if( flag ) {                                                          \
        (*lhs) OP rhs;                                                    \
        (*out) = (*lhs);                                                  \
    } else {                                                              \
        (*out) = (*lhs);                                                  \
        (*lhs) OP rhs;                                                    \
    }                                                                     \
                                                                          \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );             \
    return;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_CPT_WRK(OP,FLAG)                                 \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) {                           \
        KMP_CHECK_GTID;                                                   \
        OP_CRITICAL_CPT_WRK( OP##=, 0 );                                  \
    }
#else
#define OP_GOMP_CRITICAL_CPT_WRK(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// Same as ATOMIC_BEGIN_CPT but the captured value is returned through
// the extra "out" parameter instead of the function result.
#define ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE)                              \
void __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, TYPE * out, int flag ) \
{                                                                         \
    KMP_DEBUG_ASSERT( __kmp_init_serial );                                \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));
// ------------------------------------------------------------------------
#define ATOMIC_CRITICAL_CPT_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG)   \
ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE)                                      \
    OP_GOMP_CRITICAL_CPT_WRK(OP,GOMP_FLAG)                                \
    OP_CRITICAL_CPT_WRK(OP##=,LCK_ID)                                     \
}
// The end of workaround for cmplx4

/* ------------------------------------------------------------------------- */
// routines for long double type
ATOMIC_CRITICAL_CPT( float10, add_cpt, long double, +, 10r, 1 )  // __kmpc_atomic_float10_add_cpt
ATOMIC_CRITICAL_CPT( float10, sub_cpt, long double, -, 10r, 1 )  // __kmpc_atomic_float10_sub_cpt
ATOMIC_CRITICAL_CPT( float10, mul_cpt, long double, *, 10r, 1 )  // __kmpc_atomic_float10_mul_cpt
ATOMIC_CRITICAL_CPT( float10, div_cpt, long double, /, 10r, 1 )  // __kmpc_atomic_float10_div_cpt
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL_CPT( float16, add_cpt, QUAD_LEGACY, +, 16r, 1 )  // __kmpc_atomic_float16_add_cpt
ATOMIC_CRITICAL_CPT( float16, sub_cpt, QUAD_LEGACY, -, 16r, 1 )  // __kmpc_atomic_float16_sub_cpt
ATOMIC_CRITICAL_CPT( float16, mul_cpt, QUAD_LEGACY, *, 16r, 1 )  // __kmpc_atomic_float16_mul_cpt
ATOMIC_CRITICAL_CPT( float16, div_cpt, QUAD_LEGACY, /, 16r, 1 )  // __kmpc_atomic_float16_div_cpt
#if ( KMP_ARCH_X86 )
    ATOMIC_CRITICAL_CPT( float16, add_a16_cpt, Quad_a16_t, +, 16r, 1 )  // __kmpc_atomic_float16_add_a16_cpt
    ATOMIC_CRITICAL_CPT( float16, sub_a16_cpt, Quad_a16_t, -, 16r, 1 )  // __kmpc_atomic_float16_sub_a16_cpt
    ATOMIC_CRITICAL_CPT( float16, mul_a16_cpt, Quad_a16_t, *, 16r, 1 )  // __kmpc_atomic_float16_mul_a16_cpt
    ATOMIC_CRITICAL_CPT( float16, div_a16_cpt, Quad_a16_t, /, 16r, 1 )  // __kmpc_atomic_float16_div_a16_cpt
#endif
#endif

// routines for complex types

// cmplx4 routines to return void
ATOMIC_CRITICAL_CPT_WRK( cmplx4, add_cpt, kmp_cmplx32, +, 8c, 1 )  // __kmpc_atomic_cmplx4_add_cpt
ATOMIC_CRITICAL_CPT_WRK( cmplx4, sub_cpt, kmp_cmplx32, -, 8c, 1 )  // __kmpc_atomic_cmplx4_sub_cpt
ATOMIC_CRITICAL_CPT_WRK( cmplx4, mul_cpt, kmp_cmplx32, *, 8c, 1 )  // __kmpc_atomic_cmplx4_mul_cpt
ATOMIC_CRITICAL_CPT_WRK( cmplx4, div_cpt, kmp_cmplx32, /, 8c, 1 )  // __kmpc_atomic_cmplx4_div_cpt

ATOMIC_CRITICAL_CPT( cmplx8,  add_cpt, kmp_cmplx64, +, 16c, 1 )  // __kmpc_atomic_cmplx8_add_cpt
ATOMIC_CRITICAL_CPT( cmplx8,  sub_cpt, kmp_cmplx64, -, 16c, 1 )  // __kmpc_atomic_cmplx8_sub_cpt
ATOMIC_CRITICAL_CPT( cmplx8,  mul_cpt, kmp_cmplx64, *, 16c, 1 )  // __kmpc_atomic_cmplx8_mul_cpt
ATOMIC_CRITICAL_CPT( cmplx8,  div_cpt, kmp_cmplx64, /, 16c, 1 )  // __kmpc_atomic_cmplx8_div_cpt
ATOMIC_CRITICAL_CPT( cmplx10, add_cpt, kmp_cmplx80, +, 20c, 1 )  // __kmpc_atomic_cmplx10_add_cpt
ATOMIC_CRITICAL_CPT( cmplx10, sub_cpt, kmp_cmplx80, -, 20c, 1 )  // __kmpc_atomic_cmplx10_sub_cpt
ATOMIC_CRITICAL_CPT( cmplx10, mul_cpt, kmp_cmplx80, *, 20c, 1 )  // __kmpc_atomic_cmplx10_mul_cpt
ATOMIC_CRITICAL_CPT( cmplx10, div_cpt, kmp_cmplx80, /, 20c, 1 )  // __kmpc_atomic_cmplx10_div_cpt
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_CPT( cmplx16, add_cpt, CPLX128_LEG, +, 32c, 1 )  // __kmpc_atomic_cmplx16_add_cpt
ATOMIC_CRITICAL_CPT( cmplx16, sub_cpt, CPLX128_LEG, -, 32c, 1 )  // __kmpc_atomic_cmplx16_sub_cpt
ATOMIC_CRITICAL_CPT( cmplx16, mul_cpt, CPLX128_LEG, *, 32c, 1 )  // __kmpc_atomic_cmplx16_mul_cpt
ATOMIC_CRITICAL_CPT( cmplx16, div_cpt, CPLX128_LEG, /, 32c, 1 )  // __kmpc_atomic_cmplx16_div_cpt
#if ( KMP_ARCH_X86 )
    ATOMIC_CRITICAL_CPT( cmplx16, add_a16_cpt, kmp_cmplx128_a16_t, +, 32c, 1 )  // __kmpc_atomic_cmplx16_add_a16_cpt
    ATOMIC_CRITICAL_CPT( cmplx16, sub_a16_cpt, kmp_cmplx128_a16_t, -, 32c, 1 )  // __kmpc_atomic_cmplx16_sub_a16_cpt
    ATOMIC_CRITICAL_CPT( cmplx16, mul_a16_cpt, kmp_cmplx128_a16_t, *, 32c, 1 )  // __kmpc_atomic_cmplx16_mul_a16_cpt
    ATOMIC_CRITICAL_CPT( cmplx16, div_a16_cpt, kmp_cmplx128_a16_t, /, 32c, 1 )  // __kmpc_atomic_cmplx16_div_a16_cpt
#endif
#endif

#if OMP_40_ENABLED

// OpenMP 4.0: v = x = expr binop x; { v = x; x = expr binop x; }
// { x = expr binop x; v = x; }  for non-commutative operations.
// Supported only on IA-32 architecture and Intel(R) 64

// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
//     OP     - operator (it's supposed to contain an assignment)
//     LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
// "Reverse" form: computes rhs OP *lhs (operand order swapped) for
// non-commutative operators.
#define OP_CRITICAL_CPT_REV(OP,LCK_ID)                                    \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );             \
                                                                          \
    if( flag ) {                                                          \
        /*temp_val = (*lhs);*/                                            \
        (*lhs) = (rhs) OP (*lhs);                                         \
        new_value = (*lhs);                                               \
    } else {                                                              \
        new_value = (*lhs);                                               \
        (*lhs) = (rhs) OP (*lhs);                                         \
    }                                                                     \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );             \
    return new_value;

// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_CPT_REV(OP,FLAG)                                 \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) {                           \
        KMP_CHECK_GTID;                                                   \
        OP_CRITICAL_CPT_REV( OP, 0 );                                     \
    }
#else
#define OP_GOMP_CRITICAL_CPT_REV(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */

// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
//     TYPE    - operands' type
//     BITS    - size in bits, used to distinguish low level calls
//     OP      - operator
// Note: temp_val introduced in order to force the compiler to read
//       *lhs only once (w/o it the compiler reads *lhs twice)
#define OP_CMPXCHG_CPT_REV(TYPE,BITS,OP)                                  \
    {                                                                     \
        TYPE KMP_ATOMIC_VOLATILE temp_val;                                \
        TYPE old_value, new_value;                                        \
        temp_val = *lhs;                                                  \
        old_value = temp_val;                                             \
        new_value = rhs OP old_value;                                     \
        while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value,         \
                      *VOLATILE_CAST(kmp_int##BITS *) &new_value ) )      \
        {                                                                 \
            KMP_CPU_PAUSE();                                              \
                                                                          \
            temp_val = *lhs;                                              \
            old_value = temp_val;                                         \
            new_value = rhs OP old_value;                                 \
        }                                                                 \
        if( flag ) {                                                      \
            return new_value;                                             \
        } else                                                            \
            return old_value;                                             \
    }

// -------------------------------------------------------------------------
// NOTE(review): temp_val declared here is not used by this macro itself;
// OP_CMPXCHG_CPT_REV declares its own shadowing copy.
#define ATOMIC_CMPXCHG_CPT_REV(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG)      \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE)                                 \
    TYPE new_value;                                                       \
    TYPE KMP_ATOMIC_VOLATILE temp_val;                                    \
    OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG)                                \
    OP_CMPXCHG_CPT_REV(TYPE,BITS,OP)                                      \
}

ATOMIC_CMPXCHG_CPT_REV( fixed1,  div_cpt_rev, kmp_int8,    8, /,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1u, div_cpt_rev, kmp_uint8,   8, /,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed1u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1,  shl_cpt_rev, kmp_int8,    8, <<, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1,  shr_cpt_rev, kmp_int8,    8, >>, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1u, shr_cpt_rev, kmp_uint8,   8, >>, KMP_ARCH_X86 )  // __kmpc_atomic_fixed1u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1,  sub_cpt_rev, kmp_int8,    8, -,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed1_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2,  div_cpt_rev, kmp_int16,  16, /,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2u, div_cpt_rev, kmp_uint16, 16, /,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed2u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2,  shl_cpt_rev, kmp_int16,  16, <<, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2,  shr_cpt_rev, kmp_int16,  16, >>, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2u, shr_cpt_rev, kmp_uint16, 16, >>, KMP_ARCH_X86 )  // __kmpc_atomic_fixed2u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2,  sub_cpt_rev, kmp_int16,  16, -,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed2_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4,  div_cpt_rev, kmp_int32,  32, /,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed4_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4u, div_cpt_rev, kmp_uint32, 32, /,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed4u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4,  shl_cpt_rev, kmp_int32,  32, <<, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4,  shr_cpt_rev, kmp_int32,  32, >>, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4u, shr_cpt_rev, kmp_uint32, 32, >>, KMP_ARCH_X86 )  // __kmpc_atomic_fixed4u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4,  sub_cpt_rev, kmp_int32,  32, -,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed4_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8,  div_cpt_rev, kmp_int64,  64, /,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8u, div_cpt_rev, kmp_uint64, 64, /,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed8u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8,  shl_cpt_rev, kmp_int64,  64, <<, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8,  shr_cpt_rev, kmp_int64,  64, >>, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8u, shr_cpt_rev, kmp_uint64, 64, >>, KMP_ARCH_X86 )  // __kmpc_atomic_fixed8u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8,  sub_cpt_rev, kmp_int64,  64, -,  KMP_ARCH_X86 )  // __kmpc_atomic_fixed8_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float4,  div_cpt_rev, kmp_real32, 32, /,  KMP_ARCH_X86 )  // __kmpc_atomic_float4_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float4,  sub_cpt_rev, kmp_real32, 32, -,  KMP_ARCH_X86 )  // __kmpc_atomic_float4_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float8,  div_cpt_rev, kmp_real64, 64, /,  KMP_ARCH_X86 )  // __kmpc_atomic_float8_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float8,  sub_cpt_rev, kmp_real64, 64, -,  KMP_ARCH_X86 )  // __kmpc_atomic_float8_sub_cpt_rev
//              TYPE_ID,OP_ID, TYPE,          OP,  GOMP_FLAG

// ------------------------------------------------------------------------
//
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
//     TYPE_ID, OP_ID, TYPE - detailed above
//     OP - operator
//     LCK_ID - lock identifier, used to possibly distinguish lock variable
// NOTE(review): temp_val below is declared but not used by this macro;
// it appears to be kept for symmetry with the CAS-based variant.
#define ATOMIC_CRITICAL_CPT_REV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG)   \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE)                                 \
    TYPE new_value;                                                       \
    TYPE KMP_ATOMIC_VOLATILE temp_val;                                    \
    /*printf("__kmp_atomic_mode = %d\n", __kmp_atomic_mode);*/            \
    OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG)                                \
    OP_CRITICAL_CPT_REV(OP,LCK_ID)                                        \
}

/* ------------------------------------------------------------------------- */
// routines for long double type
ATOMIC_CRITICAL_CPT_REV( float10, sub_cpt_rev, long double, -, 10r, 1 )  // __kmpc_atomic_float10_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( float10, div_cpt_rev, long double, /, 10r, 1 )  // __kmpc_atomic_float10_div_cpt_rev
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL_CPT_REV( float16, sub_cpt_rev, QUAD_LEGACY, -, 16r, 1 )  // __kmpc_atomic_float16_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( float16, div_cpt_rev, QUAD_LEGACY, /, 16r, 1 )  // __kmpc_atomic_float16_div_cpt_rev
#if ( KMP_ARCH_X86 )
    ATOMIC_CRITICAL_CPT_REV( float16, sub_a16_cpt_rev, Quad_a16_t, -, 16r, 1 )  // __kmpc_atomic_float16_sub_a16_cpt_rev
    ATOMIC_CRITICAL_CPT_REV( float16, div_a16_cpt_rev, Quad_a16_t, /, 16r, 1 )  // __kmpc_atomic_float16_div_a16_cpt_rev
#endif
#endif

// routines for complex types

// ------------------------------------------------------------------------
// Workaround for cmplx4. Regular routines with return value don't work
// on Win_32e. Let's return captured values through the additional parameter.
#define OP_CRITICAL_CPT_REV_WRK(OP,LCK_ID)                                \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );             \
                                                                          \
    if( flag ) {                                                          \
        (*lhs) = (rhs) OP (*lhs);                                         \
        (*out) = (*lhs);                                                  \
    } else {                                                              \
        (*out) = (*lhs);                                                  \
        (*lhs) = (rhs) OP (*lhs);                                         \
    }                                                                     \
                                                                          \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );             \
    return;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_CPT_REV_WRK(OP,FLAG)                             \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) {                           \
        KMP_CHECK_GTID;                                                   \
        OP_CRITICAL_CPT_REV_WRK( OP, 0 );                                 \
    }
#else
#define OP_GOMP_CRITICAL_CPT_REV_WRK(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
#define ATOMIC_CRITICAL_CPT_REV_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG)   \
ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE)                                      \
    OP_GOMP_CRITICAL_CPT_REV_WRK(OP,GOMP_FLAG)                            \
    OP_CRITICAL_CPT_REV_WRK(OP,LCK_ID)                                    \
}
// The end of workaround for cmplx4

// !!! TODO: check if we need to return void for cmplx4 routines
// cmplx4 routines to return void
ATOMIC_CRITICAL_CPT_REV_WRK( cmplx4, sub_cpt_rev, kmp_cmplx32, -, 8c, 1 )  // __kmpc_atomic_cmplx4_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV_WRK( cmplx4, div_cpt_rev, kmp_cmplx32, /, 8c, 1 )  // __kmpc_atomic_cmplx4_div_cpt_rev

ATOMIC_CRITICAL_CPT_REV( cmplx8,  sub_cpt_rev, kmp_cmplx64, -, 16c, 1 )  // __kmpc_atomic_cmplx8_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx8,  div_cpt_rev, kmp_cmplx64, /, 16c, 1 )  // __kmpc_atomic_cmplx8_div_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx10, sub_cpt_rev, kmp_cmplx80, -, 20c, 1 )  // __kmpc_atomic_cmplx10_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx10, div_cpt_rev, kmp_cmplx80, /, 20c, 1 )  // __kmpc_atomic_cmplx10_div_cpt_rev
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_CPT_REV( cmplx16, sub_cpt_rev, CPLX128_LEG, -, 32c, 1 )  // __kmpc_atomic_cmplx16_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx16, div_cpt_rev, CPLX128_LEG, /, 32c, 1 )  // __kmpc_atomic_cmplx16_div_cpt_rev
#if ( KMP_ARCH_X86 )
    ATOMIC_CRITICAL_CPT_REV( cmplx16, sub_a16_cpt_rev, kmp_cmplx128_a16_t, -, 32c, 1 )  // __kmpc_atomic_cmplx16_sub_a16_cpt_rev
    ATOMIC_CRITICAL_CPT_REV( cmplx16, div_a16_cpt_rev, kmp_cmplx128_a16_t, /, 32c, 1 )  // __kmpc_atomic_cmplx16_div_a16_cpt_rev
#endif
#endif

// OpenMP 4.0 Capture-write (swap): {v = x; x = expr;}

#define ATOMIC_BEGIN_SWP(TYPE_ID,TYPE)                                    \
TYPE __kmpc_atomic_##TYPE_ID##_swp( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \
{                                                                         \
    KMP_DEBUG_ASSERT( __kmp_init_serial );                                \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid ));

#define CRITICAL_SWP(LCK_ID)                                              \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );             \
                                                                          \
    old_value = (*lhs);                                                   \
    (*lhs) = rhs;                                                         \
                                                                          \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );             \
    return old_value;

// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define GOMP_CRITICAL_SWP(FLAG)                                           \
    if ( (FLAG) && (__kmp_atomic_mode == 2) ) {                           \
        KMP_CHECK_GTID;                                                   \
        CRITICAL_SWP( 0 );                                                \
    }
#else
#define GOMP_CRITICAL_SWP(FLAG)
#endif /* KMP_GOMP_COMPAT */

// Swap via single atomic exchange (integer form).
#define ATOMIC_XCHG_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG)                      \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE)                                            \
    TYPE old_value;                                                       \
    GOMP_CRITICAL_SWP(GOMP_FLAG)                                          \
    old_value = KMP_XCHG_FIXED##BITS( lhs, rhs );                         \
    return old_value;                                                     \
}
// ------------------------------------------------------------------------
// Swap via single atomic exchange (floating-point form).
#define ATOMIC_XCHG_FLOAT_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG)                \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE)                                            \
    TYPE old_value;                                                       \
    GOMP_CRITICAL_SWP(GOMP_FLAG)                                          \
    old_value = KMP_XCHG_REAL##BITS( lhs, rhs );                          \
    return old_value;                                                     \
}

// ------------------------------------------------------------------------
// Swap via CAS retry loop (used for 8-byte operands on 32-bit x86).
#define CMPXCHG_SWP(TYPE,BITS)                                            \
    {                                                                     \
        TYPE KMP_ATOMIC_VOLATILE temp_val;                                \
        TYPE old_value, new_value;                                        \
        temp_val = *lhs;                                                  \
        old_value = temp_val;                                             \
        new_value = rhs;                                                  \
        while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
                      *VOLATILE_CAST(kmp_int##BITS *) &old_value,         \
                      *VOLATILE_CAST(kmp_int##BITS *) &new_value ) )      \
        {                                                                 \
            KMP_CPU_PAUSE();                                              \
                                                                          \
            temp_val = *lhs;                                              \
            old_value = temp_val;                                         \
            new_value = rhs;                                              \
        }                                                                 \
        return old_value;                                                 \
    }

// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG)                   \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE)                                            \
    TYPE old_value;                                                       \
    GOMP_CRITICAL_SWP(GOMP_FLAG)                                          \
    CMPXCHG_SWP(TYPE,BITS)                                                \
}

ATOMIC_XCHG_SWP( fixed1, kmp_int8,   8, KMP_ARCH_X86 )   // __kmpc_atomic_fixed1_swp
ATOMIC_XCHG_SWP( fixed2, kmp_int16, 16, KMP_ARCH_X86 )   // __kmpc_atomic_fixed2_swp
ATOMIC_XCHG_SWP( fixed4, kmp_int32, 32, KMP_ARCH_X86 )   // __kmpc_atomic_fixed4_swp

ATOMIC_XCHG_FLOAT_SWP( float4, kmp_real32, 32, KMP_ARCH_X86 )  // __kmpc_atomic_float4_swp

#if ( KMP_ARCH_X86 )
    ATOMIC_CMPXCHG_SWP( fixed8, kmp_int64,  64, KMP_ARCH_X86 )        // __kmpc_atomic_fixed8_swp
    ATOMIC_CMPXCHG_SWP( float8, kmp_real64, 64, KMP_ARCH_X86 )        // __kmpc_atomic_float8_swp
#else
    ATOMIC_XCHG_SWP(       fixed8, kmp_int64,  64, KMP_ARCH_X86 )     // __kmpc_atomic_fixed8_swp
    ATOMIC_XCHG_FLOAT_SWP( float8, kmp_real64, 64, KMP_ARCH_X86 )     // __kmpc_atomic_float8_swp
#endif

// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
#define ATOMIC_CRITICAL_SWP(TYPE_ID,TYPE,LCK_ID,GOMP_FLAG)                \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE)                                            \
    TYPE old_value;                                                       \
    GOMP_CRITICAL_SWP(GOMP_FLAG)                                          \
    CRITICAL_SWP(LCK_ID)                                                  \
}

// ------------------------------------------------------------------------
// !!! TODO: check if we need to return void for cmplx4 routines
// Workaround for cmplx4. Regular routines with return value don't work
// on Win_32e. Let's return captured values through the additional parameter.
#define ATOMIC_BEGIN_SWP_WRK(TYPE_ID,TYPE) \ void __kmpc_atomic_##TYPE_ID##_swp( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, TYPE * out ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid )); #define CRITICAL_SWP_WRK(LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ tmp = (*lhs); \ (*lhs) = (rhs); \ (*out) = tmp; \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return; // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define GOMP_CRITICAL_SWP_WRK(FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ CRITICAL_SWP_WRK( 0 ); \ } #else #define GOMP_CRITICAL_SWP_WRK(FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ #define ATOMIC_CRITICAL_SWP_WRK(TYPE_ID, TYPE,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_SWP_WRK(TYPE_ID,TYPE) \ TYPE tmp; \ GOMP_CRITICAL_SWP_WRK(GOMP_FLAG) \ CRITICAL_SWP_WRK(LCK_ID) \ } // The end of workaround for cmplx4 ATOMIC_CRITICAL_SWP( float10, long double, 10r, 1 ) // __kmpc_atomic_float10_swp #if KMP_HAVE_QUAD ATOMIC_CRITICAL_SWP( float16, QUAD_LEGACY, 16r, 1 ) // __kmpc_atomic_float16_swp #endif // cmplx4 routine to return void ATOMIC_CRITICAL_SWP_WRK( cmplx4, kmp_cmplx32, 8c, 1 ) // __kmpc_atomic_cmplx4_swp //ATOMIC_CRITICAL_SWP( cmplx4, kmp_cmplx32, 8c, 1 ) // __kmpc_atomic_cmplx4_swp ATOMIC_CRITICAL_SWP( cmplx8, kmp_cmplx64, 16c, 1 ) // __kmpc_atomic_cmplx8_swp ATOMIC_CRITICAL_SWP( cmplx10, kmp_cmplx80, 20c, 1 ) // __kmpc_atomic_cmplx10_swp #if KMP_HAVE_QUAD ATOMIC_CRITICAL_SWP( cmplx16, CPLX128_LEG, 32c, 1 ) // __kmpc_atomic_cmplx16_swp #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_SWP( float16_a16, Quad_a16_t, 16r, 1 ) // __kmpc_atomic_float16_a16_swp ATOMIC_CRITICAL_SWP( cmplx16_a16, kmp_cmplx128_a16_t, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_swp #endif #endif // End of OpenMP 4.0 Capture #endif //OMP_40_ENABLED #endif //KMP_ARCH_X86 || 
KMP_ARCH_X86_64 #undef OP_CRITICAL /* ------------------------------------------------------------------------ */ /* Generic atomic routines */ /* ------------------------------------------------------------------------ */ void __kmpc_atomic_1( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); if ( #if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT) FALSE /* must use lock */ #else TRUE #endif ) { kmp_int8 old_value, new_value; old_value = *(kmp_int8 *) lhs; (*f)( &new_value, &old_value, rhs ); /* TODO: Should this be acquire or release? */ while ( ! KMP_COMPARE_AND_STORE_ACQ8 ( (kmp_int8 *) lhs, *(kmp_int8 *) &old_value, *(kmp_int8 *) &new_value ) ) { KMP_CPU_PAUSE(); old_value = *(kmp_int8 *) lhs; (*f)( &new_value, &old_value, rhs ); } return; } else { // // All 1-byte data is of integer data type. // #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_1i, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_1i, gtid ); } } void __kmpc_atomic_2( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { if ( #if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT) FALSE /* must use lock */ #elif KMP_ARCH_X86 || KMP_ARCH_X86_64 TRUE /* no alignment problems */ #else ! ( (kmp_uintptr_t) lhs & 0x1) /* make sure address is 2-byte aligned */ #endif ) { kmp_int16 old_value, new_value; old_value = *(kmp_int16 *) lhs; (*f)( &new_value, &old_value, rhs ); /* TODO: Should this be acquire or release? */ while ( ! 
KMP_COMPARE_AND_STORE_ACQ16 ( (kmp_int16 *) lhs, *(kmp_int16 *) &old_value, *(kmp_int16 *) &new_value ) ) { KMP_CPU_PAUSE(); old_value = *(kmp_int16 *) lhs; (*f)( &new_value, &old_value, rhs ); } return; } else { // // All 2-byte data is of integer data type. // #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_2i, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_2i, gtid ); } } void __kmpc_atomic_4( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); if ( // // FIXME: On IA-32 architecture, gcc uses cmpxchg only for 4-byte ints. // Gomp compatibility is broken if this routine is called for floats. // #if KMP_ARCH_X86 || KMP_ARCH_X86_64 TRUE /* no alignment problems */ #else ! ( (kmp_uintptr_t) lhs & 0x3) /* make sure address is 4-byte aligned */ #endif ) { kmp_int32 old_value, new_value; old_value = *(kmp_int32 *) lhs; (*f)( &new_value, &old_value, rhs ); /* TODO: Should this be acquire or release? */ while ( ! KMP_COMPARE_AND_STORE_ACQ32 ( (kmp_int32 *) lhs, *(kmp_int32 *) &old_value, *(kmp_int32 *) &new_value ) ) { KMP_CPU_PAUSE(); old_value = *(kmp_int32 *) lhs; (*f)( &new_value, &old_value, rhs ); } return; } else { // // Use __kmp_atomic_lock_4i for all 4-byte data, // even if it isn't of integer data type. 
// #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_4i, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_4i, gtid ); } } void __kmpc_atomic_8( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); if ( #if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT) FALSE /* must use lock */ #elif KMP_ARCH_X86 || KMP_ARCH_X86_64 TRUE /* no alignment problems */ #else ! ( (kmp_uintptr_t) lhs & 0x7) /* make sure address is 8-byte aligned */ #endif ) { kmp_int64 old_value, new_value; old_value = *(kmp_int64 *) lhs; (*f)( &new_value, &old_value, rhs ); /* TODO: Should this be acquire or release? */ while ( ! KMP_COMPARE_AND_STORE_ACQ64 ( (kmp_int64 *) lhs, *(kmp_int64 *) &old_value, *(kmp_int64 *) &new_value ) ) { KMP_CPU_PAUSE(); old_value = *(kmp_int64 *) lhs; (*f)( &new_value, &old_value, rhs ); } return; } else { // // Use __kmp_atomic_lock_8i for all 8-byte data, // even if it isn't of integer data type. 
// #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_8i, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_8i, gtid ); } } void __kmpc_atomic_10( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_10r, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_10r, gtid ); } void __kmpc_atomic_16( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_16c, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_16c, gtid ); } void __kmpc_atomic_20( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_20c, gtid ); (*f)( lhs, lhs, rhs 
); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_20c, gtid ); } void __kmpc_atomic_32( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_32c, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_32c, gtid ); } // AC: same two routines as GOMP_atomic_start/end, but will be called by our compiler // duplicated in order to not use 3-party names in pure Intel code // TODO: consider adding GTID parameter after consultation with Ernesto/Xinmin. void __kmpc_atomic_start(void) { int gtid = __kmp_entry_gtid(); KA_TRACE(20, ("__kmpc_atomic_start: T#%d\n", gtid)); __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid); } void __kmpc_atomic_end(void) { int gtid = __kmp_get_gtid(); KA_TRACE(20, ("__kmpc_atomic_end: T#%d\n", gtid)); __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid); } /* ------------------------------------------------------------------------ */ /* ------------------------------------------------------------------------ */ /*! @} */ // end of file
harness_parallel.h
// Copyright (C) 2021 Intel Corporation // // SPDX-License-Identifier: Apache-2.0 // values for GRAIN macro #define GS_STATIC -1 #define GS_2CHUNKS -2 #define GS_3CHUNKS -3 #define GS_4CHUNKS -4 #define GS_5CHUNKS -5 #define GS_6CHUNKS -6 #define GS_7CHUNKS -7 #define GS_8CHUNKS -8 #define GS_16CHUNKS -16 #define GS_32CHUNKS -32 #define GS_64CHUNKS -64 #define GS_OPENCL -16 #ifndef GS_BEST #define GS_BEST 1 #endif // values for PARALLEL macro #define OMP_STATIC 1 #define OMP_DYNAMIC 2 #define OMP_GUIDED 3 #define OMP_RUNTIME 4 #define OMP_S_STEAL 5 #define OMP_STATIC_STEAL 5 #ifndef OMP_BEST #define OMP_BEST OMP_STATIC #endif #define TBB_SIMPLE 10 #define TBB_AUTO 11 #define TBB_AFFINITY 12 #define TBB_C_AFF 14 #define TBB_CONST_AFFINITY 14 #define TBB_STATIC 15 #define TBB_RAPID 17 #ifndef TBB_BEST #define TBB_BEST TBB_STATIC #endif #define TF_FOR_EACH 30 #ifndef PARALLEL #define PARALLEL TBB_SIMPLE #endif #if PARALLEL < TBB_SIMPLE #define __USE_OPENMP__ 1 #elif PARALLEL < TF_FOR_EACH #define __USE_TBB__ 1 #else #define __USE_TF__ 1 #endif #include <algorithm> #include <stdio.h> #include <atomic> #if HAVE_TBB #include <tbb/info.h> #endif #if HAVE_OMP #include <omp.h> #endif #if __USE_TBB__ #if PARALLEL == TBB_RAPID #include "rapid_start.h" #endif #include <tbb/parallel_for.h> #include <tbb/blocked_range.h> #include <tbb/tbb_allocator.h> #include <tbb/scalable_allocator.h> #include <tbb/global_control.h> #include <tbb/version.h> //#include "harness_pinner.h" #include <tbb/task_arena.h> #ifndef __USE_TASK_ARENA__ #define __USE_TASK_ARENA__ 1 #endif #ifndef __USE_OBSERVER__ #define __USE_OBSERVER__ 0 // I see no positive changes from using observer now #endif #elif __USE_TF__ #include <taskflow/taskflow.hpp> #endif #include <sys/syscall.h> #ifndef __INTEL_COMPILER #define __forceinline inline #endif #define MACRO_STRING_AUX(...) 
#__VA_ARGS__ #define MACRO_STRING(x) MACRO_STRING_AUX( x ) namespace Harness { static int nThreads; #if __USE_TBB__ #if __USE_OBSERVER__ struct LeavingObserver : public tbb::task_scheduler_observer { LeavingObserver() : tbb::task_scheduler_observer(TBB_INTERFACE_VERSION < 7003) { printf("Using observer\n"); fflush(0); observe(true); } /*override*/ void on_scheduler_entry(bool isWorker) { #ifdef LOG_INFO if(isWorker) printf("+"); #endif } /*override*/ void on_scheduler_exit(bool isWorker) { #ifdef LOG_INFO if(isWorker) printf("-"); #endif } }; static __thread LeavingObserver * g_observer; #endif //__USE_OBSERVER__ static std::atomic<int> g_globalRefCounter; static __thread int g_localRefCounter = 0; static __thread tbb::global_control * g_tbbConfig = NULL; #if PARALLEL == TBB_RAPID static Harness::RapidStart g_rs; #undef __USE_TASK_ARENA__ #elif __USE_TASK_ARENA__ static tbb::task_arena* g_globalArena = NULL; #endif #endif //__USE_TBB__ #if __USE_TF__ tf::Executor executor; tf::Taskflow taskflow; #endif // __USE_TF__ static int GetNumThreads() { #if HAVE_TBB return ::tbb::info::default_concurrency(); //tbb::this_task_arena::max_concurrency(); #elif HAVE_OMP return omp_get_max_threads(); #else return std::thread::hardware_concurrency(); #endif } static int InitParallel(int n = 0) { #if __USE_TBB__ nThreads = n? 
n : GetNumThreads(); if(TBB_INTERFACE_VERSION != TBB_runtime_interface_version()) { fprintf(stderr, "ERROR: Compiled with TBB interface version " __TBB_STRING(TBB_INTERFACE_VERSION) " while runtime provides %d\n", TBB_runtime_interface_version()); fflush(stderr); exit(-2); } if(tbb::tbb_allocator<int>::allocator_type() != tbb::tbb_allocator<int>::scalable) { fprintf(stderr, "ERROR: Scalable allocator library must be loaded.\n"); fflush(stderr); exit(-2); } #ifdef LOG_INFO setenv("TBB_VERSION", "1", 1); #endif setenv("TBB_MALLOC_USE_HUGE_PAGES", "1", 1); scalable_allocation_mode(USE_HUGE_PAGES, 1); printf("Setting %d threads for TBB\n", nThreads); fflush(0); if(!g_localRefCounter++) { __TBB_ASSERT(!g_tbbConfig,0); //Harness::LimitNumberOfThreads(n, (n+MIC_CORES-1)/MIC_CORES); g_tbbConfig = new tbb::global_control(tbb::global_control::max_allowed_parallelism, nThreads); if(!g_globalRefCounter++) { #if PARALLEL == TBB_RAPID //Harness::PinTbbThreads( nThreads ); g_rs.init(nThreads); #elif __USE_TASK_ARENA__ __TBB_ASSERT(!g_globalArena,0); #ifdef LOG_INFO printf("Using TASK_ARENA(explicit) with %d threads\n", nThreads); fflush(0); #endif g_globalArena = new tbb::task_arena(nThreads, 1); g_globalArena->execute( [&]{ //Harness::PinTbbThreads( nThreads ); #if __USE_OBSERVER__ g_observer = new LeavingObserver; #endif }); #else Harness::PinTbbThreads( nThreads ); #if __USE_OBSERVER__ g_observer = new LeavingObserver; #endif #endif } } #elif __USE_OPENMP__ // OpenMP #if PARALLEL == OMP_S_STEAL setenv("OMP_SCHEDULE", "static_steal", 1); #endif #ifdef KMP_AFFINITY #ifdef LOG_INFO puts( "KMP_AFFINITY=" MACRO_STRING(KMP_AFFINITY) ); #endif setenv("KMP_AFFINITY", MACRO_STRING(KMP_AFFINITY), 1); #else setenv("KMP_AFFINITY", "granularity=fine,balanced", 1); #endif #ifdef KMP_BLOCKTIME #ifdef LOG_INFO puts( "KMP_BLOCKTIME=" MACRO_STRING(KMP_BLOCKTIME) ); #endif setenv("KMP_BLOCKTIME", MACRO_STRING(KMP_BLOCKTIME), 1); #else setenv("KMP_BLOCKTIME", "infinite", 1); // no sleeps #endif 
// setenv("KMP_LIBRARY", "turnaround", 1); // disables yields #ifdef LOG_INFO setenv("KMP_VERSION", "1", 1); // setenv("KMP_D_DEBUG", "7", 1); #endif nThreads = n? n : GetNumThreads(); // configure OMP environment printf("Setting %d threads for OMP\n", nThreads); fflush(0); omp_set_num_threads(nThreads); // Warm up OMP workers #pragma omp parallel for for(int j=0; j<nThreads; ++j) { #if 0 cpu_set_t target_mask; CPU_ZERO(&target_mask); sched_getaffinity(0, sizeof(target_mask), &target_mask); char temp[1024]; for(int i=0; i<248/8; ++i) { sprintf(temp+2*i, "%02X",(int)(((char*)&target_mask)[(248/8)-i-1])&0xFF); } printf("Pipeline thread, worker = %d, tid=%x, %s\n", j, (int)syscall(SYS_gettid), temp); fflush(0); #endif } #elif __USE_TF__ nThreads = n? n : GetNumThreads(); printf("Setting %d threads for TaskFlow\n", nThreads); fflush(0); taskflow.for_each_index(0, nThreads, 1, [](int _){}); executor.run(taskflow).get(); #endif return nThreads; } static void DestroyParallel() { #ifdef __USE_TBB__ if ( !--g_localRefCounter ) { #if __USE_OBSERVER__ g_observer->observe(false); delete g_observer; #endif //__TBB_ASSERT(g_tbbConfig, 0); // destroy all TBB threads delete g_tbbConfig; g_tbbConfig = 0; if( !--g_globalRefCounter ) { #if __USE_TASK_ARENA__ delete g_globalArena; #endif #ifdef LOG_INFO printf("Shutting down TBB global scheduler\n"); fflush(0); #endif } } #endif } template<typename Body> struct executive_range_body { executive_range_body(const Body &b) : my_func(b) {} const Body &my_func; #if __USE_TBB__ template<typename Iter> __forceinline void operator()(const tbb::blocked_range<Iter> &r) const { operator()( r.begin(), r.end(), r.grainsize() ); } #endif template<typename Iter> __forceinline void operator()(Iter s, Iter e, int info=-1) const { #if __INTEL_COMPILER #pragma ivdep #endif for( Iter i = s; i < e; i++) operator()( i ); } template<typename Iter> __forceinline void operator()(Iter i) const { #ifdef __KERNEL_FORCEINLINE #pragma forceinline #elif 
__INTEL_COMPILER #pragma noinline #endif my_func( i ); } }; #ifdef GRAIN #if GRAIN > 0 // GRAIN is specified as absolute grain-size #define GS(n) std::max(g, Iter(GRAIN)) #elif GRAIN < 0 // GRAIN by modulo is number of chunks per thread. Variables are defined in the function below #define GS(n) std::max(g, ((e-s)/(n*(- GRAIN ))) ) #endif #else #define GS(n) g #endif template <typename Iter, typename Body> void parallel_for( Iter s, Iter e, Iter g, const Body &b) { #ifdef LOG_INFO if( sizeof(Body) >= 256 ) { static bool printed = false; if( !printed ) { printf("The task Body size is too big: %lu\n", sizeof(Body)); fflush(0); printed = true; } } #endif #if GRAIN < 0 const int per_thread = - GRAIN; #elif GRAIN > 0 const int per_thread = std::min(32, int(e-s)/GRAIN ); #else const int per_thread = std::min(32, int((e-s)/g) ); #endif executive_range_body<Body> executive_range(b); #ifdef LOG_RANGES printf("Parallel for [%d,%d):%d, thread id = %x\n", int(s), int(e), int(GS(nThreads)), (int)syscall(SYS_gettid)); #endif #if __USE_OPENMP__ g = GS(nThreads); #pragma omp parallel { #if PARALLEL==OMP_STATIC #pragma omp for nowait schedule(static) #elif PARALLEL==OMP_DYNAMIC #pragma omp for nowait schedule(dynamic, g) #elif PARALLEL==OMP_GUIDED #pragma omp for nowait schedule(guided, g) #elif PARALLEL==OMP_RUNTIME || PARALLEL == OMP_S_STEAL #pragma omp for nowait schedule(runtime) #else #error Wrong PARALLEL mode #endif #if __INTEL_COMPILER #pragma ivdep #endif for(int i = s; i < e; i++) executive_range( i ); } #elif PARALLEL == CILK_SIMPLE #if __INTEL_COMPILER #pragma ivdep #endif cilk_for(Iter i = s; i < e; i++) executive_range( i ); #elif PARALLEL == TBB_RAPID g_rs.parallel_ranges(s, e, executive_range); #elif PARALLEL == TF_FOR_EACH taskflow.for_each_index(s, e, 1, executive_range); executor.run(taskflow).get(); #else // other TBB parallel_fors // implied: static tbb::task_group_context context(tbb::task_group_context::isolated, tbb::task_group_context::default_traits); 
static tbb::task_group_context context(tbb::task_group_context::bound, tbb::task_group_context::default_traits | tbb::task_group_context::concurrent_wait); #if __USE_TASK_ARENA__ g_globalArena->execute( [&]{ #endif #if PARALLEL == TBB_STATIC Harness::static_parallel_ranges(s, e, executive_range, nThreads); #elif PARALLEL == TBB_NESTED Harness::static_parallel_ranges(s, e, [per_thread,executive_range](Iter s, Iter e, int p) { tscg_task_start( out ); tscg_task_data( data, "%d + %d #%d", int(s), int(e-s), p ); Iter m = s, per = std::min(e-s, Iter(per_thread)); if( per_thread > 1 ) { m += (e-s)/per--; executive_range(s, m, p); } tbb::parallel_for(tbb::blocked_range<Iter>(m, e, (e-m)/per*2-1 ), executive_range, tbb::simple_partitioner(), context); tscg_task_stop( out, data ); }, nThreads); #else // regular partitioners #if PARALLEL==TBB_SIMPLE const tbb::simple_partitioner part; #elif PARALLEL==TBB_AUTO const tbb::auto_partitioner part; #elif PARALLEL==TBB_AFFINITY static tbb::affinity_partitioner part; #elif PARALLEL==TBB_CONST_AFFINITY tbb::affinity_partitioner part; #elif PARALLEL==TBB_OPENCL || PARALLEL==TBB_UNEVEN const tbb::opencl_partitioner part; #else #error Wrong PARALLEL mode #endif tbb::parallel_for(tbb::blocked_range<Iter>(s,e,GS(nThreads)*2-1), executive_range, part, context); #endif /* partitioners */ #if __USE_TASK_ARENA__ }); #endif #endif /*outermost*/ } }
learnreg.h
#ifndef _LEARNREG_H // Include guard #define _LEARNREG_H #include <iostream> // cout,cerr,etc. #include <stdio.h> // printf, etc. #include <stdexcept> // Standard exceptions #include <omp.h> // Eigen #include <Eigen/Dense> #include <Eigen/SparseCore> #include "utils.h" typedef Eigen::VectorXd VecType; typedef Eigen::MatrixXd MatType; // Simple output operator for vectors /* template<typename T> std::ostream& operator<< (std::ostream& out, const std::vector<T>& v) { if ( !v.empty() ) { out << '['; std::copy (v.begin(), v.end(), std::ostream_iterator<T>(out, ", ")); out << "\b\b]"; } return out; } */ /***** Definition of exponential families *****/ #ifndef _EXPFAM // Include guard #define _EXPFAM enum ExpFam { Poisson }; #endif // Sufficient statistics T(x) template<ExpFam Family, typename T1, typename T2> void T( const T1& x, T2& y ) { y = x; } // Log partition function A(theta) template<ExpFam Family, typename T1, typename T2> void A( const T1& x, T2& y ) { y = x.derived().array().exp(); } template<ExpFam Family> void A( const double& x, double& y ) { y = std::exp(x); } // Derivative of log partition function dA(theta)/dtheta template<ExpFam Family, typename T1, typename T2> void dA( const T1& x, T2& y ) { A<Family>(x,y); }; template<ExpFam Family, typename T1, typename T2, typename T3> void dA( const T1& x, const T2& A, T3& y ) { y = A; }; // Second derivative of log partition function d^2A(theta)/dtheta^2 template<ExpFam Family, typename T1, typename T2> void d2A( const T1& x, T2& y ) { A<Family>(x,y); }; template<ExpFam Family, typename T1, typename T2, typename T3> void d2A( const T1& x, const T2& A, T3& y ) { y = A; }; template<ExpFam Family, typename T1, typename T2, typename T3, typename T4> void d2A( const T1& x, const T2& A, const T3& dA, T4& y ) { y = A; }; /***** Logical interface for shifted (remove s column) and padded (add all ones column) X using original X *****/ template<typename XType> struct VirtualMatrix { const XType& origX; // Reference to the 
original matrix class bool isTransposed; size_t p; // Total number of nodes size_t n; // Total number of nodes size_t s; // The current column from 0..p-1? VirtualMatrix(const XType& origX, size_t s) : origX(origX), s(s), isTransposed(false) { p = origX.cols(); // Extract number of variables n = origX.rows(); // Extract number of instances/rows } VirtualMatrix(const XType& origX, size_t s, bool isTransposed ) : origX(origX), s(s), isTransposed(isTransposed) { p = origX.cols(); // Extract number of variables n = origX.rows(); // Extract number of instances/rows } VirtualMatrix<XType> transpose() const { return VirtualMatrix<XType>(origX, s, !isTransposed); } VecType operator*(const VecType& y ) const { // Calculate product in piecewise fashion size_t nFront = s; size_t nBack = p-s-1; if( isTransposed ) { assert(y.cols() == 1 && y.rows() == n && "y is not the correct size"); VecType result = VecType::Zero(p,1); result(0) = y.sum(); if(nFront > 0) { result.segment(1,nFront) = origX.leftCols(nFront).transpose()*y; } if(nBack > 0) { result.segment(nFront+1,nBack) = origX.rightCols(nBack).transpose()*y; } return result; } else { assert(y.cols() == 1 && y.rows() == p && "y is not the correct size"); if(nFront > 0 && nBack > 0) { VecType v(origX.leftCols(nFront)*y.block(1,0,nFront,1) + origX.rightCols(nBack)*y.bottomRows(nBack)); return y(0) + v.array(); } else if (nFront > 0) { // Implicitly nBack = 0 VecType v(origX.leftCols(nFront)*y.block(1,0,nFront,1)); return y(0) + v.array(); } else { // Implicitly nFront = 0 VecType v(origX.rightCols(nBack)*y.bottomRows(nBack)); return y(0) + v.array(); } } } template<typename T, typename T2> VecType hessianDiag( const T& d2A, const T2& colIdx, double scale ) const { VecType Y = VecType::Zero(colIdx.size(),1); double temp; size_t t; for (size_t i = 0; i < colIdx.size(); ++i) { // Only loop over selected colIdx if( colIdx[i] == 0 ) { temp = d2A.sum(); // Just ones times diagVec } else { t = adjustForS( colIdx[i] ); temp = 
origX.col(t).cwiseProduct(d2A).dot( origX.col(t) ); } Y(i) = scale*temp; } return Y; } template<typename T> void updateR(const double mu, const size_t t, T& r) const { if( t == 0 ) { r.array() += mu; // All ones column } else { r += mu*origX.col( adjustForS(t) ); } } template<typename T, typename T2> double dotR( const T& r, const T2& d2A, const size_t t, const double scale ) const { if(t == 0) { return scale*r.dot(d2A); // All ones } else { return scale*(r.transpose()*origX.col(adjustForS(t)).cwiseProduct(d2A)).sum(); } } /** Note this function is not used anymore **/ // Function to compute the hessian // To get indices based on vector: MatrixXi indices = (A.array() < 3).cast<int>(); template<typename T, typename T2, typename T3> void XdiagX( const T& diagVec, const T2& colIdx, T3& Y, const double scale ) const { if( isTransposed ) { std::cerr << "XdiagX is not implemented for transpose matrices yet" << std::endl; std::exit(1); } // Error check assert(diagVec.size() == n && "diagVec is not a vector"); // Need to adjust colIdx based on s Y = T3::Zero( colIdx.size(), colIdx.size() ); // Assuming sparse matrix for (size_t i=0; i < colIdx.size(); ++i) { // Only loop over selected colIdx for (size_t i2=0; i2 < colIdx.size(); ++i2) { // Only loop over selected colIdx double temp = 0; size_t t = adjustForS( colIdx[i] ); // Change actual column depending on s size_t t2 = adjustForS( colIdx[i2] ); // Change actual column depending on s if( colIdx[i] == 0 && colIdx[i2] == 0 ) { temp = diagVec.sum(); // Just ones times diagVec } else if( colIdx[i] == 0 || colIdx[i2] == 0) { // Choose which column to sum over size_t colT; if( colIdx[i] == 0 ) { colT = t2; } else { colT = t; } // Sum over this column scaling by diagVec for( typename XType::InnerIterator it(origX, colT); it; ++it) { temp += diagVec[it.row()] * it.value(); } } else { temp = origX.col(t).cwiseProduct(diagVec).dot(origX.col(t2)); } Y(i2, i) = scale*temp; } } } template<typename T> T adjustForS( const T t ) const { 
// Adjust the column index t to account for the fact that we are using logical indexing if( t <= s ) { return t-1; // Subtract 1 because of all ones column } else { return t; // Otherwise same because of all ones column } } }; template<typename XType> std::ostream& operator<< (std::ostream& out, const VirtualMatrix<XType>& vX ) { for(size_t r = 0; r < vX.n; ++r) { for(size_t c = 0; c < vX.p; ++c) { if(c == 0) { out << 1; } else if( c <= vX.s ) { out << " " << vX.origX.coeff(r, c-1 ); } else { out << " " << vX.origX.coeff(r, c ); } } if(r < vX.n-1) out << std::endl; } return out; } template<ExpFam Family> struct GeneralizedLinearModel { // Evaluate objective value private: double verbosity; double evalobj( const VecType& beta, const VecType& Acur, const VecType& xTilde, const double lam ) const { double n = Acur.size(); double lassoTerm = lam * beta.tail(beta.size()-1).array().abs().sum(); double linTerm = beta.dot(xTilde); double Aterm = Acur.sum(); return (1/n)*( -linTerm + Aterm ) + lassoTerm; } public: GeneralizedLinearModel( ): verbosity(3) {} GeneralizedLinearModel( double verbosity ): verbosity(verbosity) {} // Main learning function template< typename XType, typename YType, typename BetaType > void learnreg( const VirtualMatrix<XType>& vX, // Note this is a virtual X rather than an eigen type const YType& Y, const double lam, BetaType& beta ) const { // Declare variables VecType eta, xTilde, grad, Acur, dAcur, d2Acur; BetaType betaNew; // To store proposed new beta MatType hessianFree; std::vector<size_t> freeSet; size_t n = vX.n, p = vX.p; double obj, obj0, objNew; // Initialize variables if(beta.size() != p) { // If beta is uninitialized beta = BetaType::Zero(p,1); // Initialize node parameter near mean beta(0) = std::log( Y.sum()/Y.size() ); eta = VecType::Constant(n,1,beta(0)); } else { eta = vX*beta; } A<Family>(eta, Acur); xTilde = vX.transpose()*Y; obj = obj0 = evalobj( beta, Acur, xTilde, lam ); // Outer loop size_t maxOuterIter = 500; for(size_t 
outerIter = 0; outerIter < maxOuterIter; ++outerIter) { // Calculate gradient dA<Family>(eta, Acur, dAcur); grad = (1/(double)n)*(-xTilde + vX.transpose()*dAcur); // Compute free set freeSet.clear(); for(size_t i = 0; i < grad.size(); ++i) if( i == 0 || std::abs(grad(i)) >= lam || beta(i) != 0 ) freeSet.push_back(i); // Calculate Hessian on free set d2A<Family>(eta, Acur, dAcur, d2Acur); VecType hessianDiag = vX.hessianDiag( d2Acur, freeSet, 1/(double)n); // Inner loop to approximate Newton direction VecType dFree = VecType::Zero(freeSet.size()); VecType r = VecType::Zero(n,1); // Maintain X*d product which is initially 0 size_t t; double a,b,c,mu,z; size_t maxInnerIter = floor(1 + ((double)outerIter+1)/3.0); for(size_t innerIter = 0; innerIter < maxInnerIter; ++innerIter) { for(size_t i = 0; i < freeSet.size(); ++i) { // Solve single variable problem t = freeSet[i]; a = hessianDiag(i); b = grad(t) + vX.dotR(r, d2Acur, t, 1/(double)n); c = beta(t) + dFree(i); if( t == 0 ) { mu = -b/a; // Without regularization } else { z = c - b/a; mu = -c + copysign( fmax( std::abs(z) - lam/a, 0), z ); } if(mu != 0) { dFree(i) += mu; vX.updateR(mu, t, r); } } //message(3,verbosity, " innerIter = %d", innerIter); } // Inner loop to calculate step size size_t maxStepIter = 50; double stepSize = 1, stepParam1 = 0.5, stepParam2 = 1e-10, stepConstant; for(size_t stepIter = 0; stepIter < maxStepIter; ++stepIter ) { // Compute new beta betaNew = beta; for( size_t i = 0; i < freeSet.size(); ++i ) betaNew[ freeSet[i] ] += stepSize*dFree(i); // Compute some constant stepsize quantities if( stepIter == 0 ) { double gradTimesD = 0; for( size_t i = 0; i < freeSet.size(); ++i ) gradTimesD += grad(freeSet[i])*dFree(i); double sumBeta = beta.array().abs().sum(); double sumBeta0 = betaNew.array().abs().sum(); stepConstant = stepParam2*(gradTimesD + sumBeta0 - sumBeta); } // Compute objective eta = vX*betaNew; A<Family>(eta, Acur); objNew = evalobj( betaNew, Acur, xTilde, lam ); // Check Armijo 
step condition if( objNew <= obj + stepSize*stepConstant ) { break; } else { stepSize *= stepParam1; } } // Update parameters double relDiffBeta = (betaNew-beta).norm()/beta.norm(); beta = betaNew; double relDiff = (obj-objNew)/obj; obj = objNew; //message(0, verbosity, " outerIter = %d, obj = %g, relDiffObj = %g, relDiffBeta = %g, stepSize = %g", outerIter, objNew, relDiff, relDiffBeta, stepSize ); if(relDiffBeta < 1e-5) { break; } } } // Alias when calling with simple XType template< typename XType, typename YType, typename BetaType > void learnreg( const XType& X, const YType& Y, const double lam, const double nodeBeta, BetaType& beta ) const { // Create padded X XType Xpadded(X.rows(), X.cols()+1); Xpadded.rightCols(X.cols()) = X; // Make virtual X from padded (i.e. remove column 0 which was already padded) VirtualMatrix<XType> vX(Xpadded, 0); // Setup Y based on given nodeBeta YType Ymod = Y.array() + nodeBeta; // Run regression with this new virtualized program learnreg(vX, Ymod, lam, beta); } }; template<ExpFam Family> struct GeneralizedMRF { private: double verbosity; size_t nThreads; public: GeneralizedMRF( ): verbosity(2) {} GeneralizedMRF( double verbosity ): verbosity(verbosity) {} GeneralizedMRF( double verbosity, size_t nThreads ): verbosity(verbosity), nThreads(nThreads) { if(nThreads != 0) { omp_set_num_threads(nThreads); } } /* typedef Eigen::SparseMatrix<double> XType; typedef Eigen::VectorXd ThetaNodeType; typedef Eigen::SparseMatrix<double> ThetaEdgeType; */ // Alias for nodeBeta = 0 /* template< typename XType, typename ThetaNodeType, typename ThetaEdgeType > void learnmrf( const XType& X, // Note this is a virtual X rather than an eigen type const double lam, ThetaNodeType& thetaNode, ThetaEdgeType& thetaEdge ) { learnmrf(X,lam,0,thetaNode,thetaEdge); } */ template< typename XType, typename ThetaNodeType, typename ThetaEdgeType > void learnmrf( const XType& X, const double lam, const double nodeBeta, ThetaNodeType& thetaNode, ThetaEdgeType& 
thetaEdge ) { // Initialize variables size_t n = X.rows(); size_t p = X.cols(); if(thetaNode.size() != p){ // Initialize node variables to close to correct if no edges instead of 0 VecType ones = VecType::Constant(n,1,1); VecType sumX = (X.transpose()*ones).array() + nodeBeta*n; thetaNode = (sumX/X.rows()).array().log(); } if(thetaEdge.cols() != p || thetaEdge.cols() != p) { thetaEdge.resize(p,p); thetaEdge.setZero(); } // Learn p regressions in parallel and combine std::vector< Eigen::Triplet<double> > tripletListArray[p]; GeneralizedLinearModel<Poisson> poisson(verbosity-1); // Reduce verbosity size_t threadId[p]; // Parallel for loop (dynamic because work is not evenly distributed) #pragma omp parallel for schedule(dynamic) for(size_t s = 0; s < p; ++s) { // Setup arguments VirtualMatrix<XType> vX( X, s ); VecType phi = VecType::Zero(p,1); phi(0) = thetaNode(s); for(typename ThetaEdgeType::InnerIterator it(thetaEdge, s); it; ++it) { if(it.row() < s) { phi(it.row()+1) = it.value(); } else if(it.row() > s) { phi(it.row()) = it.value(); } } // Run regression Eigen::VectorXd y = X.col(s); y.array() += nodeBeta; poisson.learnreg( vX, y, lam, phi ); // Save results in output vectors for(size_t i = 0; i < phi.size(); ++i) { if(i == 0) { thetaNode(s) = phi(i); } else if( phi(i) != 0 ) { if( i <= s) { tripletListArray[s].push_back( Eigen::Triplet<double>(s, i-1, phi(i) )); } else { tripletListArray[s].push_back( Eigen::Triplet<double>(s, i, phi(i) )); } } } threadId[s] = omp_get_thread_num(); //std::cout << threadId[s]; } //std::cout << std::endl; // Display output checking for thread_id //for(size_t s = 0; s < p; ++s) message(2,verbosity, "s = %d, thread_id = %d", s, threadId[s]); // Concatenate all tripletLists size_t nnz = 0; for(size_t s = 0; s < p; ++s) nnz += tripletListArray[s].size(); std::vector< Eigen::Triplet<double> > triplets; triplets.reserve(nnz); for(size_t s = 0; s < p; ++s) triplets.insert(triplets.end(), tripletListArray[s].begin(), 
tripletListArray[s].end()); assert( triplets.size() == nnz && "Triplets size is not equal to the concatenation of all triplet list sizes"); // Create thetaEdge from this triplet list thetaEdge.setFromTriplets( triplets.begin(), triplets.end() ); } }; #endif
pslansy.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/pzlansy.c, normal z -> s, Fri Sep 28 17:38:13 2018
 *
 **/

#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include <plasma_core_blas.h>

// Address of tile (m, n) of the tiled matrix A, viewed as float*.
#define A(m, n) (float*)plasma_tile_addr(A, m, n)

/***************************************************************************//**
 * Parallel tile calculation of max, one, infinity or Frobenius matrix norm
 * for a symmetric matrix.
 *
 * Two passes: per-tile partial results are computed into work[] by OpenMP
 * tasks, then (after a taskwait) a single reduction task writes *value.
 *
 * @param norm     PlasmaMaxNorm, PlasmaOneNorm, PlasmaInfNorm or
 *                 PlasmaFrobeniusNorm.  NOTE(review): there is no default
 *                 case, so any other value silently computes nothing.
 * @param uplo     PlasmaLower or PlasmaUpper -- which triangle is stored.
 * @param A        tile descriptor of the symmetric matrix.
 * @param work     workspace for per-tile partial results; the layout (and
 *                 required size) differs per norm, see the notes below.
 * @param value    output scalar, written by the final reduction task.
 * @param sequence asynchronous sequence; an already-failed sequence makes
 *                 this routine return immediately.
 * @param request  asynchronous request handle.
 ******************************************************************************/
void plasma_pslansy(plasma_enum_t norm, plasma_enum_t uplo,
                    plasma_desc_t A,
                    float *work, float *value,
                    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Return if failed sequence.
    if (sequence->status != PlasmaSuccess)
        return;

    switch (norm) {
    // These declarations sit between "switch (" and the first "case"; that is
    // legal C -- they are in scope for every case and carry no initializers
    // that could be skipped.  "stub" absorbs per-tile outputs that are unused.
    float stub;
    float *workspace;
    float *scale;
    float *sumsq;
    //================
    // PlasmaMaxNorm
    //================
    case PlasmaMaxNorm:
        // Pass 1: the max of each stored tile (m, n) is written into an
        // A.mt-by-A.nt column-major grid at work[A.mt*n + m].
        for (int m = 0; m < A.mt; m++) {
            int mvam = plasma_tile_mview(A, m);
            int ldam = plasma_tile_mmain(A, m);
            if (uplo == PlasmaLower) {
                // Off-diagonal tiles of the stored (lower) triangle: n < m.
                for (int n = 0; n < m; n++) {
                    int nvan = plasma_tile_nview(A, n);
                    plasma_core_omp_slange(PlasmaMaxNorm,
                                           mvam, nvan,
                                           A(m, n), ldam,
                                           &stub, &work[A.mt*n+m],
                                           sequence, request);
                }
            }
            else { // PlasmaUpper
                for (int n = m+1; n < A.nt; n++) {
                    int nvan = plasma_tile_nview(A, n);
                    plasma_core_omp_slange(PlasmaMaxNorm,
                                           mvam, nvan,
                                           A(m, n), ldam,
                                           &stub, &work[A.mt*n+m],
                                           sequence, request);
                }
            }
            // Diagonal tile: symmetric kernel, only the uplo part is read.
            plasma_core_omp_slansy(PlasmaMaxNorm, uplo,
                                   mvam,
                                   A(m, m), ldam,
                                   &stub, &work[A.mt*m+m],
                                   sequence, request);
        }
        #pragma omp taskwait
        // Pass 2: reduce the grid of per-tile maxima, treating it as a
        // symmetric A.nt-by-A.nt matrix with leading dimension A.mt, so each
        // off-diagonal partial result counts for both (m,n) and (n,m).
        plasma_core_omp_slansy(PlasmaMaxNorm, uplo,
                               A.nt,
                               work, A.mt,
                               &stub, value,
                               sequence, request);
        break;
    //================================
    // PlasmaOneNorm / PlasmaInfNorm
    //================================
    // A is symmetric, so its one-norm equals its infinity-norm; both cases
    // share a single implementation.
    case PlasmaOneNorm:
    case PlasmaInfNorm:
        // Pass 1: per-tile absolute sums.  Each off-diagonal tile (m, n)
        // contributes in both directions (covering the implicit transposed
        // tile as well); results are accumulated into an A.n-by-A.mt array
        // with leading dimension A.n, in A.nb-sized segments per tile.
        for (int m = 0; m < A.mt; m++) {
            int mvam = plasma_tile_mview(A, m);
            int ldam = plasma_tile_mmain(A, m);
            if (uplo == PlasmaLower) {
                for (int n = 0; n < m; n++) {
                    int nvan = plasma_tile_nview(A, n);
                    plasma_core_omp_slange_aux(PlasmaOneNorm,
                                               mvam, nvan,
                                               A(m, n), ldam,
                                               &work[A.n*m+n*A.nb],
                                               sequence, request);
                    plasma_core_omp_slange_aux(PlasmaInfNorm,
                                               mvam, nvan,
                                               A(m, n), ldam,
                                               &work[A.n*n+m*A.nb],
                                               sequence, request);
                }
            }
            else { // PlasmaUpper
                for (int n = m+1; n < A.nt; n++) {
                    int nvan = plasma_tile_nview(A, n);
                    plasma_core_omp_slange_aux(PlasmaOneNorm,
                                               mvam, nvan,
                                               A(m, n), ldam,
                                               &work[A.n*m+n*A.nb],
                                               sequence, request);
                    plasma_core_omp_slange_aux(PlasmaInfNorm,
                                               mvam, nvan,
                                               A(m, n), ldam,
                                               &work[A.n*n+m*A.nb],
                                               sequence, request);
                }
            }
            // Diagonal tile: one direction suffices (symmetric kernel).
            plasma_core_omp_slansy_aux(PlasmaOneNorm, uplo,
                                       mvam,
                                       A(m, m), ldam,
                                       &work[A.n*m+m*A.nb],
                                       sequence, request);
        }
        #pragma omp taskwait
        // Pass 2: an inf-norm of the A.n-by-A.mt array of partial sums takes,
        // per matrix row, the total across all tile columns, then the max.
        // The extra buffer after the partial sums is kernel scratch space.
        workspace = work + A.mt*A.n;
        plasma_core_omp_slange(PlasmaInfNorm,
                               A.n, A.mt,
                               work, A.n,
                               workspace, value,
                               sequence, request);
        break;
    //======================
    // PlasmaFrobeniusNorm
    //======================
    case PlasmaFrobeniusNorm:
        // work holds A.mt*A.nt (scale, sumsq) pairs: the first half is the
        // scale factors, the second half the scaled sums of squares.
        scale = work;
        sumsq = work + A.mt*A.nt;
        for (int m = 0; m < A.mt; m++) {
            int mvam = plasma_tile_mview(A, m);
            int ldam = plasma_tile_mmain(A, m);
            if (uplo == PlasmaLower) {
                for (int n = 0; n < m; n++) {
                    int nvan = plasma_tile_nview(A, n);
                    plasma_core_omp_sgessq(mvam, nvan,
                                           A(m, n), ldam,
                                           &scale[A.mt*n+m], &sumsq[A.mt*n+m],
                                           sequence, request);
                }
            }
            else { // PlasmaUpper
                // NOTE: the upper case stores its pair at the transposed
                // slot (A.mt*m+n) relative to the lower case.
                for (int n = m+1; n < A.nt; n++) {
                    int nvan = plasma_tile_nview(A, n);
                    plasma_core_omp_sgessq(mvam, nvan,
                                           A(m, n), ldam,
                                           &scale[A.mt*m+n], &sumsq[A.mt*m+n],
                                           sequence, request);
                }
            }
            plasma_core_omp_ssyssq(uplo, mvam,
                                   A(m, m), ldam,
                                   &scale[A.mt*m+m], &sumsq[A.mt*m+m],
                                   sequence, request);
        }
        #pragma omp taskwait
        // Pass 2: combine all (scale, sumsq) pairs into the final norm;
        // presumably the aux kernel doubles the off-diagonal contributions
        // to account for symmetry -- confirm against plasma_core_blas.
        plasma_core_omp_ssyssq_aux(A.mt, A.nt,
                                   scale, sumsq,
                                   value,
                                   sequence, request);
        break;
    }
}
ch_ompss.c
#include "ch_common.h"
#include "../extrae.h"
#include "../timing.h"

/**
 * TODO: What is the lower bound for a circular deadlock? (0 waits for 1 waits for 2 waits for 0)
 * Example: Execution order on 1 is reversed:
 *          0 waits for 1/2/3,
 *          1 waits for 3/2/0,
 *          2 waits for 0/1/3,
 *          3 waits for 0/1/2
 *     OR
 *          0 waits for 1/2/3/4,
 *          1 waits for 0/2/3/4,
 *          2 waits for 4/3/2/0,
 *          3 waits for 0/1/2/4,
 *          4 waits for 0/1/2/3
 *     OR
 *          0 waits for 1/2/3/4/5,
 *          1 waits for 0/2/3/4/5,
 *          2 waits for 5/4/3/2/0,
 *          3 waits for 0/1/2/4/5,
 *          4 waits for 0/1/2/3/5,
 *          5 waits for 0/1/2/3/5
 *
 * NOTE: circular dependencies may happen if at least one of the inner ranks
 *       (1 or 2, not 0 or 3) reverse their order
 * HYPOTHESIS: we need at least (p-(p/2)) (ceil(0.5p)) threads to avoid deadlock from reversal
 * Generalization to some ordered graph traversal problem?
 */

// Distributed task-parallel tiled right-looking Cholesky factorization.
//
//   ts         : tile size (tiles are ts*ts doubles).
//   nt         : number of tile rows/columns.
//   A          : nt x nt grid of tile pointers; tiles owned by other ranks
//                are presumably not dereferenced locally -- see block_rank.
//   B          : ts*ts receive buffer for a remote diagonal tile A[k][k].
//   C          : per-column ts*ts receive buffers for remote panel tiles.
//   block_rank : owner rank of each tile, row-major nt*nt.
//
// mype (this MPI rank) and np (number of ranks) are globals, presumably
// provided by ch_common.h -- confirm.  Tasks use OmpSs-style dependency
// clauses (no_copy_deps, untied, multi-dependencies {..., it=0;cnt}).
void cholesky_mpi(const int ts, const int nt, double *A[nt][nt], double *B, double *C[nt], int *block_rank)
{
    // Scratch index lists reused by every (k, dst) communication task below;
    // their contents are only read at task-creation time (dependency clauses).
    int *send_blocks = malloc((nt) * sizeof(int));
    int *recv_blocks = malloc((nt) * sizeof(int));
    REGISTER_EXTRAE();

#pragma omp parallel
{
#pragma omp single
{
    INIT_TIMING(omp_get_num_threads());
    // NOTE(review): dst_sentinels is declared but never used in this function.
    char dst_sentinels[np];
    START_TIMING(TIME_TOTAL);
    {
    START_TIMING(TIME_CREATE);
    // For each panel k: POTRF the diagonal tile, broadcast it, TRSM the
    // panel, exchange panel tiles between ranks, then GEMM/SYRK updates.
    for (int k = 0; k < nt; k++) {
        // Factorize the diagonal tile if this rank owns it.
        if (block_rank[k*nt+k] == mype) {
#pragma omp task out(A[k][k]) firstprivate(k) no_copy_deps
            {
                EXTRAE_ENTER(EVENT_POTRF);
                START_TIMING(TIME_POTRF);
                omp_potrf(A[k][k], ts, ts);
                END_TIMING(TIME_POTRF);
                EXTRAE_EXIT(EVENT_POTRF);
            }
        }
        // Owner sends A[k][k] once to every rank that owns a tile of panel k.
        if (block_rank[k*nt+k] == mype && np != 1) {
#pragma omp task in(A[k][k]) firstprivate(k) no_copy_deps untied
            {
                START_TIMING(TIME_COMM);
                MPI_Request *reqs = NULL;
                int nreqs = 0;
                char send_flags[np];
                reset_send_flags(send_flags);
                // Mark each destination rank at most once while counting the
                // number of send requests needed.
                for (int kk = k+1; kk < nt; kk++) {
                    if (!send_flags[block_rank[k*nt+kk]]) {
                        ++nreqs;
                        send_flags[block_rank[k*nt+kk]] = 1;
                    }
                }
                reqs = malloc(sizeof(MPI_Request)*nreqs);
                nreqs = 0;
                for (int dst = 0; dst < np; dst++) {
                    if (send_flags[dst] && dst != mype) {
                        MPI_Request send_req;
                        // Tag k*nt+k identifies the diagonal tile of panel k.
                        MPI_Isend(A[k][k], ts*ts, MPI_DOUBLE, dst, k*nt+k, MPI_COMM_WORLD, &send_req);
                        reqs[nreqs++] = send_req;
                    }
                }
                waitall(reqs, nreqs);
                free(reqs);
                END_TIMING(TIME_COMM);
            }
        }
        // Non-owners receive A[k][k] into B, but only if they own at least
        // one tile of panel k (otherwise they never need it).
        if (block_rank[k*nt+k] != mype) {
#pragma omp task out(B) firstprivate(k) no_copy_deps untied
            {
                START_TIMING(TIME_COMM);
                int recv_flag = 0;
                for (int i = k + 1; i < nt; i++) {
                    if (block_rank[k*nt+i] == mype) {
                        recv_flag = 1;
                        break;
                    }
                }
                if (recv_flag) {
                    MPI_Request recv_req;
                    MPI_Irecv(B, ts*ts, MPI_DOUBLE, block_rank[k*nt+k], k*nt+k, MPI_COMM_WORLD, &recv_req);
                    waitall(&recv_req, 1);
                }
                END_TIMING(TIME_COMM);
            }
        }
        // TRSM each locally-owned panel tile against the diagonal tile
        // (local copy if owned, otherwise the received copy in B).
        for (int i = k + 1; i < nt; i++) {
            if (block_rank[k*nt+i] == mype) {
                if (block_rank[k*nt+k] == mype) {
#pragma omp task in(A[k][k]) out(A[k][i]) firstprivate(k, i) no_copy_deps
                    {
                        EXTRAE_ENTER(EVENT_TRSM);
                        START_TIMING(TIME_TRSM);
                        omp_trsm(A[k][k], A[k][i], ts, ts);
                        END_TIMING(TIME_TRSM);
                        EXTRAE_EXIT(EVENT_TRSM);
                    }
                } else {
#pragma omp task in(B) out(A[k][i]) firstprivate(k, i) no_copy_deps
                    {
                        EXTRAE_ENTER(EVENT_TRSM);
                        START_TIMING(TIME_TRSM);
                        omp_trsm(B, A[k][i], ts, ts);
                        END_TIMING(TIME_TRSM);
                        EXTRAE_EXIT(EVENT_TRSM);
                    }
                }
            }
        }
        // Pairwise exchange of TRSM results with every other rank.
        for (int dst = 0; dst < np; dst++) {
            if (dst == mype)
                continue;
            int send_cnt = 0;
            int recv_cnt = 0;
            // populate list of blocks to send/recv to/from this unit
            for (int i = k + 1; i < nt; i++) {
                if (block_rank[k*nt+i] == mype && np != 1) {
                    // dst needs our A[k][i] if it owns any trailing-update
                    // tile in row or column i (including the diagonal).
                    int send_flag = 0;
                    for (int ii = k + 1; ii < i; ii++) {
                        if (!send_flag && block_rank[ii*nt+i] == dst) {
                            send_flag = 1;
                            break;
                        }
                    }
                    for (int ii = i + 1; ii < nt; ii++) {
                        if (!send_flag && block_rank[i*nt+ii] == dst) {
                            send_flag = 1;
                            break;
                        }
                    }
                    if (!send_flag && block_rank[i*nt+i] == dst)
                        send_flag = 1;
                    if (send_flag) {
                        send_blocks[send_cnt++] = i;
                    }
                }
                // Symmetrically: we need dst's A[k][i] if we own such a tile.
                if (block_rank[k*nt+i] != mype && block_rank[k*nt+i] == dst) {
                    int recv_flag = 0;
                    for (int ii = k + 1; ii < i; ii++) {
                        if (block_rank[ii*nt+i] == mype)
                            recv_flag = 1;
                    }
                    for (int ii = i + 1; ii < nt; ii++) {
                        if (block_rank[i*nt+ii] == mype)
                            recv_flag = 1;
                    }
                    if (block_rank[i*nt+i] == mype)
                        recv_flag = 1;
                    if (recv_flag) {
                        recv_blocks[recv_cnt++] = i;
                    }
                }
            }
            //printf("send_cnt: %d, recv_cnt: %d, blocks: %d\n", send_cnt, recv_cnt, (nt-(k+1)));
            // NOTE: we have to wait for all of the above tasks using comm_sentinel
            // dependency iterators might help here
#pragma omp task no_copy_deps firstprivate(k, dst) out({C[recv_blocks[it]], it=0;recv_cnt}) in({A[k][send_blocks[it]], it=0;send_cnt}) untied
            {
                START_TIMING(TIME_COMM);
                int nreqs = 0;
                // upper bound in case all our blocks have to be sent
                int max_req = (nt-k);
                MPI_Request *reqs = malloc(sizeof(*reqs)*max_req);
                // NOTE(review): the send/recv decisions are re-derived here
                // instead of reading send_blocks/recv_blocks -- presumably
                // because those shared scratch lists may be overwritten by a
                // later dst iteration before this task runs.  These loops
                // intentionally lack the "break" of the loops above.
                for (int i = k + 1; i < nt; i++) {
                    if (block_rank[k*nt+i] == mype && np != 1) {
                        int send_flag = 0;
                        for (int ii = k + 1; ii < i; ii++) {
                            if (!send_flag && block_rank[ii*nt+i] == dst) {
                                send_flag = 1;
                            }
                        }
                        for (int ii = i + 1; ii < nt; ii++) {
                            if (!send_flag && block_rank[i*nt+ii] == dst) {
                                send_flag = 1;
                            }
                        }
                        if (!send_flag && block_rank[i*nt+i] == dst)
                            send_flag = 1;
                        if (send_flag) {
                            MPI_Request send_req;
                            MPI_Isend(A[k][i], ts*ts, MPI_DOUBLE, dst, k*nt+i, MPI_COMM_WORLD, &send_req);
                            reqs[nreqs++] = send_req;
                        }
                    }
                    if (block_rank[k*nt+i] != mype && block_rank[k*nt+i] == dst) {
                        int recv_flag = 0;
                        for (int ii = k + 1; ii < i; ii++) {
                            if (block_rank[ii*nt+i] == mype)
                                recv_flag = 1;
                        }
                        for (int ii = i + 1; ii < nt; ii++) {
                            if (block_rank[i*nt+ii] == mype)
                                recv_flag = 1;
                        }
                        if (block_rank[i*nt+i] == mype)
                            recv_flag = 1;
                        if (recv_flag) {
                            MPI_Request recv_req;
                            MPI_Irecv(C[i], ts*ts, MPI_DOUBLE, block_rank[k*nt+i], k*nt+i, MPI_COMM_WORLD, &recv_req);
                            reqs[nreqs++] = recv_req;
                        }
                    }
                }
                //printf("Waiting for trsm blocks from %d in k=%d\n", dst, k);
                waitall(reqs, nreqs);
                free(reqs);
                END_TIMING(TIME_COMM);
            }
        }
        // Trailing-submatrix update: GEMM for off-diagonal tiles, SYRK for
        // diagonal tiles, selecting local panel tiles or received copies in C[].
        for (int i = k + 1; i < nt; i++) {
            for (int j = k + 1; j < i; j++) {
                if (block_rank[j*nt+i] == mype) {
                    if (block_rank[k*nt+i] == mype && block_rank[k*nt+j] == mype) {
#pragma omp task in(A[k][i], A[k][j]) out(A[j][i]) firstprivate(k, j, i) no_copy_deps
                        {
                            EXTRAE_ENTER(EVENT_GEMM);
                            START_TIMING(TIME_GEMM);
                            omp_gemm(A[k][i], A[k][j], A[j][i], ts, ts);
                            END_TIMING(TIME_GEMM);
                            EXTRAE_EXIT(EVENT_GEMM);
                        }
                    } else if (block_rank[k*nt+i] != mype && block_rank[k*nt+j] == mype) {
#pragma omp task in(A[k][j], C[i]) out(A[j][i]) firstprivate(k, j, i) no_copy_deps
                        {
                            EXTRAE_ENTER(EVENT_GEMM);
                            START_TIMING(TIME_GEMM);
                            omp_gemm(C[i], A[k][j], A[j][i], ts, ts);
                            END_TIMING(TIME_GEMM);
                            EXTRAE_EXIT(EVENT_GEMM);
                        }
                    } else if (block_rank[k*nt+i] == mype && block_rank[k*nt+j] != mype) {
                        // TODO: the content of C[j] may be overwritten but we cannot specify a dependency on it :(
#pragma omp task in(A[k][i], C[j]) out(A[j][i]) firstprivate(k, j, i) no_copy_deps
                        {
                            EXTRAE_ENTER(EVENT_GEMM);
                            START_TIMING(TIME_GEMM);
                            omp_gemm(A[k][i], C[j], A[j][i], ts, ts);
                            END_TIMING(TIME_GEMM);
                            EXTRAE_EXIT(EVENT_GEMM);
                        }
                    } else {
#pragma omp task in(C[i], C[j]) out(A[j][i]) firstprivate(k, j, i) no_copy_deps
                        {
                            EXTRAE_ENTER(EVENT_GEMM);
                            START_TIMING(TIME_GEMM);
                            omp_gemm(C[i], C[j], A[j][i], ts, ts);
                            END_TIMING(TIME_GEMM);
                            EXTRAE_EXIT(EVENT_GEMM);
                        }
                    }
                }
            }
            if (block_rank[i*nt+i] == mype) {
                if (block_rank[k*nt+i] == mype) {
#pragma omp task in(A[k][i]) out(A[i][i]) firstprivate(k, i) no_copy_deps
                    {
                        EXTRAE_ENTER(EVENT_SYRK);
                        START_TIMING(TIME_SYRK);
                        omp_syrk(A[k][i], A[i][i], ts, ts);
                        END_TIMING(TIME_SYRK);
                        EXTRAE_EXIT(EVENT_SYRK);
                    }
                } else {
#pragma omp task in(C[i]) out(A[i][i]) firstprivate(k, i) no_copy_deps
                    {
                        EXTRAE_ENTER(EVENT_SYRK);
                        START_TIMING(TIME_SYRK);
                        omp_syrk(C[i], A[i][i], ts, ts);
                        END_TIMING(TIME_SYRK);
                        EXTRAE_EXIT(EVENT_SYRK);
                    }
                }
            }
        }
    }
    END_TIMING(TIME_CREATE);
    }
#pragma omp taskwait
    END_TIMING(TIME_TOTAL);
    MPI_Barrier(MPI_COMM_WORLD);
    PRINT_TIMINGS();
    FREE_TIMING();
}// pragma omp single
}// pragma omp parallel
    free(send_blocks);
    free(recv_blocks);
}
GB_AxB_rowscale_meta.c
//------------------------------------------------------------------------------
// GB_AxB_rowscale_meta: C=D*B where D is a square diagonal matrix
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// All entries in C=D*B are computed entirely in parallel.

// B and C can be jumbled.  D cannot, but it is a diagonal matrix so it is
// never jumbled.

// This is a meta body: it is #include'd into kernels that define the macros
// GB_ATYPE, GB_BTYPE, GB_GETA, GB_GETB, GB_BINOP, GB_CX, etc., and that
// declare the variables C, D, B, D_is_pattern, B_is_pattern, and nthreads.

{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    // Bx is unused if the operator is FIRST or PAIR
    #include "GB_unused.h"

    ASSERT (GB_JUMBLED_OK (C)) ;
    ASSERT (!GB_JUMBLED (D)) ;
    ASSERT (GB_JUMBLED_OK (B)) ;

    //--------------------------------------------------------------------------
    // get C, D, and B
    //--------------------------------------------------------------------------

    // Pattern-only operands contribute no numerical values; their value
    // arrays stay NULL and are never read by GB_GETA / GB_GETB.
    const GB_ATYPE *restrict Dx = (GB_ATYPE *) (D_is_pattern ? NULL : D->x) ;
    const GB_BTYPE *restrict Bx = (GB_BTYPE *) (B_is_pattern ? NULL : B->x) ;
    const int64_t *restrict Bi = B->i ;
    const int64_t bnz = GB_IS_FULL (B) ? GB_NNZ_FULL (B) : GB_NNZ (B) ;
    const int64_t bvlen = B->vlen ;

    //--------------------------------------------------------------------------
    // C=D*B
    //--------------------------------------------------------------------------

    // One task per thread, but never more tasks than entries in B.
    int ntasks = nthreads ;
    ntasks = GB_IMIN (bnz, ntasks) ;

    // Loop variable declared outside the for statement (required by the
    // OpenMP canonical form used here).
    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (tid = 0 ; tid < ntasks ; tid++)
    {
        // Task tid handles the contiguous slice [pstart, pend) of B's entries.
        int64_t pstart, pend ;
        GB_PARTITION (pstart, pend, bnz, tid, ntasks) ;
        GB_PRAGMA_SIMD_VECTORIZE
        for (int64_t p = pstart ; p < pend ; p++)
        { 
            int64_t i = GBI (Bi, p, bvlen) ;        // get row index of B(i,j)
            GB_GETA (dii, Dx, i) ;                  // dii = D(i,i)
            GB_GETB (bij, Bx, p) ;                  // bij = B(i,j)
            GB_BINOP (GB_CX (p), dii, bij, 0, 0) ;  // C(i,j) = dii*bij
        }
    }
}
parallel_for.h
//
//---------------------------------------------------------------------------
//
// Copyright(C) 2017 Alexey Lysiuk
// All rights reserved.
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program.  If not, see http://www.gnu.org/licenses/
//
//---------------------------------------------------------------------------
//

#pragma once
#ifndef PARALLEL_FOR_H_INCLUDED
#define PARALLEL_FOR_H_INCLUDED

// parallel_for(first, last, step, f) invokes f(i) for every i in the
// half-open range [first, last) with stride `step`, using the best available
// backend: MSVC PPL, Apple libdispatch, or OpenMP (a plain serial loop when
// OpenMP is disabled).  All backends must agree on this iteration space.

#ifdef HAVE_PARALLEL_FOR

#include <ppl.h>

template <typename Index, typename Function>
inline void parallel_for(const Index first, const Index last, const Index step, const Function& function)
{
    // concurrency::parallel_for already iterates the half-open [first, last).
    concurrency::parallel_for(first, last, step, function);
}

#elif defined HAVE_DISPATCH_APPLY

#include <dispatch/dispatch.h>

template <typename Index, typename Function>
inline void parallel_for(const Index first, const Index last, const Index step, const Function& function)
{
    if (last <= first)
    {
        return;
    }

    // The number of iterations of [first, last) with stride `step` is
    // ceil((last - first) / step).  The previous formula,
    // (last - first) / step + 1, executed one extra iteration whenever
    // (last - first) was an exact multiple of step, and the block body
    // also ignored `first` when computing the index passed to `function`.
    const size_t iterations =
        (static_cast<size_t>(last - first) + static_cast<size_t>(step) - 1)
            / static_cast<size_t>(step);

    const dispatch_queue_t queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);

    dispatch_apply(iterations, queue, ^(size_t slice)
    {
        function(first + static_cast<Index>(slice) * step);
    });
}

#else // Generic loop with optional OpenMP parallelization

template <typename Index, typename Function>
inline void parallel_for(const Index first, const Index last, const Index step, const Function& function)
{
    // The loop variable is declared in the for statement as required by
    // "#pragma omp parallel for"; without OpenMP this is a serial loop.
    #pragma omp parallel for
    for (Index i = first; i < last; i += step)
    {
        function(i);
    }
}

#endif // HAVE_PARALLEL_FOR

// Convenience overload: iterate [0, count) with stride 1.
template <typename Index, typename Function>
inline void parallel_for(const Index count, const Function& function)
{
    // Cast the literals so template argument deduction cannot conflict with
    // a non-int Index (bare 0 and 1 would deduce Index = int).
    parallel_for(static_cast<Index>(0), count, static_cast<Index>(1), function);
}

// Convenience overload: iterate [0, count) with the given stride.
template <typename Index, typename Function>
inline void parallel_for(const Index count, const Index step, const Function& function)
{
    parallel_for(static_cast<Index>(0), count, step, function);
}

#endif // PARALLEL_FOR_H_INCLUDED
compiler_cgen.c
/* Generated by Nim Compiler v0.15.0 */ /* (c) 2016 Andreas Rumpf */ /* The generated code is subject to the original license. */ #define NIM_INTBITS 32 #include "nimbase.h" #include <string.h> typedef struct Tcgen527027 Tcgen527027; typedef struct TNimType TNimType; typedef struct TNimNode TNimNode; typedef struct Ropeobj177006 Ropeobj177006; typedef struct NimStringDesc NimStringDesc; typedef struct TGenericSeq TGenericSeq; typedef struct Cell46904 Cell46904; typedef struct Cellseq46920 Cellseq46920; typedef struct Gcheap49418 Gcheap49418; typedef struct Gcstack49416 Gcstack49416; typedef struct Memregion29085 Memregion29085; typedef struct Smallchunk29039 Smallchunk29039; typedef struct Llchunk29079 Llchunk29079; typedef struct Bigchunk29041 Bigchunk29041; typedef struct Intset29014 Intset29014; typedef struct Trunk29010 Trunk29010; typedef struct Avlnode29083 Avlnode29083; typedef struct Gcstat49414 Gcstat49414; typedef struct Cellset46916 Cellset46916; typedef struct Pagedesc46912 Pagedesc46912; typedef struct Ttypeseq290836 Ttypeseq290836; typedef struct Ttype290840 Ttype290840; typedef struct Intset266030 Intset266030; typedef struct Trunk266026 Trunk266026; typedef struct Trunkseq266028 Trunkseq266028; typedef struct Tpasscontext339002 Tpasscontext339002; typedef struct Tsym290834 Tsym290834; typedef struct Tidobj197004 Tidobj197004; typedef struct TNimObject TNimObject; typedef struct TY290929 TY290929; typedef struct Tstrtable290806 Tstrtable290806; typedef struct Tsymseq290804 Tsymseq290804; typedef struct Tident197010 Tident197010; typedef struct Tlineinfo189336 Tlineinfo189336; typedef struct Tnode290802 Tnode290802; typedef struct Tloc290816 Tloc290816; typedef struct Tlib290820 Tlib290820; typedef struct TY527153 TY527153; typedef struct TY201018 TY201018; typedef struct Tidtable290850 Tidtable290850; typedef struct Tidpairseq290848 Tidpairseq290848; typedef struct Tlinkedlist147013 Tlinkedlist147013; typedef struct Tlistentry147007 Tlistentry147007; 
typedef struct Tcproc527021 Tcproc527021; typedef struct Tnodetable290862 Tnodetable290862; typedef struct Tnodepairseq290860 Tnodepairseq290860; typedef struct Debuginfo201009 Debuginfo201009; typedef struct TY201021 TY201021; typedef struct TY201023 TY201023; typedef struct Tnodeseq290796 Tnodeseq290796; typedef struct TY189350 TY189350; typedef struct TY527095 TY527095; typedef struct Trodreader330021 Trodreader330021; typedef struct TY290960 TY290960; typedef struct TY201017 TY201017; typedef struct Enumdesc201007 Enumdesc201007; typedef struct Tinfocc271008 Tinfocc271008; typedef struct Tblock527019 Tblock527019; typedef struct Ttraversalclosure535019 Ttraversalclosure535019; typedef struct TY134602 TY134602; typedef struct Tbitset337004 Tbitset337004; typedef struct TY189612 TY189612; typedef struct Tfileinfo189334 Tfileinfo189334; typedef struct Tinfoos175035 Tinfoos175035; typedef struct Tinfocpu175476 Tinfocpu175476; typedef struct Tstrentry147009 Tstrentry147009; typedef struct TY124315 TY124315; typedef struct Basechunk29037 Basechunk29037; typedef struct Freecell29029 Freecell29029; typedef struct Tinstantiation290824 Tinstantiation290824; typedef struct Tidpair290846 Tidpair290846; typedef struct Tnodepair290858 Tnodepair290858; typedef struct Filenamemapping201005 Filenamemapping201005; typedef struct TY330033 TY330033; typedef struct Tindex330019 Tindex330019; typedef struct Tiitable297142 Tiitable297142; typedef struct Tiipairseq297140 Tiipairseq297140; typedef struct Table330054 Table330054; typedef struct Keyvaluepairseq330057 Keyvaluepairseq330057; typedef struct Memfile328202 Memfile328202; typedef struct TY290961 TY290961; typedef struct Tiipair297138 Tiipair297138; typedef struct Keyvaluepair330060 Keyvaluepair330060; typedef NU8 Tnimkind3403; typedef NU8 Tnimtypeflag3409Set; typedef N_NIMCALL_PTR(void, TY3489) (void* p0, NI op0); typedef N_NIMCALL_PTR(void*, TY3494) (void* p0); struct TNimType { NI size; Tnimkind3403 kind; Tnimtypeflag3409Set 
flags; TNimType* base; TNimNode* node; void* finalizer; TY3489 marker; TY3494 deepcopy; }; typedef NU8 Tnimnodekind3405; struct TNimNode { Tnimnodekind3405 kind; NI offset; TNimType* typ; NCSTRING name; NI len; TNimNode** sons; }; typedef N_NIMCALL_PTR(void, Globalmarkerproc55402) (void); struct TGenericSeq { NI len; NI reserved; }; struct NimStringDesc { TGenericSeq Sup; NIM_CHAR data[SEQ_DECL_SIZE]; }; struct Cell46904 { NI refcount; TNimType* typ; }; struct Cellseq46920 { NI len; NI cap; Cell46904** d; }; typedef Smallchunk29039* TY29100[512]; typedef Trunk29010* Trunkbuckets29012[256]; struct Intset29014 { Trunkbuckets29012 data; }; struct Memregion29085 { NI minlargeobj; NI maxlargeobj; TY29100 freesmallchunks; Llchunk29079* llmem; NI currmem; NI maxmem; NI freemem; NI lastsize; Bigchunk29041* freechunkslist; Intset29014 chunkstarts; Avlnode29083* root; Avlnode29083* deleted; Avlnode29083* last; Avlnode29083* freeavlnodes; NIM_BOOL locked; }; struct Gcstat49414 { NI stackscans; NI cyclecollections; NI maxthreshold; NI maxstacksize; NI maxstackcells; NI cycletablesize; NI64 maxpause; }; struct Cellset46916 { NI counter; NI max; Pagedesc46912* head; Pagedesc46912** data; }; struct Gcheap49418 { Gcstack49416* stack; void* stackbottom; NI cyclethreshold; Cellseq46920 zct; Cellseq46920 decstack; Cellseq46920 tempstack; NI recgclock; Memregion29085 region; Gcstat49414 stat; Cellset46916 marked; Cellseq46920 additionalroots; }; struct Intset266030 { NI counter; NI max; Trunk266026* head; Trunkseq266028* data; }; struct TNimObject { TNimType* m_type; }; struct Tidobj197004 { TNimObject Sup; NI id; }; typedef NU8 Tsymkind290435; struct Tstrtable290806 { NI counter; Tsymseq290804* data; }; typedef NU16 Tmagic290524; struct Tlineinfo189336 { NI16 line; NI16 col; NI32 fileindex; }; typedef NU32 Tsymflag290184Set; typedef NU32 Toption168009Set; typedef NU8 Tlockind290808; typedef NU8 Tstorageloc290812; typedef NU16 Tlocflag290810Set; struct Tloc290816 { Tlockind290808 k; 
Tstorageloc290812 s; Tlocflag290810Set flags; Ttype290840* t; Ropeobj177006* r; }; struct Tsym290834 { Tidobj197004 Sup; Tsymkind290435 kind; union{ struct {Ttypeseq290836* typeinstcache; } S1; struct {TY290929* procinstcache; Tsym290834* gcunsafetyreason; } S2; struct {TY290929* usedgenerics; Tstrtable290806 tab; } S3; struct {Tsym290834* guard; NI bitsize; } S4; } kindU; Tmagic290524 magic; Ttype290840* typ; Tident197010* name; Tlineinfo189336 info; Tsym290834* owner; Tsymflag290184Set flags; Tnode290802* ast; Toption168009Set options; NI position; NI offset; Tloc290816 loc; Tlib290820* annex; Tnode290802* constraint; }; struct TY201018 { NimStringDesc* Field0; NI Field1; }; struct Tpasscontext339002 { TNimObject Sup; NIM_BOOL fromcache; }; typedef Ropeobj177006* Tcfilesections527009[18]; typedef NU8 Codegenflag527025Set; struct Tidtable290850 { NI counter; Tidpairseq290848* data; }; struct Tlinkedlist147013 { Tlistentry147007* head; Tlistentry147007* tail; NI counter; }; struct Tnodetable290862 { NI counter; Tnodepairseq290860* data; }; typedef Ropeobj177006* TY527136[10]; struct Tcgen527027 { Tpasscontext339002 Sup; Tcfilesections527009 s; Codegenflag527025Set flags; Tsym290834* module; NimStringDesc* filename; NimStringDesc* cfilename; Ropeobj177006* tmpbase; Tidtable290850 typecache; Tidtable290850 forwtypecache; Intset266030 declaredthings; Intset266030 declaredprotos; Tlinkedlist147013 headerfiles; Intset266030 typeinfomarker; Tcproc527021* initproc; Tcproc527021* postinitproc; Tcproc527021* preinitproc; Ttypeseq290836* typestack; Tnodetable290862 datacache; Tsymseq290804* forwardedprocs; NI typenodes; NI nimtypes; Ropeobj177006* typenodesname; Ropeobj177006* nimtypesname; NI labels; TY527136 extensionloaders; Ropeobj177006* injectstmt; }; struct Debuginfo201009 { NI version; TY201021* files; TY201023* enums; NIM_BOOL conflicts; }; struct Tident197010 { Tidobj197004 Sup; NimStringDesc* s; Tident197010* next; NI h; }; struct Tcproc527021 { Tsym290834* prc; 
NIM_BOOL beforeretneeded; NIM_BOOL threadvaraccessed; Tlineinfo189336 lastlineinfo; Tnodeseq290796* nestedtrystmts; NI inexceptblock; TY189350* finallysafepoints; NI labels; TY527095* blocks; NI breakidx; Toption168009Set options; NI maxframelen; Tcgen527027* module; NI withinloop; NI splitdecls; NI gcframeid; Ropeobj177006* gcframetype; }; typedef NU8 Tsymflag290184; typedef NU8 Codegenflag527025; typedef NU8 Toption168009; typedef NU64 Tglobaloption168013Set; typedef NU8 Tglobaloption168013; typedef NU8 Tcommands168076; typedef NU16 Tnodeflag290427Set; typedef NU8 Tnodekind290020; struct Tnode290802 { Ttype290840* typ; Tlineinfo189336 info; Tnodeflag290427Set flags; Tnodekind290020 kind; union{ struct {NI64 intval; } S1; struct {NF floatval; } S2; struct {NimStringDesc* strval; } S3; struct {Tsym290834* sym; } S4; struct {Tident197010* ident; } S5; struct {Tnodeseq290796* sons; } S6; } kindU; NimStringDesc* comment; }; typedef Ropeobj177006* TY531289[1]; typedef NU8 Tlocflag290810; struct Tlistentry147007 { TNimObject Sup; Tlistentry147007* prev; Tlistentry147007* next; }; typedef NU8 Tlibkind290818; struct Tlib290820 { Tlistentry147007 Sup; Tlibkind290818 kind; NIM_BOOL generated; NIM_BOOL isoverriden; Ropeobj177006* name; Tnode290802* path; }; typedef NU8 Tcfilesection527005; typedef NU8 Ttypekind290244; typedef NU8 Tcallingconvention290002; typedef NU32 Ttypeflag290431Set; struct Ttype290840 { Tidobj197004 Sup; Ttypekind290244 kind; Tcallingconvention290002 callconv; Ttypeflag290431Set flags; Ttypeseq290836* sons; Tnode290802* n; Tsym290834* owner; Tsym290834* sym; Tsym290834* destructor; Tsym290834* deepcopy; Tsym290834* assignment; TY290960* methods; NI64 size; NI16 align; NI16 locklevel; Tloc290816 loc; }; typedef Ropeobj177006* TY530811[2]; typedef NU8 Tctypekind527007; typedef NU64 Ttypekind290244Set; typedef NU8 Ttypeflag290431; typedef NimStringDesc* TY531943[14]; typedef NU8 Tprefereddesc318011; typedef Ropeobj177006* TY177507[1]; struct Enumdesc201007 
{ NI size; NU32 owner; NI id; NimStringDesc* name; TY201017* values; }; typedef Ropeobj177006* TY533235[4]; typedef NimStringDesc* TY290016[10]; typedef Ropeobj177006* TY533238[3]; struct Ropeobj177006 { TNimObject Sup; Ropeobj177006* left; Ropeobj177006* right; NI length; NimStringDesc* data; }; typedef NU8 Tinfoccprop271004Set; struct Tinfocc271008 { NimStringDesc* Field0; NimStringDesc* Field1; NimStringDesc* Field2; NimStringDesc* Field3; NimStringDesc* Field4; NimStringDesc* Field5; NimStringDesc* Field6; NimStringDesc* Field7; NimStringDesc* Field8; NimStringDesc* Field9; NimStringDesc* Field10; NimStringDesc* Field11; NimStringDesc* Field12; NimStringDesc* Field13; NimStringDesc* Field14; NimStringDesc* Field15; NimStringDesc* Field16; NimStringDesc* Field17; NimStringDesc* Field18; NimStringDesc* Field19; Tinfoccprop271004Set Field20; }; typedef Tinfocc271008 TY271427[13]; typedef NU8 Tsystemcc271002; typedef NU8 Tnodeflag290427; typedef NU8 Tcprocsection527011; typedef Ropeobj177006* Tcprocsections527013[3]; struct Tblock527019 { NI id; Ropeobj177006* label; Tcprocsections527013 sections; NIM_BOOL isloop; NI16 nestedtrystmts; NI16 nestedexceptstmts; NI16 framelen; }; typedef NU8 Tgcmode168080; typedef NU8 Ttypeinforeason535016; struct Ttraversalclosure535019 { Tcproc527021* p; NimStringDesc* visitorfrmt; }; typedef NU8 Ttypefieldresult318145; typedef NU8 Tinfoccprop271004; typedef Ropeobj177006* TY534847[6]; typedef Ropeobj177006* TY534401[7]; typedef Ropeobj177006* TY534475[5]; typedef NU16 Tmsgkind189002; typedef NU8 Tassignmentflag536302Set; typedef NU8 Tassignmentflag536302; typedef NimStringDesc* TY550655[19]; typedef NimStringDesc* TY549642[3]; typedef NimStringDesc* TY554764[4]; typedef NimStringDesc* TY549828[42]; typedef NimStringDesc* TY549281[7]; typedef NU8 Trenderflag309004Set; typedef NimStringDesc* TY555052[2]; typedef NU8 Tclosuretypekind533679; typedef NimStringDesc* TY554428[6]; typedef NU8 Tanalysisresult471003; typedef NU8 
char136Set[32]; typedef NU8 Tdistinctcompare322427; typedef NU8 Ttypecmpflag322429Set; typedef NU16 Tspecialword273003; typedef NU8 Tsystemos175004; struct Tfileinfo189334 { NimStringDesc* fullpath; NimStringDesc* projpath; NimStringDesc* shortname; Ropeobj177006* quotedname; Ropeobj177006* quotedfullname; TY189350* lines; NimStringDesc* dirtyfile; }; typedef NU8 Tinfoosprop175031Set; struct Tinfoos175035 { NimStringDesc* Field0; NimStringDesc* Field1; NimStringDesc* Field2; NimStringDesc* Field3; NimStringDesc* Field4; NimStringDesc* Field5; NimStringDesc* Field6; NimStringDesc* Field7; NimStringDesc* Field8; NimStringDesc* Field9; NimStringDesc* Field10; NimStringDesc* Field11; Tinfoosprop175031Set Field12; }; typedef Tinfoos175035 TY175082[24]; typedef NU8 Tendian175474; struct Tinfocpu175476 { NimStringDesc* Field0; NI Field1; Tendian175474 Field2; NI Field3; NI Field4; }; typedef Tinfocpu175476 TY175510[19]; typedef NU8 Tsystemcpu175452; struct Tstrentry147009 { Tlistentry147007 Sup; NimStringDesc* data; }; struct TY124315 { NimStringDesc* Field0; NimStringDesc* Field1; NimStringDesc* Field2; }; struct Gcstack49416 { Gcstack49416* prev; Gcstack49416* next; void* starts; void* pos; NI maxstacksize; }; struct Basechunk29037 { NI prevsize; NI size; NIM_BOOL used; }; struct Smallchunk29039 { Basechunk29037 Sup; Smallchunk29039* next; Smallchunk29039* prev; Freecell29029* freelist; NI free; NI acc; NF data; }; struct Llchunk29079 { NI size; NI acc; Llchunk29079* next; }; struct Bigchunk29041 { Basechunk29037 Sup; Bigchunk29041* next; Bigchunk29041* prev; NI align; NF data; }; typedef NI TY29018[16]; struct Trunk29010 { Trunk29010* next; NI key; TY29018 bits; }; typedef Avlnode29083* TY29090[2]; struct Avlnode29083 { TY29090 link; NI key; NI upperbound; NI level; }; struct Pagedesc46912 { Pagedesc46912* next; NI key; TY29018 bits; }; struct Trunk266026 { Trunk266026* next; NI key; TY29018 bits; }; struct Tidpair290846 { Tidobj197004* key; TNimObject* val; }; struct 
Tnodepair290858 { NI h; Tnode290802* key; NI val; }; struct Filenamemapping201005 { NimStringDesc* package; NimStringDesc* file; NU32 mangled; }; typedef NU8 Treasonforrecompile330002; struct Tiitable297142 { NI counter; Tiipairseq297140* data; }; struct Tindex330019 { NI lastidxkey; NI lastidxval; Tiitable297142 tab; NimStringDesc* r; NI offset; }; struct Table330054 { Keyvaluepairseq330057* data; NI counter; }; struct Memfile328202 { void* mem; NI size; NI fhandle; NI maphandle; NIM_BOOL wasopened; }; struct Trodreader330021 { TNimObject Sup; NI pos; NCSTRING s; Toption168009Set options; Treasonforrecompile330002 reason; TY330033* moddeps; TY330033* files; NI dataidx; NI convertersidx; NI initidx; NI interfidx; NI compilerprocsidx; NI methodsidx; NimStringDesc* filename; Tindex330019 index; Tindex330019 imports; NI readerindex; NI line; NI moduleid; Table330054 syms; Memfile328202 memfile; Tsymseq290804* methods; NimStringDesc* origfile; NIM_BOOL inviewmode; }; struct TY290961 { NI Field0; Tsym290834* Field1; }; struct Freecell29029 { Freecell29029* next; NI zerofield; }; struct Tinstantiation290824 { Tsym290834* sym; Ttypeseq290836* concretetypes; NI compilesid; }; struct Tiipair297138 { NI key; NI val; }; struct Keyvaluepair330060 { NI Field0; NI Field1; Tsym290834* Field2; }; struct Ttypeseq290836 { TGenericSeq Sup; Ttype290840* data[SEQ_DECL_SIZE]; }; struct TY527153 { TGenericSeq Sup; Tcgen527027* data[SEQ_DECL_SIZE]; }; struct Tsymseq290804 { TGenericSeq Sup; Tsym290834* data[SEQ_DECL_SIZE]; }; struct TY201017 { TGenericSeq Sup; TY201018 data[SEQ_DECL_SIZE]; }; struct TY134602 { TGenericSeq Sup; NimStringDesc* data[SEQ_DECL_SIZE]; }; struct Tbitset337004 { TGenericSeq Sup; NI8 data[SEQ_DECL_SIZE]; }; struct TY527095 { TGenericSeq Sup; Tblock527019 data[SEQ_DECL_SIZE]; }; struct TY189350 { TGenericSeq Sup; Ropeobj177006* data[SEQ_DECL_SIZE]; }; struct Tnodeseq290796 { TGenericSeq Sup; Tnode290802* data[SEQ_DECL_SIZE]; }; struct TY189612 { TGenericSeq Sup; 
Tfileinfo189334 data[SEQ_DECL_SIZE]; }; struct Trunkseq266028 { TGenericSeq Sup; Trunk266026* data[SEQ_DECL_SIZE]; }; struct TY290929 { TGenericSeq Sup; Tinstantiation290824* data[SEQ_DECL_SIZE]; }; struct Tidpairseq290848 { TGenericSeq Sup; Tidpair290846 data[SEQ_DECL_SIZE]; }; struct Tnodepairseq290860 { TGenericSeq Sup; Tnodepair290858 data[SEQ_DECL_SIZE]; }; struct TY201021 { TGenericSeq Sup; Filenamemapping201005 data[SEQ_DECL_SIZE]; }; struct TY201023 { TGenericSeq Sup; Enumdesc201007 data[SEQ_DECL_SIZE]; }; struct TY290960 { TGenericSeq Sup; TY290961 data[SEQ_DECL_SIZE]; }; struct TY330033 { TGenericSeq Sup; NI32 data[SEQ_DECL_SIZE]; }; struct Tiipairseq297140 { TGenericSeq Sup; Tiipair297138 data[SEQ_DECL_SIZE]; }; struct Keyvaluepairseq330057 { TGenericSeq Sup; Keyvaluepair330060 data[SEQ_DECL_SIZE]; }; N_NIMCALL(void, nimGCvisit)(void* d0, NI op0); N_NIMCALL(void, T839829468_2)(void); N_NIMCALL(void, nimRegisterGlobalMarker)(Globalmarkerproc55402 markerproc0); N_NIMCALL(void, T839829468_3)(void); N_NIMCALL(Ropeobj177006*, rope_177277_2381377266)(NimStringDesc* s0); static N_INLINE(void, asgnRefNoCycle)(void** dest0, void* src0); static N_INLINE(Cell46904*, usrtocell_51040_1689653243)(void* usr0); static N_INLINE(void, rtladdzct_52201_1689653243)(Cell46904* c0); N_NOINLINE(void, addzct_51017_1689653243)(Cellseq46920* s0, Cell46904* c0); N_NIMCALL(void, T839829468_5)(void); N_NIMCALL(void, T839829468_6)(void); static N_INLINE(void, nimGCunrefNoCycle)(void* p0); N_NIMCALL(void*, newSeqRC1)(TNimType* typ0, NI len0); N_NIMCALL(void, T839829468_7)(void); N_NIMCALL(void, initintset_266885_2627731572)(Intset266030* Result); N_NOINLINE(void, chckNil)(void* p0); N_NIMCALL(void, genericReset)(void* dest0, TNimType* mt0); N_NIMCALL(void, T839829468_8)(void); N_NIMCALL(Tcgen527027*, newmodule_561045_839829468)(Tsym290834* module0); N_NIMCALL(Tcgen527027*, getcgenmodule_530226_839829468)(Tsym290834* s0); N_NIMCALL(void, internalerror_194113_155036129)(NimStringDesc* 
errmsg0); N_NIMCALL(NimStringDesc*, HEX24_194185_1689653243)(TY201018 x0); N_NIMCALL(Tcgen527027*, rawnewmodule_561038_839829468)(Tsym290834* module0); N_NIMCALL(Tcgen527027*, rawnewmodule_560663_839829468)(Tsym290834* module0, NimStringDesc* filename0); N_NIMCALL(void*, newObj)(TNimType* typ0, NI size0); static N_INLINE(void, appendString)(NimStringDesc* dest0, NimStringDesc* src0); static N_INLINE(void, copymem_7485_1689653243)(void* dest0, void* source0, NI size0); N_NIMCALL(NimStringDesc*, HEX24_8401_1689653243)(NU64 x0); N_NIMCALL(NU32, hashowner_530977_839829468)(Tsym290834* s0); N_NIMCALL(NU32, register_201121_1926258066)(Debuginfo201009* self0, NimStringDesc* package0, NimStringDesc* file0); N_NIMCALL(NimStringDesc*, rawNewString)(NI space0); N_NIMCALL(void, initlinkedlist_147031_3771138726)(Tlinkedlist147013* list0); N_NIMCALL(NimStringDesc*, copyStringRC1)(NimStringDesc* src0); N_NIMCALL(void, initidtable_294019_850551059)(Tidtable290850* x0); N_NIMCALL(Tcproc527021*, newproc_527206_3723162438)(Tsym290834* prc0, Tcgen527027* module0); static N_INLINE(void, asgnRef)(void** dest0, void* src0); static N_INLINE(void, incref_53019_1689653243)(Cell46904* c0); static N_INLINE(void, decref_52601_1689653243)(Cell46904* c0); N_NIMCALL(Toption168009Set, initprocoptions_560635_839829468)(Tcgen527027* m0); N_NIMCALL(Tcproc527021*, newpreinitproc_560625_839829468)(Tcgen527027* m0); N_NIMCALL(Tcproc527021*, newpostinitproc_560630_839829468)(Tcgen527027* m0); N_NIMCALL(void, initnodetable_294085_850551059)(Tnodetable290862* x0); N_NIMCALL(Ropeobj177006*, gettempname_531596_839829468)(Tcgen527027* m0); N_NIMCALL(Ropeobj177006*, HEX26_177418_2381377266)(Ropeobj177006* a0, Ropeobj177006* b0); N_NIMCALL(Ropeobj177006*, rope_177401_2381377266)(NI64 i0); N_NIMCALL(NimStringDesc*, tofullpath_190264_155036129)(NI32 fileidx0); N_NIMCALL(TGenericSeq*, setLengthSeq)(TGenericSeq* seq0, NI elemsize0, NI newlen0); N_NIMCALL(NimStringDesc*, tofilename_190260_155036129)(NI32 fileidx0); 
N_NIMCALL(NimStringDesc*, noschangeFileExt)(NimStringDesc* filename0, NimStringDesc* ext0); N_NIMCALL(NimStringDesc*, completecfilepath_271854_2528170400)(NimStringDesc* cfile0, NIM_BOOL createsubdir0); N_NIMCALL(void, readmergeinfo_528613_2760143328)(NimStringDesc* cfilename0, Tcgen527027* m0); N_NIMCALL(NimStringDesc*, getcfile_561204_839829468)(Tcgen527027* m0); N_NIMCALL(NimStringDesc*, copyString)(NimStringDesc* src0); N_NIMCALL(NimStringDesc*, withpackagename_169065_2607990831)(NimStringDesc* path0); static N_INLINE(NIM_BOOL, skipcodegen_339085_2355241294)(Tnode290802* n0); N_NIMCALL(void, genstmts_537244_839829468)(Tcproc527021* p0, Tnode290802* t0); N_NIMCALL(void, expr_537248_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0); N_NIMCALL(void, fillprocloc_537201_839829468)(Tsym290834* sym0); N_NIMCALL(void, fillloc_530282_839829468)(Tloc290816* a0, Tlockind290808 k0, Ttype290840* typ0, Ropeobj177006* r0, Tstorageloc290812 s0); N_NIMCALL(void, unsureAsgnRef)(void** dest0, void* src0); N_NIMCALL(Ropeobj177006*, manglename_531205_839829468)(Tsym290834* s0); N_NIMCALL(NIM_BOOL, iskeyword_530960_839829468)(Tident197010* w0); N_NIMCALL(NimStringDesc*, mangle_526847_2036603609)(NimStringDesc* name0); N_NIMCALL(void, add_177487_2381377266)(Ropeobj177006** a0, NimStringDesc* b0); N_NIMCALL(void, add_177482_2381377266)(Ropeobj177006** a0, Ropeobj177006* b0); N_NIMCALL(Ropeobj177006*, HEX25_177905_2381377266)(NimStringDesc* frmt0, Ropeobj177006** args0, NI args0Len0); N_NIMCALL(void, genprocprototype_537254_839829468)(Tcgen527027* m0, Tsym290834* sym0); N_NIMCALL(void, useheader_530369_839829468)(Tcgen527027* m0, Tsym290834* sym0); N_NIMCALL(NIM_BOOL, includestr_147249_3771138726)(Tlinkedlist147013* list0, NimStringDesc* data0); N_NIMCALL(NimStringDesc*, getstr_295230_850551059)(Tnode290802* a0); N_NIMCALL(Tsym290834*, getmodule_297123_2984716966)(Tsym290834* s0); N_NIMCALL(NIM_BOOL, containsorincl_266862_2627731572)(Intset266030* s0, NI key0); 
N_NIMCALL(Ropeobj177006*, ropecg_530407_839829468)(Tcgen527027* m0, NimStringDesc* frmt0, Ropeobj177006** args0, NI args0Len0); N_NIMCALL(NimStringDesc*, nimIntToStr)(NI x0); static N_INLINE(void, appendChar)(NimStringDesc* dest0, NIM_CHAR c0); N_NIMCALL(NimStringDesc*, copyStrLast)(NimStringDesc* s0, NI start_78810_1689653243, NI last0); N_NIMCALL(NimStringDesc*, copyStrLast)(NimStringDesc* s0, NI first0, NI last0); N_NIMCALL(Ropeobj177006*, cgsym_530403_839829468)(Tcgen527027* m0, NimStringDesc* name0); N_NIMCALL(Tsym290834*, getcompilerproc_336746_3937434831)(NimStringDesc* name0); N_NIMCALL(void, genproc_530951_839829468)(Tcgen527027* m0, Tsym290834* prc0); N_NIMCALL(NIM_BOOL, isactivated_559431_839829468)(Tsym290834* prc0); N_NIMCALL(void, addforwardedproc_530203_839829468)(Tcgen527027* m0, Tsym290834* prc0); N_NIMCALL(TGenericSeq*, incrSeqV2)(TGenericSeq* seq0, NI elemsize0); N_NIMCALL(void, genprocnoforward_558906_839829468)(Tcgen527027* m0, Tsym290834* prc0); N_NIMCALL(void, genprocaux_558284_839829468)(Tcgen527027* m0, Tsym290834* prc0); N_NIMCALL(Ropeobj177006*, genprocheader_533867_839829468)(Tcgen527027* m0, Tsym290834* prc0); N_NIMCALL(void, genclinedir_530813_839829468)(Ropeobj177006** r0, Tlineinfo189336 info0); N_NIMCALL(void, genclinedir_530725_839829468)(Ropeobj177006** r0, NimStringDesc* filename0, NI line0); N_NIMCALL(void, addf_178205_2381377266)(Ropeobj177006** c0, NimStringDesc* frmt0, Ropeobj177006** args0, NI args0Len0); N_NIMCALL(NimStringDesc*, makesinglelinecstring_526835_2036603609)(NimStringDesc* s0); N_NIMCALL(NI, safelinenm_530721_839829468)(Tlineinfo189336 info0); static N_INLINE(NI, tolinenumber_190415_155036129)(Tlineinfo189336 info0); N_NIMCALL(void, genprocparams_532115_839829468)(Tcgen527027* m0, Ttype290840* t0, Ropeobj177006** rettype0, Ropeobj177006** params0, Intset266030* check0, NIM_BOOL declareenvironment0, NIM_BOOL weakdep0); N_NIMCALL(NIM_BOOL, isinvalidreturntype_531548_839829468)(Ttype290840* rettype0); 
N_NIMCALL(Tctypekind527007, maptype_531393_839829468)(Ttype290840* typ0); N_NIMCALL(Tctypekind527007, mapsettype_531389_839829468)(Ttype290840* typ0); N_NIMCALL(NI64, getsize_318135_3876443242)(Ttype290840* typ0); N_NIMCALL(Ttype290840*, lastson_293377_850551059)(Ttype290840* n0); N_NIMCALL(NI64, firstord_318001_3876443242)(Ttype290840* t0); N_NIMCALL(Ttype290840*, skiptypes_294099_850551059)(Ttype290840* t0, Ttypekind290244Set kinds0); N_NIMCALL(NIM_BOOL, isimportedcpptype_531476_839829468)(Ttype290840* t0); N_NIMCALL(NIM_BOOL, needscomplexassignment_531509_839829468)(Ttype290840* typ0); N_NIMCALL(NIM_BOOL, containsgarbagecollectedref_318117_3876443242)(Ttype290840* typ0); static N_INLINE(NIM_BOOL, isobjlackingtypefield_531513_839829468)(Ttype290840* typ0); N_NIMCALL(NIM_BOOL, ispureobject_318138_3876443242)(Ttype290840* typ0); N_NIMCALL(Ropeobj177006*, gettypedescaux_531503_839829468)(Tcgen527027* m0, Ttype290840* typ0, Intset266030* check0); N_NIMCALL(Ttype290840*, getuniquetype_526640_2036603609)(Ttype290840* key0); N_NIMCALL(Ropeobj177006*, gettypepre_531972_839829468)(Tcgen527027* m0, Ttype290840* typ0); N_NIMCALL(Ropeobj177006*, getsimpletypedesc_531936_839829468)(Tcgen527027* m0, Ttype290840* typ0); N_NIMCALL(Ropeobj177006*, typenameorliteral_531898_839829468)(Ttype290840* t0, NimStringDesc* literal0); N_NIMCALL(Ropeobj177006*, gettypename_531313_839829468)(Ttype290840* typ0); N_NIMCALL(Ropeobj177006*, typename_531292_839829468)(Ttype290840* typ0); N_NIMCALL(NimStringDesc*, reprEnum)(NI e0, TNimType* typ0); N_NIMCALL(Ropeobj177006*, cachegettype_531591_839829468)(Tidtable290850 tab0, Ttype290840* key0); N_NIMCALL(TNimObject*, idtableget_297086_2984716966)(Tidtable290850 t0, Tidobj197004* key0); N_NIMCALL(NimStringDesc*, typetostring_318017_3876443242)(Ttype290840* typ0, Tprefereddesc318011 prefer0); N_NIMCALL(Ttype290840*, elemtype_318394_3876443242)(Ttype290840* t0); N_NIMCALL(Ropeobj177006*, HEX26_177447_2381377266)(Ropeobj177006* a0, NimStringDesc* b0); 
N_NIMCALL(Ropeobj177006*, gettypeforward_532039_839829468)(Tcgen527027* m0, Ttype290840* typ0); N_NIMCALL(NIM_BOOL, isimportedtype_531449_839829468)(Ttype290840* t0); N_NIMCALL(NimStringDesc*, getforwardstructformat_532015_839829468)(Tcgen527027* m0); N_NIMCALL(Ropeobj177006*, structorunion_532001_839829468)(Ttype290840* t0); N_NIMCALL(void, idtableput_297094_2984716966)(Tidtable290850* t0, Tidobj197004* key0, TNimObject* val0); N_NIMCALL(void, pushtype_531958_839829468)(Tcgen527027* m0, Ttype290840* typ0); N_NIMCALL(Ropeobj177006*, gettypedescweak_532079_839829468)(Tcgen527027* m0, Ttype290840* t0, Intset266030* check0); N_NIMCALL(void, internalerror_194100_155036129)(Tlineinfo189336 info0, NimStringDesc* errmsg0); N_NIMCALL(NIM_BOOL, hasenum_201230_1926258066)(Debuginfo201009 self0, NimStringDesc* ename0, NI id0, NU32 owner0); N_NIMCALL(void*, newSeq)(TNimType* typ0, NI len0); static N_INLINE(NI, len_291081_850551059)(Tnode290802* n0); N_NIMCALL(void, registerenum_201419_1926258066)(Debuginfo201009* self0, Enumdesc201007* ed0); N_NIMCALL(void, genericSeqAssign)(void* dest0, void* src_86004_1689653243, TNimType* mt0); N_NIMCALL(void, appcg_530632_839829468)(Tcgen527027* m0, Ropeobj177006** c0, NimStringDesc* frmt0, Ropeobj177006** args0, NI args0Len0); N_NIMCALL(NI64, lengthord_318007_3876443242)(Ttype290840* t0); N_NIMCALL(NIM_BOOL, scancppgenericslot_532827_839829468)(NimStringDesc* pat0, NI* cursor0, NI* outidx0, NI* outstars0); N_NIMCALL(Ttype290840*, resolvestarsincpptype_532891_839829468)(Ttype290840* typ0, NI idx0, NI stars0); N_NIMCALL(NI, len_293339_850551059)(Ttype290840* n0); N_NIMCALL(NimStringDesc*, copyStr)(NimStringDesc* s0, NI start0); N_NIMCALL(NimStringDesc*, copyStr)(NimStringDesc* s0, NI first0); N_NIMCALL(Ropeobj177006*, getrecorddesc_532643_839829468)(Tcgen527027* m0, Ttype290840* typ0, Ropeobj177006* name0, Intset266030* check0); N_NIMCALL(Ropeobj177006*, getrecordfields_532636_839829468)(Tcgen527027* m0, Ttype290840* typ0, Intset266030* 
check0); N_NIMCALL(Ropeobj177006*, genrecordfieldsaux_532421_839829468)(Tcgen527027* m0, Tnode290802* n0, Ropeobj177006* accessexpr0, Ttype290840* rectype0, Intset266030* check0); N_NIMCALL(NI, sonslen_293351_850551059)(Tnode290802* n0); N_NIMCALL(Tnode290802*, lastson_293364_850551059)(Tnode290802* n0); N_NIMCALL(Ropeobj177006*, HEX26_177452_2381377266)(NimStringDesc* a0, Ropeobj177006* b0); N_NIMCALL(Ropeobj177006*, manglerecfieldname_532361_839829468)(Tsym290834* field0, Ttype290840* rectype0); N_NIMCALL(NimStringDesc*, manglefield_530973_839829468)(Tident197010* name0); N_NIMCALL(NIM_CHAR, nsuToUpperAsciiChar)(NIM_CHAR c0); N_NIMCALL(Ropeobj177006*, gettupledesc_532777_839829468)(Tcgen527027* m0, Ttype290840* typ0, Ropeobj177006* name0, Intset266030* check0); N_NIMCALL(NI, sonslen_293327_850551059)(Ttype290840* n0); N_NIMCALL(void, excl_266841_2627731572)(Intset266030* s0, NI key0); static N_INLINE(NIM_BOOL, iscompiletimeonly_326706_3876443242)(Ttype290840* t0); N_NIMCALL(Tstorageloc290812, paramstorageloc_532098_839829468)(Tsym290834* param0); N_NIMCALL(NIM_BOOL, ccgintroducedptr_531609_839829468)(Tsym290834* s0); N_NIMCALL(Tctypekind527007, mapreturntype_531445_839829468)(Ttype290840* typ0); N_NIMCALL(Tnode290802*, easyresultasgn_558191_839829468)(Tnode290802* n0); static N_INLINE(Tnode290802*, HEX5BHEX5D_291238_850551059)(Tnode290802* n0, NI i0); N_NIMCALL(Tnode290802*, getbody_333227_1724185294)(Tsym290834* s0); N_NIMCALL(Ropeobj177006*, localvardecl_536532_839829468)(Tcproc527021* p0, Tsym290834* s0); N_NIMCALL(Ropeobj177006*, gettypedesc_533671_839829468)(Tcgen527027* m0, Ttype290840* typ0); N_NIMCALL(void, initlocexprsingleuse_537289_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* result0); N_NIMCALL(void, initloc_530273_839829468)(Tloc290816* result0, Tlockind290808 k0, Ttype290840* typ0, Tstorageloc290812 s0); N_NIMCALL(void, linefmt_530714_839829468)(Tcproc527021* p0, Tcprocsection527011 s0, NimStringDesc* frmt0, Ropeobj177006** args0, NI 
args0Len0); static N_INLINE(Ropeobj177006**, s_527179_3723162438)(Tcproc527021* p0, Tcprocsection527011 s0); N_NIMCALL(Ropeobj177006*, indentline_530656_839829468)(Tcproc527021* p0, Ropeobj177006* r0); N_NIMCALL(void, prepend_177893_2381377266)(Ropeobj177006** a0, Ropeobj177006* b0); N_NIMCALL(Ropeobj177006*, rdloc_536188_839829468)(Tloc290816 a0); N_NIMCALL(void, assignlocalvar_536614_839829468)(Tcproc527021* p0, Tsym290834* s0); N_NIMCALL(void, line_530690_839829468)(Tcproc527021* p0, Tcprocsection527011 s0, Ropeobj177006* r0); N_NIMCALL(void, localdebuginfo_536449_839829468)(Tcproc527021* p0, Tsym290834* s0); N_NIMCALL(void, linef_530700_839829468)(Tcproc527021* p0, Tcprocsection527011 s0, NimStringDesc* frmt0, Ropeobj177006** args0, NI args0Len0); N_NIMCALL(Ropeobj177006*, makecstring_189638_155036129)(NimStringDesc* s0); N_NIMCALL(NimStringDesc*, nsuNormalize)(NimStringDesc* s0); N_NIMCALL(Ropeobj177006*, gentypeinfo_533941_839829468)(Tcgen527027* m0, Ttype290840* t_533944_839829468); N_NIMCALL(Tcgen527027*, bmod_527201_3723162438)(Tsym290834* module0); N_NIMCALL(void, gentypeinfoauxbase_533960_839829468)(Tcgen527027* m0, Ttype290840* typ0, Ttype290840* origtype0, Ropeobj177006* name0, Ropeobj177006* base0); N_NIMCALL(NIM_BOOL, canformacycle_318123_3876443242)(Ttype290840* typ0); N_NIMCALL(void, gentupleinfo_534549_839829468)(Tcgen527027* m0, Ttype290840* typ0, Ropeobj177006* name0); N_NIMCALL(Ropeobj177006*, getnimnode_533945_839829468)(Tcgen527027* m0); N_NIMCALL(Ttype290840*, fakeclosuretype_535010_839829468)(Tsym290834* owner0); N_NIMCALL(Ttype290840*, newtype_293107_850551059)(Ttypekind290244 kind0, Tsym290834* owner0); N_NIMCALL(void, rawaddson_294394_850551059)(Ttype290840* father0, Ttype290840* son0); N_NIMCALL(void, gentypeinfoaux_534027_839829468)(Tcgen527027* m0, Ttype290840* typ0, Ttype290840* origtype0, Ropeobj177006* name0); N_NIMCALL(Ropeobj177006*, gentraverseproc_535632_839829468)(Tcgen527027* m0, Ttype290840* typ0, Ttypeinforeason535016 
reason0); N_NIMCALL(void, gentraverseprocseq_535399_839829468)(Ttraversalclosure535019* c0, Ropeobj177006* accessor0, Ttype290840* typ0); N_NIMCALL(void, gettemp_535032_839829468)(Tcproc527021* p0, Ttype290840* t0, Tloc290816* result0, NIM_BOOL needsinit0); N_NIMCALL(void, constructloc_536388_839829468)(Tcproc527021* p0, Tloc290816 loc0, NIM_BOOL istemp0); static N_INLINE(NIM_BOOL, iscomplexvaluetype_536317_839829468)(Ttype290840* t0); N_NIMCALL(void, usestringh_530345_839829468)(Tcgen527027* m0); N_NIMCALL(Ropeobj177006*, addrloc_536204_839829468)(Tloc290816 a0); N_NIMCALL(void, genobjectinit_536242_839829468)(Tcproc527021* p0, Tcprocsection527011 section0, Ttype290840* t0, Tloc290816 a0, NIM_BOOL takeaddr0); N_NIMCALL(Ttypefieldresult318145, analyseobjectwithtypefield_318149_3876443242)(Ttype290840* t0); N_NIMCALL(Ttype290840*, getsystype_336150_3937434831)(Ttypekind290244 kind0); N_NIMCALL(void, gentraverseproc_535022_839829468)(Ttraversalclosure535019* c0, Ropeobj177006* accessor0, Ttype290840* typ_535027_839829468); static N_INLINE(Ropeobj177006*, parentobj_535257_839829468)(Ropeobj177006* accessor0, Tcgen527027* m0); N_NIMCALL(void, gentraverseproc_535039_839829468)(Ttraversalclosure535019* c0, Ropeobj177006* accessor0, Tnode290802* n0); N_NIMCALL(void, gencaserange_535028_839829468)(Tcproc527021* p0, Tnode290802* branch0); N_NIMCALL(Ropeobj177006*, genliteral_537273_839829468)(Tcproc527021* p0, Tnode290802* n0); N_NIMCALL(Ropeobj177006*, genliteral_547476_839829468)(Tcproc527021* p0, Tnode290802* n0, Ttype290840* ty0); N_NIMCALL(Ropeobj177006*, intliteral_537270_839829468)(NI64 i0); N_NIMCALL(Ropeobj177006*, int64literal_547430_839829468)(NI64 i0); N_NIMCALL(Ropeobj177006*, uint64literal_547442_839829468)(NU64 i0); N_NIMCALL(NI, nodetabletestorset_340682_1142335848)(Tnodetable290862* t0, Tnode290802* key0, NI val0); N_NIMCALL(Ropeobj177006*, getstrlit_547468_839829468)(Tcgen527027* m0, NimStringDesc* s0); N_NIMCALL(NimStringDesc*, 
tostrmaxprecision_296007_3471544153)(NF f0); N_NIMCALL(Tnode290802*, copynode_294528_850551059)(Tnode290802* src0); N_NIMCALL(void, linecg_530707_839829468)(Tcproc527021* p0, Tcprocsection527011 s0, NimStringDesc* frmt0, Ropeobj177006** args0, NI args0Len0); N_NIMCALL(void, genarrayinfo_535005_839829468)(Tcgen527027* m0, Ttype290840* typ0, Ropeobj177006* name0); N_NIMCALL(void, gensetinfo_534867_839829468)(Tcgen527027* m0, Ttype290840* typ0, Ropeobj177006* name0); N_NIMCALL(void, genenuminfo_534597_839829468)(Tcgen527027* m0, Ttype290840* typ0, Ropeobj177006* name0); N_NIMCALL(void, genobjectinfo_534506_839829468)(Tcgen527027* m0, Ttype290840* typ0, Ttype290840* origtype0, Ropeobj177006* name0); N_NIMCALL(void, genobjectfields_534104_839829468)(Tcgen527027* m0, Ttype290840* typ0, Tnode290802* n0, Ropeobj177006* expr0); N_NIMCALL(Ropeobj177006*, discriminatortablename_534057_839829468)(Tcgen527027* m0, Ttype290840* objtype_534060_839829468, Tsym290834* d0); N_NIMCALL(Tsym290834*, lookupinrecord_297119_2984716966)(Tnode290802* n0, Tident197010* field0); N_NIMCALL(NI64, getordvalue_318129_3876443242)(Tnode290802* n0); N_NIMCALL(void, gendeepcopyproc_536066_839829468)(Tcgen527027* m0, Tsym290834* s0, Ropeobj177006* result0); N_NIMCALL(void, initlocalvar_536398_839829468)(Tcproc527021* p0, Tsym290834* v0, NIM_BOOL immediateasgn0); N_NIMCALL(void, fillresult_531865_839829468)(Tsym290834* param0); N_NIMCALL(void, assignparam_536994_839829468)(Tcproc527021* p0, Tsym290834* s0); N_NIMCALL(void, closuresetup_558158_839829468)(Tcproc527021* p0, Tsym290834* prc0); N_NIMCALL(Ropeobj177006*, initgcframe_536435_839829468)(Tcproc527021* p0); N_NIMCALL(Ropeobj177006*, initframe_558140_839829468)(Tcproc527021* p0, Ropeobj177006* procname0, Ropeobj177006* filename0); N_NIMCALL(Ropeobj177006*, quotedfilename_194818_155036129)(Tlineinfo189336 i0); N_NIMCALL(void, appcg_530648_839829468)(Tcproc527021* p0, Tcprocsection527011 s0, NimStringDesc* frmt0, Ropeobj177006** args0, NI 
args0Len0); N_NIMCALL(Ropeobj177006*, deinitgcframe_536441_839829468)(Tcproc527021* p0); N_NIMCALL(Ropeobj177006*, deinitframe_558150_839829468)(Tcproc527021* p0); N_NIMCALL(Tcgen527027*, findpendingmodule_530241_839829468)(Tcgen527027* m0, Tsym290834* s0); N_NIMCALL(void, symindynamiclib_557929_839829468)(Tcgen527027* m0, Tsym290834* sym0); N_NIMCALL(NIM_BOOL, isgetprocaddr_557442_839829468)(Tlib290820* lib0); N_NIMCALL(void, loaddynamiclib_557480_839829468)(Tcgen527027* m0, Tlib290820* lib0); N_NIMCALL(void, libcandidates_169605_2607990831)(NimStringDesc* s0, TY134602** dest0); N_NIMCALL(void, rawmessage_192612_155036129)(Tmsgkind189002 msg0, NimStringDesc* arg0); N_NIMCALL(void, initlocexpr_537283_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* result0); N_NIMCALL(Ropeobj177006*, mangledynlibproc_536816_839829468)(Tsym290834* sym0); N_NIMCALL(NimStringDesc*, HEX24_177856_2381377266)(Ropeobj177006* r0); N_NIMCALL(void, symindynamiclibpartial_558071_839829468)(Tcgen527027* m0, Tsym290834* sym0); N_NIMCALL(void, genvarprototype_537236_839829468)(Tcgen527027* m0, Tsym290834* sym0); N_NIMCALL(void, genvarprototypeaux_542254_839829468)(Tcgen527027* m0, Tsym290834* sym0); N_NIMCALL(void, declarethreadvar_536676_839829468)(Tcgen527027* m0, Tsym290834* s0, NIM_BOOL isextern0); static N_INLINE(NIM_BOOL, emulatedthreadvars_530949_839829468)(void); static N_INLINE(NIM_BOOL, crossescppboundary_558754_839829468)(Tcgen527027* m0, Tsym290834* sym0); N_NIMCALL(void, putlocintodest_537258_839829468)(Tcproc527021* p0, Tloc290816* d0, Tloc290816 s0); N_NIMCALL(void, genassignment_537264_839829468)(Tcproc527021* p0, Tloc290816 dest0, Tloc290816 src0, Tassignmentflag536302Set flags0); N_NIMCALL(void, genrefassign_536311_839829468)(Tcproc527021* p0, Tloc290816 dest0, Tloc290816 src0, Tassignmentflag536302Set flags0); static N_INLINE(NIM_BOOL, usesnativegc_168177_2607990831)(void); N_NIMCALL(void, optasgnloc_547788_839829468)(Tloc290816 a0, Ttype290840* t0, Ropeobj177006* 
field0, Tloc290816* Result); N_NIMCALL(void, genoptasgntuple_548001_839829468)(Tcproc527021* p0, Tloc290816 dest0, Tloc290816 src0, Tassignmentflag536302Set flags0); N_NIMCALL(void, gengenericasgn_548167_839829468)(Tcproc527021* p0, Tloc290816 dest0, Tloc290816 src0, Tassignmentflag536302Set flags0); N_NIMCALL(NI, asgncomplexity_547750_839829468)(Tnode290802* n0); N_NIMCALL(void, genoptasgnobject_548084_839829468)(Tcproc527021* p0, Tloc290816 dest0, Tloc290816 src0, Tassignmentflag536302Set flags0, Tnode290802* t0); N_NIMCALL(void, genericAssign)(void* dest0, void* src0, TNimType* mt0); N_NIMCALL(void, localerror_194085_155036129)(Tlineinfo189336 info0, NimStringDesc* arg0); N_NIMCALL(NIM_BOOL, issimpleconst_530311_839829468)(Ttype290840* typ0); N_NIMCALL(void, putintodest_548468_839829468)(Tcproc527021* p0, Tloc290816* d0, Ttype290840* t0, Ropeobj177006* r0, Tstorageloc290812 s0); N_NIMCALL(void, gencomplexconst_556249_839829468)(Tcproc527021* p0, Tsym290834* sym0, Tloc290816* d0); N_NIMCALL(void, requestconstimpl_537240_839829468)(Tcproc527021* p0, Tsym290834* sym0); N_NIMCALL(Ropeobj177006*, genconstexpr_552849_839829468)(Tcproc527021* p0, Tnode290802* n0); N_NIMCALL(void, tobitset_338001_452470228)(Tnode290802* s0, Tbitset337004** b0); N_NIMCALL(Ropeobj177006*, genrawsetdata_547629_839829468)(Tbitset337004* cs0, NI size0); N_NIMCALL(NimStringDesc*, nsuToHex)(NI64 x0, NI len0); N_NIMCALL(NI64, bitsettoword_547578_839829468)(Tbitset337004* s0, NI size0); N_NIMCALL(Ropeobj177006*, genconstseq_557371_839829468)(Tcproc527021* p0, Tnode290802* n0, Ttype290840* t0); N_NIMCALL(void, appcg_530640_839829468)(Tcgen527027* m0, Tcfilesection527005 s0, NimStringDesc* frmt0, Ropeobj177006** args0, NI args0Len0); N_NIMCALL(Ropeobj177006*, genconstsimplelist_557299_839829468)(Tcproc527021* p0, Tnode290802* n0); N_NIMCALL(Ropeobj177006*, gennamedconstexpr_557284_839829468)(Tcproc527021* p0, Tnode290802* n0); N_NIMCALL(void, accessthreadlocalvar_530945_839829468)(Tcproc527021* 
p0, Tsym290834* s0); static N_INLINE(Ropeobj177006**, procsec_527194_3723162438)(Tcproc527021* p0, Tcprocsection527011 s0); static N_INLINE(NIM_BOOL, isemptytype_295440_850551059)(Ttype290840* t0); N_NIMCALL(void, putdataintodest_548436_839829468)(Tcproc527021* p0, Tloc290816* d0, Ttype290840* t0, Ropeobj177006* r0); N_NIMCALL(void, genlinedir_530823_839829468)(Tcproc527021* p0, Tnode290802* t0); N_NIMCALL(Ropeobj177006*, sourceline_190068_155036129)(Tlineinfo189336 i0); N_NIMCALL(NIM_BOOL, freshlineinfo_530818_839829468)(Tcproc527021* p0, Tlineinfo189336 info0); N_NIMCALL(void, genmagicexpr_555033_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, Tmagic290524 op0); N_NIMCALL(void, genandor_552311_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, Tmagic290524 m0); N_NIMCALL(Ropeobj177006*, getlabel_537217_839829468)(Tcproc527021* p0); N_NIMCALL(void, fixlabel_537230_839829468)(Tcproc527021* p0, Ropeobj177006* labl0); N_NIMCALL(void, unaryarith_550646_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, Tmagic290524 op0); N_NIMCALL(void, unaryarithoverflow_549633_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, Tmagic290524 m0); N_NIMCALL(void, binaryfloatarith_554728_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, Tmagic290524 m0); N_NIMCALL(void, binaryarith_549819_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, Tmagic290524 op0); N_NIMCALL(void, geneqproc_550214_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); N_NIMCALL(void, binaryarithoverflow_549262_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, Tmagic290524 m0); N_NIMCALL(Ropeobj177006*, binaryarithoverflowraw_549235_839829468)(Tcproc527021* p0, Ttype290840* t0, Tloc290816 a0, Tloc290816 b0, NimStringDesc* frmt0); N_NIMCALL(Ropeobj177006*, rdcharloc_536227_839829468)(Tloc290816 a0); N_NIMCALL(NI64, lastord_318004_3876443242)(Ttype290840* t0); N_NIMCALL(void, genrepr_553339_839829468)(Tcproc527021* p0, 
Tnode290802* e0, Tloc290816* d0); N_NIMCALL(Ropeobj177006*, lenfield_537305_839829468)(Tcproc527021* p0); N_NIMCALL(void, gcusage_552439_839829468)(Tnode290802* n0); N_NIMCALL(void, message_194095_155036129)(Tlineinfo189336 info0, Tmsgkind189002 msg0, NimStringDesc* arg0); N_NIMCALL(NimStringDesc*, rendertree_309044_382274130)(Tnode290802* n0, Trenderflag309004Set renderflags0); N_NIMCALL(void, gengettypeinfo_553383_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); N_NIMCALL(void, genswap_553638_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); N_NIMCALL(void, unaryexpr_549209_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, NimStringDesc* frmt0); N_NIMCALL(void, binarystmt_548501_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, NimStringDesc* frmt0); N_NIMCALL(void, genstrconcat_552452_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); N_NIMCALL(void, genstrappend_552554_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); N_NIMCALL(void, genseqelemappend_552683_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); N_NIMCALL(void, genstrequals_554666_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); N_NIMCALL(void, binaryexpr_548549_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, NimStringDesc* frmt0); N_NIMCALL(void, genisnil_550620_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); N_NIMCALL(void, gendollar_553391_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0, NimStringDesc* frmt0); N_NIMCALL(void, genof_553331_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0); N_NIMCALL(void, genof_553201_839829468)(Tcproc527021* p0, Tnode290802* x0, Ttype290840* typ0, Tloc290816* d0); N_NIMCALL(void, globalerror_194071_155036129)(Tlineinfo189336 info0, Tmsgkind189002 msg0, NimStringDesc* arg0); N_NIMCALL(Ropeobj177006*, genofhelper_553139_839829468)(Tcproc527021* p0, Ttype290840* dest0, Ropeobj177006* a0); N_NIMCALL(void, 
gennew_552782_839829468)(Tcproc527021* p0, Tnode290802* e0); N_NIMCALL(void, rawgennew_552741_839829468)(Tcproc527021* p0, Tloc290816 a0, Ropeobj177006* sizeexpr_552745_839829468); N_NIMCALL(void, gennewfinalize_553110_839829468)(Tcproc527021* p0, Tnode290802* e0); N_NIMCALL(void, gennewseq_552824_839829468)(Tcproc527021* p0, Tnode290802* e0); N_NIMCALL(void, gennewseqaux_552795_839829468)(Tcproc527021* p0, Tloc290816 dest0, Ropeobj177006* length0); N_NIMCALL(void, gennewseqofcap_552836_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); N_NIMCALL(void, gensomecast_554480_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); N_NIMCALL(Ropeobj177006*, getclosuretype_533683_839829468)(Tcgen527027* m0, Ttype290840* t0, Tclosuretypekind533679 kind0); N_NIMCALL(void, genord_554474_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); N_NIMCALL(void, unaryexprchar_549222_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, NimStringDesc* frmt0); N_NIMCALL(void, genarraylen_553415_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, Tmagic290524 op0); N_NIMCALL(void, unarystmt_548527_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, NimStringDesc* frmt0); N_NIMCALL(void, gensetlengthstr_553632_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); N_NIMCALL(void, gensetlengthseq_553500_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); N_NIMCALL(void, gensetop_554419_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, Tmagic290524 op0); N_NIMCALL(void, binarystmtinexcl_553857_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, NimStringDesc* frmt0); N_NIMCALL(Ropeobj177006*, rdsetelemloc_553662_839829468)(Tloc290816 a0, Ttype290840* settype0); N_NIMCALL(void, binaryexprchar_548809_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, NimStringDesc* frmt0); N_NIMCALL(void, geninop_554009_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); 
N_NIMCALL(NIM_BOOL, fewcmps_553803_839829468)(Tnode290802* s0); N_NIMCALL(void, geninexpraux_551496_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* a0, Tloc290816* b0, Tloc290816* d0); N_NIMCALL(void, binaryexprin_553837_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* a0, Tloc290816* b0, Tloc290816* d0, NimStringDesc* frmt0); N_NIMCALL(void, gencall_541632_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); N_NIMCALL(void, genclosurecall_538452_839829468)(Tcproc527021* p0, Tnode290802* le0, Tnode290802* ri0, Tloc290816* d0); N_NIMCALL(Ropeobj177006*, genarg_537787_839829468)(Tcproc527021* p0, Tnode290802* n_537790_839829468, Tsym290834* param0, Tnode290802* call0); static N_INLINE(Ropeobj177006*, genargstringtocstring_537776_839829468)(Tcproc527021* p0, Tnode290802* n0); N_NIMCALL(Ropeobj177006*, openarrayloc_537665_839829468)(Tcproc527021* p0, Tnode290802* n0); N_NIMCALL(Tnode290802*, skipconv_326882_3876443242)(Tnode290802* n0); N_NIMCALL(Tmagic290524, getmagic_316502_2616423590)(Tnode290802* op0); N_NIMCALL(Ropeobj177006*, genargnoparam_537938_839829468)(Tcproc527021* p0, Tnode290802* n0); N_NIMCALL(Ropeobj177006*, getrawproctype_538459_839829468)(Tcproc527021* p0, Ttype290840* t0); N_NIMCALL(NIM_BOOL, leftappearsonrightside_537329_839829468)(Tnode290802* le0, Tnode290802* ri0); N_NIMCALL(Tanalysisresult471003, ispartof_471340_788060399)(Tnode290802* a0, Tnode290802* b0); static N_INLINE(NIM_BOOL, hasnoinit_537383_839829468)(Tnode290802* call0); N_NIMCALL(void, resetloc_536350_839829468)(Tcproc527021* p0, Tloc290816* loc0); N_NIMCALL(Ropeobj177006*, addcomma_538464_839829468)(Ropeobj177006* r0); N_NIMCALL(void, geninfixcall_539929_839829468)(Tcproc527021* p0, Tnode290802* le0, Tnode290802* ri0, Tloc290816* d0); N_NIMCALL(NIM_BOOL, contains_109056_4286263276)(NimStringDesc* s0, char136Set chars0); N_NIMCALL(Ropeobj177006*, genpatterncall_539699_839829468)(Tcproc527021* p0, Tnode290802* ri_539702_839829468, NimStringDesc* pat0, 
Ttype290840* typ_539704_839829468); N_NIMCALL(Ropeobj177006*, genotherarg_537277_839829468)(Tcproc527021* p0, Tnode290802* ri0, NI i0, Ttype290840* typ0); N_NIMCALL(Ropeobj177006*, genthisarg_539475_839829468)(Tcproc527021* p0, Tnode290802* ri_539478_839829468, NI i0, Ttype290840* typ0); N_NIMCALL(Tnode290802*, skipaddrderef_539433_839829468)(Tnode290802* node0); N_NIMCALL(void, fixupcall_537410_839829468)(Tcproc527021* p0, Tnode290802* le0, Tnode290802* ri0, Tloc290816* d0, Ropeobj177006* callee0, Ropeobj177006* params0); N_NIMCALL(void, gennamedparamcall_540616_839829468)(Tcproc527021* p0, Tnode290802* ri0, Tloc290816* d0); N_NIMCALL(NIM_BOOL, contains_109046_4286263276)(NimStringDesc* s0, NIM_CHAR c0); N_NIMCALL(void, genprefixcall_537960_839829468)(Tcproc527021* p0, Tnode290802* le0, Tnode290802* ri0, Tloc290816* d0); static N_INLINE(void, poststmtactions_530942_839829468)(Tcproc527021* p0); N_NIMCALL(void, genreset_552731_839829468)(Tcproc527021* p0, Tnode290802* n0); N_NIMCALL(void, genecho_552369_839829468)(Tcproc527021* p0, Tnode290802* n0); N_NIMCALL(NimStringDesc*, nsuRepeatStr)(NimStringDesc* s0, NI n0); N_NIMCALL(void, genarrtoseq_553046_839829468)(Tcproc527021* p0, Tnode290802* t0, Tloc290816* d0); N_NIMCALL(void, genseqconstr_553004_839829468)(Tcproc527021* p0, Tnode290802* t0, Tloc290816* d0); N_NIMCALL(void, localerror_194080_155036129)(Tlineinfo189336 info0, Tmsgkind189002 msg0, NimStringDesc* arg0); N_NIMCALL(Tnode290802*, wrapprocforspawn_433501_2218250499)(Tsym290834* owner0, Tnode290802* spawnexpr0, Ttype290840* rettype0, Tnode290802* barrier0, Tnode290802* dest0); N_NIMCALL(Tnode290802*, liftparallel_476822_1773027539)(Tsym290834* owner0, Tnode290802* n0); N_NIMCALL(void, gendeepcopy_548374_839829468)(Tcproc527021* p0, Tloc290816 dest0, Tloc290816 src0); N_NIMCALL(NIM_BOOL, isdeepconstexpr_316566_2616423590)(Tnode290802* n0); N_NIMCALL(Ropeobj177006*, gensetnode_547664_839829468)(Tcproc527021* p0, Tnode290802* n0); N_NIMCALL(void, 
gensetconstr_555496_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); N_NIMCALL(NimStringDesc*, nimInt64ToStr)(NI64 x0); N_NIMCALL(void, exprcomplexconst_556684_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0); N_NIMCALL(void, genarrayconstr_556207_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0); N_NIMCALL(NIM_BOOL, handleconstexpr_552853_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0); N_NIMCALL(void, gentupleconstr_555618_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0); N_NIMCALL(void, genobjconstr_552903_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); N_NIMCALL(Tsym290834*, lookupfieldagain_551153_839829468)(Tcproc527021* p0, Ttype290840* ty_551156_839829468, Tsym290834* field0, Ropeobj177006** r0); N_NIMCALL(void, genfieldcheck_551504_839829468)(Tcproc527021* p0, Tnode290802* e0, Ropeobj177006* obj0, Tsym290834* field0, Ttype290840* origty0); N_NIMCALL(Tnode290802*, newstrnode_291678_850551059)(Tnodekind290020 kind0, NimStringDesc* strval0); N_NIMCALL(void, gencast_554537_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); N_NIMCALL(void, genconv_554632_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); N_NIMCALL(NIM_BOOL, comparetypes_324214_3876443242)(Ttype290840* x0, Ttype290840* y0, Tdistinctcompare322427 cmp0, Ttypecmpflag322429Set flags0); N_NIMCALL(void, genaddr_551051_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); static N_INLINE(NIM_BOOL, iscppref_550807_839829468)(Tcproc527021* p0, Ttype290840* typ0); N_NIMCALL(void, genbracketexpr_552277_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0); N_NIMCALL(void, genarrayelem_552093_839829468)(Tcproc527021* p0, Tnode290802* x0, Tnode290802* y0, Tloc290816* d0); N_NIMCALL(NIM_BOOL, isconstexpr_316510_2616423590)(Tnode290802* n0); N_NIMCALL(void, genopenarrayelem_552169_839829468)(Tcproc527021* p0, Tnode290802* x0, Tnode290802* y0, Tloc290816* d0); N_NIMCALL(void, 
genseqelem_552205_839829468)(Tcproc527021* p0, Tnode290802* x0, Tnode290802* y0, Tloc290816* d0); N_NIMCALL(void, gencstringelem_552144_839829468)(Tcproc527021* p0, Tnode290802* x0, Tnode290802* y0, Tloc290816* d0); N_NIMCALL(void, gentupleelem_551124_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); N_NIMCALL(void, genderef_541921_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, NIM_BOOL enforcederef0); N_NIMCALL(void, genrecordfield_551448_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); N_NIMCALL(Ttype290840*, genrecordfieldaux_551096_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, Tloc290816* a0); N_NIMCALL(void, gencheckedrecordfield_552046_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0); N_NIMCALL(void, genblock_544083_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0); N_NIMCALL(NI, startblock_541978_839829468)(Tcproc527021* p0, NimStringDesc* start0, Ropeobj177006** args0, NI args0Len0); N_NIMCALL(void, endblock_542060_839829468)(Tcproc527021* p0); N_NIMCALL(void, endblock_542035_839829468)(Tcproc527021* p0, Ropeobj177006* blockend0); N_NIMCALL(Ropeobj177006*, blockbody_542025_839829468)(Tblock527019* b0); N_NIMCALL(void, genstmtlistexpr_556402_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0); N_NIMCALL(void, genif_542982_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0); N_NIMCALL(void, downconv_556581_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0); N_NIMCALL(NI, inheritancediff_324252_3876443242)(Ttype290840* a0, Ttype290840* b0); N_NIMCALL(void, upconv_556431_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0); N_NIMCALL(void, genrangechck_554590_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0, NimStringDesc* magic0); N_NIMCALL(void, convstrtocstr_554642_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0); N_NIMCALL(void, convcstrtostr_554654_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0); 
N_NIMCALL(void, genclosure_555836_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0); static N_INLINE(NIM_BOOL, isconstclosure_555810_839829468)(Tnode290802* n0); static N_INLINE(NIM_BOOL, isroutine_295323_850551059)(Tsym290834* s0); N_NIMCALL(void, genwhilestmt_543984_839829468)(Tcproc527021* p0, Tnode290802* t0); static N_INLINE(Ropeobj177006*, assignlabel_542020_839829468)(Tblock527019* b0); N_NIMCALL(NIM_BOOL, stmtscontainpragma_526083_2036603609)(Tnode290802* n0, Tspecialword273003 w0); N_NIMCALL(void, gencomputedgoto_543744_839829468)(Tcproc527021* p0, Tnode290802* n0); N_NIMCALL(void, genvarstmt_542854_839829468)(Tcproc527021* p0, Tnode290802* n0); N_NIMCALL(void, gensinglevar_542276_839829468)(Tcproc527021* p0, Tnode290802* a0); N_NIMCALL(void, gengotovar_542258_839829468)(Tcproc527021* p0, Tnode290802* value0); N_NIMCALL(void, assignglobalvar_536819_839829468)(Tcproc527021* p0, Tsym290834* s0); N_NIMCALL(void, varindynamiclib_536812_839829468)(Tcgen527027* m0, Tsym290834* sym0); N_NIMCALL(void, registergcroot_541762_839829468)(Tcproc527021* p0, Tsym290834* v0); N_NIMCALL(Ropeobj177006*, gentraverseprocforglobal_536032_839829468)(Tcgen527027* m0, Tsym290834* s0); static N_INLINE(NIM_BOOL, isassignedimmediately_541781_839829468)(Tnode290802* n0); N_NIMCALL(NIM_BOOL, containshiddenpointer_318120_3876443242)(Ttype290840* typ0); static N_INLINE(void, loadinto_541928_839829468)(Tcproc527021* p0, Tnode290802* le0, Tnode290802* ri0, Tloc290816* a0); N_NIMCALL(void, genasgncall_541695_839829468)(Tcproc527021* p0, Tnode290802* le0, Tnode290802* ri0, Tloc290816* d0); N_NIMCALL(void, genclosurevar_542832_839829468)(Tcproc527021* p0, Tnode290802* a0); N_NIMCALL(void, genvartuple_541794_839829468)(Tcproc527021* p0, Tnode290802* n0); N_NIMCALL(Tnode290802*, lowertupleunpacking_431037_2218250499)(Tnode290802* n0, Tsym290834* owner0); N_NIMCALL(void, genconststmt_542909_839829468)(Tcproc527021* p0, Tnode290802* t0); N_NIMCALL(NIM_BOOL, 
containscompiletimeonly_326721_3876443242)(Ttype290840* t0); static N_INLINE(NIM_BOOL, emitlazily_530248_839829468)(Tsym290834* s0); N_NIMCALL(void, gencase_545826_839829468)(Tcproc527021* p0, Tnode290802* t0, Tloc290816* d0); N_NIMCALL(void, genstringcase_545416_839829468)(Tcproc527021* p0, Tnode290802* t0, Tloc290816* d0); N_NIMCALL(NI, nextpoweroftwo_100629_1009420244)(NI x0); N_NIMCALL(void, gencasestringbranch_545100_839829468)(Tcproc527021* p0, Tnode290802* b0, Tloc290816 e0, Ropeobj177006* labl0, Ropeobj177006** branches0, NI branches0Len0); N_NIMCALL(NI64, hashstring_526100_2036603609)(NimStringDesc* s0); N_NIMCALL(Ropeobj177006*, gencasesecondpass_544965_839829468)(Tcproc527021* p0, Tnode290802* t0, Tloc290816* d0, NI labid0, NI until0); N_NIMCALL(void, exprblock_542103_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0); N_NIMCALL(void, gencasegeneric_545087_839829468)(Tcproc527021* p0, Tnode290802* t0, Tloc290816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0); N_NIMCALL(Ropeobj177006*, genifforcaseuntil_545021_839829468)(Tcproc527021* p0, Tnode290802* t0, Tloc290816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, NI until0, Tloc290816 a0); N_NIMCALL(void, gencasegenericbranch_544910_839829468)(Tcproc527021* p0, Tnode290802* b0, Tloc290816 e0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, Ropeobj177006* labl0); N_NIMCALL(void, gengotoforcase_543673_839829468)(Tcproc527021* p0, Tnode290802* casestmt0); N_NIMCALL(void, genordinalcase_545724_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0); N_NIMCALL(NI, ifswitchsplitpoint_545615_839829468)(Tcproc527021* p0, Tnode290802* n0); N_NIMCALL(NIM_BOOL, branchhastoobigrange_545575_839829468)(Tnode290802* b0); N_NIMCALL(void, genreturnstmt_543617_839829468)(Tcproc527021* p0, Tnode290802* t0); N_NIMCALL(void, blockleaveactions_543442_839829468)(Tcproc527021* p0, NI howmanytrys0, NI howmanyexcepts0); static N_INLINE(Tnode290802*, 
pop_316246_1689653243)(Tnodeseq290796** s0); N_NIMCALL(void, genbreakstmt_544444_839829468)(Tcproc527021* p0, Tnode290802* t0); N_NIMCALL(void, genasgn_547239_839829468)(Tcproc527021* p0, Tnode290802* e0, NIM_BOOL fastasgn0); N_NIMCALL(NIM_BOOL, fielddiscriminantcheckneeded_547080_839829468)(Tcproc527021* p0, Tnode290802* asgn0); N_NIMCALL(void, asgnfielddiscriminant_547209_839829468)(Tcproc527021* p0, Tnode290802* e0); N_NIMCALL(void, gendiscriminantcheck_547144_839829468)(Tcproc527021* p0, Tloc290816 a0, Tloc290816 tmp0, Ttype290840* objtype0, Tsym290834* field0); N_NIMCALL(Ropeobj177006*, discriminatortabledecl_534094_839829468)(Tcgen527027* m0, Ttype290840* objtype0, Tsym290834* d0); N_NIMCALL(void, genasmstmt_546659_839829468)(Tcproc527021* p0, Tnode290802* t0); N_NIMCALL(Ropeobj177006*, genasmoremitstmt_546529_839829468)(Tcproc527021* p0, Tnode290802* t0, NIM_BOOL isasmstmt0); N_NIMCALL(NimStringDesc*, resizeString)(NimStringDesc* dest0, NI addlen0); N_NIMCALL(void, gentrycpp_545865_839829468)(Tcproc527021* p0, Tnode290802* t0, Tloc290816* d0); static N_INLINE(void, gensimpleblock_542095_839829468)(Tcproc527021* p0, Tnode290802* stmts0); N_NIMCALL(void, gentry_546114_839829468)(Tcproc527021* p0, Tnode290802* t0, Tloc290816* d0); N_NIMCALL(NIM_BOOL, isdefined_198011_1967573533)(NimStringDesc* symbol0); N_NIMCALL(void, line_530695_839829468)(Tcproc527021* p0, Tcprocsection527011 s0, NimStringDesc* r0); static N_INLINE(Ropeobj177006*, pop_177530_1689653243)(TY189350** s0); N_NIMCALL(void, genraisestmt_544828_839829468)(Tcproc527021* p0, Tnode290802* t0); N_NIMCALL(NimStringDesc*, getraisefrmt_544824_839829468)(Tcproc527021* p0); N_NIMCALL(void, gentypesection_536184_839829468)(Tcgen527027* m0, Tnode290802* n0); N_NIMCALL(void, genpragma_547039_839829468)(Tcproc527021* p_547041_839829468, Tnode290802* n0); N_NIMCALL(Tspecialword273003, whichpragma_316911_2616423590)(Tnode290802* n0); N_NIMCALL(void, genemit_546839_839829468)(Tcproc527021* p0, Tnode290802* t0); 
N_NIMCALL(Tcfilesection527005, determinesection_546819_839829468)(Tnode290802* n0); N_NIMCALL(NIM_BOOL, nsuStartsWith)(NimStringDesc* s0, NimStringDesc* prefix0); N_NIMCALL(void, genbreakpoint_546862_839829468)(Tcproc527021* p0, Tnode290802* t0); N_NIMCALL(void, genwatchpoint_547016_839829468)(Tcproc527021* p0, Tnode290802* n0); N_NIMCALL(Tsym290834*, skipgenericowner_295279_850551059)(Tsym290834* s0); N_NIMCALL(void, genparforstmt_544208_839829468)(Tcproc527021* p0, Tnode290802* t0); N_NIMCALL(void, genstate_542117_839829468)(Tcproc527021* p0, Tnode290802* n0); N_NIMCALL(void, gengotostate_542144_839829468)(Tcproc527021* p0, Tnode290802* n0); N_NIMCALL(void, genbreakstate_542229_839829468)(Tcproc527021* p0, Tnode290802* n0); N_NIMCALL(void, registermoduletomain_560243_839829468)(Tsym290834* m0); N_NIMCALL(Ropeobj177006*, getinitname_560235_839829468)(Tsym290834* m0); N_NIMCALL(Ropeobj177006*, getsomeinitname_559904_839829468)(Tsym290834* m0, NimStringDesc* suffix0); N_NIMCALL(Ropeobj177006*, getdatinitname_560239_839829468)(Tsym290834* m0); N_NIMCALL(Tnode290802*, generatemethoddispatchers_430151_3853300031)(void); N_NIMCALL(void, genmainproc_559729_839829468)(Tcgen527027* m0); N_NIMCALL(Ropeobj177006*, genfilenames_559688_839829468)(Tcgen527027* m0); N_NIMCALL(void, finishmodule_561420_839829468)(Tcgen527027* m0); N_NIMCALL(void, updatecachedmodule_561813_839829468)(Tcgen527027* m0); N_NIMCALL(NIM_BOOL, mergerequired_528832_2760143328)(Tcgen527027* m0); N_NIMCALL(void, mergefiles_529241_2760143328)(NimStringDesc* cfilename0, Tcgen527027* m0); N_NIMCALL(void, geninitcode_560286_839829468)(Tcgen527027* m0); N_NIMCALL(Ropeobj177006*, gensectionstart_528081_2760143328)(Tcprocsection527011 ps0); N_NIMCALL(Ropeobj177006*, gensectionend_528116_2760143328)(Tcprocsection527011 ps0); N_NIMCALL(Ropeobj177006*, gensectionstart_528015_2760143328)(Tcfilesection527005 fs0); N_NIMCALL(Ropeobj177006*, gensectionend_528050_2760143328)(Tcfilesection527005 fs0); N_NIMCALL(void, 
finishtypedescriptions_533842_839829468)(Tcgen527027* m0); N_NIMCALL(Ropeobj177006*, genmodule_560491_839829468)(Tcgen527027* m0, NimStringDesc* cfile0); N_NIMCALL(Ropeobj177006*, getfileheader_559683_839829468)(NimStringDesc* cfile0); N_NIMCALL(Ropeobj177006*, getcopyright_559665_839829468)(NimStringDesc* cfile0); N_NIMCALL(NimStringDesc*, getcompilecfilecmd_272284_2528170400)(NimStringDesc* cfilename0, NIM_BOOL isexternal0); static N_INLINE(void, addinttypes_559659_839829468)(Ropeobj177006** result0); N_NIMCALL(Ropeobj177006*, genmergeinfo_528203_2760143328)(Tcgen527027* m0); N_NIMCALL(void, generatethreadlocalstorage_536717_839829468)(Tcgen527027* m0); N_NIMCALL(void, generateheaders_558104_839829468)(Tcgen527027* m0); N_NIMCALL(NimStringDesc*, nsuReplaceChar)(NimStringDesc* s0, NIM_CHAR sub0, NIM_CHAR by0); N_NIMCALL(void, writerope_177836_2381377266)(Ropeobj177006* head0, NimStringDesc* filename0, NIM_BOOL usewarning0); N_NIMCALL(void, addfiletocompile_271863_2528170400)(NimStringDesc* filename0); N_NIMCALL(void, addfiletolink_271872_2528170400)(NimStringDesc* filename0); N_NIMCALL(void, writemodule_561637_839829468)(Tcgen527027* m0, NIM_BOOL pending0); N_NIMCALL(void, generatethreadvarssize_536771_839829468)(Tcgen527027* m0); N_NIMCALL(NIM_BOOL, shouldrecompile_561621_839829468)(Ropeobj177006* code0, NimStringDesc* cfile0); N_NIMCALL(NimStringDesc*, toobjfile_271859_2528170400)(NimStringDesc* filename0); N_NIMCALL(NIM_BOOL, writeropeifnotequal_178511_2381377266)(Ropeobj177006* r0, NimStringDesc* filename0); N_NIMCALL(NIM_BOOL, nosexistsFile)(NimStringDesc* filename0); N_NIMCALL(NIM_BOOL, nosfileNewer)(NimStringDesc* a0, NimStringDesc* b0); N_NIMCALL(void, writemapping_272789_2528170400)(Ropeobj177006* gsymbolmapping0); N_NIMCALL(void, writeheader_561152_839829468)(Tcgen527027* m0); N_NIMCALL(void, nossplitFile)(NimStringDesc* path0, TY124315* Result); N_NIMCALL(void, resetmodule_560763_839829468)(Tcgen527027* m0); N_NIMCALL(void, 
nullify_560833_839829468)(Ropeobj177006** arr0); N_NIMCALL(void, nullify_560858_839829468)(Ropeobj177006** arr0); STRING_LITERAL(T839829468_4, "\011", 1); STRING_LITERAL(T839829468_10, "compiler/cgen.nim", 17); NIM_CONST TY201018 T839829468_9 = {((NimStringDesc*) &T839829468_10), ((NI) 1158)} ; STRING_LITERAL(T839829468_11, "T", 1); STRING_LITERAL(T839829468_12, "_", 1); STRING_LITERAL(T839829468_13, "added pending module twice: ", 28); STRING_LITERAL(T839829468_14, ".h", 2); STRING_LITERAL(T839829468_15, ".cpp", 4); STRING_LITERAL(T839829468_16, ".m", 2); STRING_LITERAL(T839829468_17, ".c", 2); STRING_LITERAL(T839829468_18, "0", 1); STRING_LITERAL(T839829468_19, "$", 1); STRING_LITERAL(T839829468_20, "ropes: invalid format string $", 30); STRING_LITERAL(T839829468_21, "$N#line $2 $1$N", 15); STRING_LITERAL(T839829468_22, "N_LIB_IMPORT ", 13); STRING_LITERAL(T839829468_23, "N_LIB_EXPORT ", 13); STRING_LITERAL(T839829468_24, "static ", 7); STRING_LITERAL(T839829468_25, "mapType", 7); STRING_LITERAL(T839829468_26, "void", 4); STRING_LITERAL(T839829468_27, "getTypeDescAux: t == nil", 24); STRING_LITERAL(T839829468_28, "TY", 2); STRING_LITERAL(T839829468_29, "getTypeName: ", 13); STRING_LITERAL(T839829468_30, "void*", 5); STRING_LITERAL(T839829468_31, "NimStringDesc", 13); STRING_LITERAL(T839829468_32, "NimStringDesc*", 14); STRING_LITERAL(T839829468_33, "NCSTRING", 8); STRING_LITERAL(T839829468_34, "NIM_BOOL", 8); STRING_LITERAL(T839829468_35, "NIM_CHAR", 8); STRING_LITERAL(T839829468_36, "NI", 2); STRING_LITERAL(T839829468_37, "NI8", 3); STRING_LITERAL(T839829468_38, "NI16", 4); STRING_LITERAL(T839829468_39, "NI32", 4); STRING_LITERAL(T839829468_40, "NI64", 4); STRING_LITERAL(T839829468_41, "NF", 2); STRING_LITERAL(T839829468_42, "NF32", 4); STRING_LITERAL(T839829468_43, "NF64", 4); STRING_LITERAL(T839829468_44, "NF128", 5); STRING_LITERAL(T839829468_45, "NU", 2); STRING_LITERAL(T839829468_46, "NU8", 3); STRING_LITERAL(T839829468_47, "NU16", 4); 
STRING_LITERAL(T839829468_48, "NU32", 4); STRING_LITERAL(T839829468_49, "NU64", 4); NIM_CONST TY531943 Numericaltypetostr_531941_839829468 = {((NimStringDesc*) &T839829468_36), ((NimStringDesc*) &T839829468_37), ((NimStringDesc*) &T839829468_38), ((NimStringDesc*) &T839829468_39), ((NimStringDesc*) &T839829468_40), ((NimStringDesc*) &T839829468_41), ((NimStringDesc*) &T839829468_42), ((NimStringDesc*) &T839829468_43), ((NimStringDesc*) &T839829468_44), ((NimStringDesc*) &T839829468_45), ((NimStringDesc*) &T839829468_46), ((NimStringDesc*) &T839829468_47), ((NimStringDesc*) &T839829468_48), ((NimStringDesc*) &T839829468_49)} ; STRING_LITERAL(T839829468_50, "tyStatic for getSimpleTypeDesc", 30); STRING_LITERAL(T839829468_51, "cannot generate C type for: ", 28); STRING_LITERAL(T839829468_52, "&", 1); STRING_LITERAL(T839829468_53, "*", 1); STRING_LITERAL(T839829468_54, "$1 $2;$n", 8); STRING_LITERAL(T839829468_55, "typedef $1 $2 $2;$n", 19); STRING_LITERAL(T839829468_56, "union", 5); STRING_LITERAL(T839829468_57, "struct", 6); STRING_LITERAL(T839829468_58, "getTypeForward(", 15); STRING_LITERAL(T839829468_59, "typedef NI32 $1;$n", 18); STRING_LITERAL(T839829468_60, "typedef NU8 $1;$n", 17); STRING_LITERAL(T839829468_61, "typedef NU16 $1;$n", 18); STRING_LITERAL(T839829468_62, "typedef NI64 $1;$n", 18); STRING_LITERAL(T839829468_63, "getTypeDescAux: enum", 20); STRING_LITERAL(T839829468_64, "typedef $1_PTR($2, $3) $4;$n", 28); STRING_LITERAL(T839829468_65, "N_NIMCALL", 9); STRING_LITERAL(T839829468_66, "N_STDCALL", 9); STRING_LITERAL(T839829468_67, "N_CDECL", 7); STRING_LITERAL(T839829468_68, "N_SAFECALL", 10); STRING_LITERAL(T839829468_69, "N_SYSCALL", 9); STRING_LITERAL(T839829468_70, "N_INLINE", 8); STRING_LITERAL(T839829468_71, "N_NOINLINE", 10); STRING_LITERAL(T839829468_72, "N_FASTCALL", 10); STRING_LITERAL(T839829468_73, "N_CLOSURE", 9); STRING_LITERAL(T839829468_74, "N_NOCONV", 8); NIM_CONST TY290016 Callingconvtostr_531585_839829468 = {((NimStringDesc*) 
&T839829468_65), ((NimStringDesc*) &T839829468_66), ((NimStringDesc*) &T839829468_67), ((NimStringDesc*) &T839829468_68), ((NimStringDesc*) &T839829468_69), ((NimStringDesc*) &T839829468_70), ((NimStringDesc*) &T839829468_71), ((NimStringDesc*) &T839829468_72), ((NimStringDesc*) &T839829468_73), ((NimStringDesc*) &T839829468_74)} ; STRING_LITERAL(T839829468_75, "typedef struct {$nN_NIMCALL_PTR($2, ClPrc) $3;$nvoid* ClEnv;$n}" " $1;$n", 69); STRING_LITERAL(T839829468_76, "struct $2 : #TGenericSeq {$n", 28); STRING_LITERAL(T839829468_77, "struct $2 {$n #TGenericSeq Sup;$n", 34); STRING_LITERAL(T839829468_78, " $1 data[SEQ_DECL_SIZE];$n};$n", 31); STRING_LITERAL(T839829468_79, "TGenericSeq", 11); STRING_LITERAL(T839829468_80, "typedef $1 $2[$3];$n", 20); STRING_LITERAL(T839829468_81, "invalid apostrophe type parameter index", 39); STRING_LITERAL(T839829468_82, "<", 1); STRING_LITERAL(T839829468_83, " COMMA ", 7); STRING_LITERAL(T839829468_84, "> ", 2); extern NIM_CONST TY271427 Cc_271413_2528170400; STRING_LITERAL(T839829468_85, " {$n", 4); STRING_LITERAL(T839829468_86, " {$n#TNimType* m_type;$n", 24); STRING_LITERAL(T839829468_87, " : public $1 {$n", 16); STRING_LITERAL(T839829468_88, " {$n $1 Sup;$n", 15); STRING_LITERAL(T839829468_89, "genRecordFieldsAux", 18); STRING_LITERAL(T839829468_90, "$1.$2", 5); STRING_LITERAL(T839829468_91, "S", 1); STRING_LITERAL(T839829468_92, "struct {", 8); STRING_LITERAL(T839829468_93, "} $1;$n", 7); STRING_LITERAL(T839829468_94, "genRecordFieldsAux(record case branch)", 38); STRING_LITERAL(T839829468_95, "union{$n$1} $2;$n", 17); STRING_LITERAL(T839829468_96, "mangleRecFieldName", 18); STRING_LITERAL(T839829468_97, "$1 $2[SEQ_DECL_SIZE];$n", 23); STRING_LITERAL(T839829468_98, "$1 $2:$3;$n", 11); STRING_LITERAL(T839829468_99, "genRecordFieldsAux()", 20); STRING_LITERAL(T839829468_100, "char dummy;$n", 13); STRING_LITERAL(T839829468_101, "};", 2); STRING_LITERAL(T839829468_102, "$1 $2 {$n", 9); STRING_LITERAL(T839829468_103, "$1 
Field$2;$n", 13); STRING_LITERAL(T839829468_104, "char dummy;", 11); STRING_LITERAL(T839829468_105, "Set", 3); STRING_LITERAL(T839829468_106, "typedef NU$2 $1;$n", 18); STRING_LITERAL(T839829468_107, "typedef NU8 $1[$2];$n", 21); STRING_LITERAL(T839829468_108, "getTypeDescAux(", 15); STRING_LITERAL(T839829468_109, "genProcParams", 13); STRING_LITERAL(T839829468_110, ", ", 2); STRING_LITERAL(T839829468_111, " ", 1); STRING_LITERAL(T839829468_112, ", NI $1Len$2", 12); STRING_LITERAL(T839829468_113, " Result", 7); STRING_LITERAL(T839829468_114, "void* ClEnv", 11); STRING_LITERAL(T839829468_115, "...", 3); STRING_LITERAL(T839829468_116, "void)", 5); STRING_LITERAL(T839829468_117, ")", 1); STRING_LITERAL(T839829468_118, "(", 1); STRING_LITERAL(T839829468_119, "$1($2, $3)$4", 12); STRING_LITERAL(T839829468_120, "proc has no result symbol", 25); STRING_LITERAL(T839829468_121, " register", 9); STRING_LITERAL(T839829468_122, " volatile", 9); STRING_LITERAL(T839829468_123, "$1 = $2;$n", 10); STRING_LITERAL(T839829468_124, "(*$1)", 5); STRING_LITERAL(T839829468_125, ";", 1); STRING_LITERAL(T839829468_126, "FR.s[$1].address = (void*)$3; FR.s[$1].typ = $4; FR.s[$1].name " "= $2;$n", 70); STRING_LITERAL(T839829468_127, "NTI$1", 5); STRING_LITERAL(T839829468_128, "(&", 2); STRING_LITERAL(T839829468_129, "TNimType", 8); STRING_LITERAL(T839829468_130, "TNimNode", 8); STRING_LITERAL(T839829468_131, "extern TNimType $1; /* $2 */$n", 30); STRING_LITERAL(T839829468_132, "0", 1); STRING_LITERAL(T839829468_133, "void*", 5); STRING_LITERAL(T839829468_134, "$1.size = sizeof($2);$n$1.kind = $3;$n$1.base = $4;$n", 53); STRING_LITERAL(T839829468_135, "$1.flags = $2;$n", 16); STRING_LITERAL(T839829468_136, "TNimType $1; /* $2 */$n", 23); STRING_LITERAL(T839829468_137, "genTypeInfo(", 12); STRING_LITERAL(T839829468_138, "$1[$2]", 6); STRING_LITERAL(T839829468_139, "static TNimNode* $1[$2];$n", 26); STRING_LITERAL(T839829468_140, "$1[$2] = &$3;$n", 15); STRING_LITERAL(T839829468_141, "$1.kind = 
1;$n$1.offset = offsetof($2, Field$3);$n$1.typ = $4;$" "n$1.name = \"Field$3\";$n", 86); STRING_LITERAL(T839829468_142, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n", 45); STRING_LITERAL(T839829468_143, "$1.len = $2; $1.kind = 2;$n", 27); STRING_LITERAL(T839829468_144, "$1.node = &$2;$n", 16); STRING_LITERAL(T839829468_145, "#nimGCvisit((void*)$1, op);$n", 29); STRING_LITERAL(T839829468_146, "N_NIMCALL(void, $1)(void* p, NI op)", 35); STRING_LITERAL(T839829468_147, "$1 a;$n", 7); STRING_LITERAL(T839829468_148, "a = ($1)p;$n", 12); STRING_LITERAL(T839829468_149, "LOC", 3); STRING_LITERAL(T839829468_150, "$1 = ($2)0;$n", 13); STRING_LITERAL(T839829468_151, "<string.h>", 10); STRING_LITERAL(T839829468_152, "memset((void*)$1, 0, sizeof($2));$n", 35); STRING_LITERAL(T839829468_153, ".Sup", 4); STRING_LITERAL(T839829468_154, "$1.m_type = $2;$n", 17); STRING_LITERAL(T839829468_155, "#objectInit($1, $2);$n", 22); STRING_LITERAL(T839829468_156, "for ($1 = 0; $1 < $2->$3; $1++) {$n", 35); STRING_LITERAL(T839829468_157, "len", 3); STRING_LITERAL(T839829468_158, "Sup.len", 7); STRING_LITERAL(T839829468_159, "for ($1 = 0; $1 < $2; $1++) {$n", 31); STRING_LITERAL(T839829468_160, "}$n", 3); STRING_LITERAL(T839829468_161, "$1.Sup", 6); STRING_LITERAL(T839829468_162, "genTraverseProc", 15); STRING_LITERAL(T839829468_163, "switch ($1.$2) {$n", 18); STRING_LITERAL(T839829468_164, "case $1 ... 
$2:$n", 17); STRING_LITERAL(T839829468_165, "genLiteral: ty is nil", 21); STRING_LITERAL(T839829468_166, "(-2147483647 -1)", 16); STRING_LITERAL(T839829468_167, "IL64($1)", 8); STRING_LITERAL(T839829468_168, "(IL64(-9223372036854775807) - IL64(1))", 38); STRING_LITERAL(T839829468_169, "NIM_TRUE", 8); STRING_LITERAL(T839829468_170, "NIM_FALSE", 9); STRING_LITERAL(T839829468_171, "ULL", 3); STRING_LITERAL(T839829468_172, "(($1) $2)", 9); STRING_LITERAL(T839829468_173, "static NIM_CONST $1 $2 = {NIM_NIL,NIM_NIL};$n", 45); STRING_LITERAL(T839829468_174, "NIM_NIL", 7); STRING_LITERAL(T839829468_175, "((#NimStringDesc*) NIM_NIL)", 27); STRING_LITERAL(T839829468_176, "((#NimStringDesc*) &$1)", 23); STRING_LITERAL(T839829468_177, "STRING_LITERAL($1, $2, $3);$n", 29); STRING_LITERAL(T839829468_178, "((#NimStringDesc*) &$1$2)", 25); STRING_LITERAL(T839829468_179, "genLiteral(", 11); STRING_LITERAL(T839829468_180, "case $1:$n", 10); STRING_LITERAL(T839829468_181, "default:$n", 10); STRING_LITERAL(T839829468_182, "break;$n", 8); STRING_LITERAL(T839829468_183, "} $n", 4); STRING_LITERAL(T839829468_184, "genTraverseProc()", 17); STRING_LITERAL(T839829468_185, "$1.Field$2", 10); STRING_LITERAL(T839829468_186, "$1.ClEnv", 8); STRING_LITERAL(T839829468_187, "$1->data[$2]", 12); STRING_LITERAL(T839829468_188, "a", 1); STRING_LITERAL(T839829468_189, "(*a)", 4); STRING_LITERAL(T839829468_190, "$1 {$n$2$3$4}$n", 15); STRING_LITERAL(T839829468_191, "$1;$n", 5); STRING_LITERAL(T839829468_192, "$1.marker = $2;$n", 17); STRING_LITERAL(T839829468_193, "$1.len = $2; $1.kind = 0;$n$3.node = &$1;$n", 43); STRING_LITERAL(T839829468_194, "$1.offset = $2;$n", 17); STRING_LITERAL(T839829468_195, "NI $1;$n", 8); STRING_LITERAL(T839829468_196, "static char* NIM_CONST $1[$2] = {$n$3};$n", 41); STRING_LITERAL(T839829468_197, "for ($1 = 0; $1 < $2; $1++) {$n$3[$1+$4].kind = 1;$n$3[$1+$4].o" "ffset = $1;$n$3[$1+$4].name = $5[$1];$n$6[$1] = &$3[$1+$4];$n}$n", 127); STRING_LITERAL(T839829468_198, "$1.len 
= $2; $1.kind = 2; $1.sons = &$3[0];$n$4.node = &$1;$n", 61); STRING_LITERAL(T839829468_199, "$1.flags = 1<<2;$n", 18); STRING_LITERAL(T839829468_200, "anonymous obj with discriminator", 32); STRING_LITERAL(T839829468_201, "NimDT_$1_$2", 11); STRING_LITERAL(T839829468_202, "$1.kind = 3;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n" "ame = $5;$n$1.sons = &$6[0];$n$1.len = $7;$n", 107); STRING_LITERAL(T839829468_203, "TNimNode* $1[$2];$n", 19); STRING_LITERAL(T839829468_204, "genObjectFields; nkOfBranch broken", 34); STRING_LITERAL(T839829468_205, "genObjectFields(nkRecCase)", 26); STRING_LITERAL(T839829468_206, "$1.kind = 1;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n" "ame = $5;$n", 74); STRING_LITERAL(T839829468_207, "genObjectFields", 15); STRING_LITERAL(T839829468_208, "$1.deepcopy =(void* (N_RAW_NIMCALL*)(void*))$2;$n", 49); STRING_LITERAL(T839829468_209, "\011return $1;$n", 13); STRING_LITERAL(T839829468_210, "Result", 6); STRING_LITERAL(T839829468_211, "closure generation failed", 25); STRING_LITERAL(T839829468_212, "$1 = ($2) ClEnv;$n", 18); STRING_LITERAL(T839829468_213, "__declspec(noreturn) ", 21); STRING_LITERAL(T839829468_214, "__declspec(naked) ", 18); STRING_LITERAL(T839829468_215, "$N$1 {$n$2$3$4}$N$N", 19); STRING_LITERAL(T839829468_216, "$N$1 {$N", 8); STRING_LITERAL(T839829468_217, "struct {$1} GCFRAME;$n", 22); STRING_LITERAL(T839829468_218, "nimFrame", 8); STRING_LITERAL(T839829468_219, "VarSlot", 7); STRING_LITERAL(T839829468_220, "\011nimfrs($1, $2, $3, $4)$N", 25); STRING_LITERAL(T839829468_221, "\011nimfr($1, $2)$N", 16); STRING_LITERAL(T839829468_222, "\011#nimProfile();$n", 17); STRING_LITERAL(T839829468_223, "{", 1); STRING_LITERAL(T839829468_224, "\011}BeforeRet: ;$n", 16); STRING_LITERAL(T839829468_225, "if (((NU)&GCFRAME) < 4096) #nimGCFrame(&GCFRAME);$n", 51); STRING_LITERAL(T839829468_226, "\011#popFrame();$n", 15); STRING_LITERAL(T839829468_227, "}$N", 3); STRING_LITERAL(T839829468_228, "static void* $1;$n", 18); 
STRING_LITERAL(T839829468_229, "||", 2); STRING_LITERAL(T839829468_230, "($1 = #nimLoadLibrary((#NimStringDesc*) &$2))$n", 47); STRING_LITERAL(T839829468_231, "if (!($1)) #nimLoadLibraryError((#NimStringDesc*) &$2);$n", 57); STRING_LITERAL(T839829468_232, "if (!($1 = #nimLoadLibrary($2))) #nimLoadLibraryError($2);$n", 60); STRING_LITERAL(T839829468_233, "loadDynamicLib", 14); STRING_LITERAL(T839829468_234, "Dl_$1", 5); STRING_LITERAL(T839829468_235, "\011$1 = ($2) ($3$4));$n", 21); NIM_CONST TY201018 T839829468_236 = {((NimStringDesc*) &T839829468_10), ((NI) 535)} ; STRING_LITERAL(T839829468_237, "wrong index: ", 13); STRING_LITERAL(T839829468_238, "\011$1 = ($2) #nimGetProcAddr($3, $4);$n", 37); STRING_LITERAL(T839829468_239, "$2 $1;$n", 8); STRING_LITERAL(T839829468_240, "extern ", 7); STRING_LITERAL(T839829468_241, "NIM_THREADVAR ", 14); STRING_LITERAL(T839829468_242, " $1;$n", 6); STRING_LITERAL(T839829468_243, "cgsym: ", 7); STRING_LITERAL(T839829468_244, ": ", 2); STRING_LITERAL(T839829468_245, "extern $1 $2;$n", 15); STRING_LITERAL(T839829468_246, "extern \"C\" ", 11); STRING_LITERAL(T839829468_247, " __attribute__((naked))", 23); STRING_LITERAL(T839829468_248, " __attribute__((noreturn))", 26); STRING_LITERAL(T839829468_249, "#asgnRef((void**) $1, $2);$n", 28); STRING_LITERAL(T839829468_250, "#asgnRefNoCycle((void**) $1, $2);$n", 35); STRING_LITERAL(T839829468_251, "#unsureAsgnRef((void**) $1, $2);$n", 34); STRING_LITERAL(T839829468_252, "#genericSeqAssign($1, $2, $3);$n", 32); STRING_LITERAL(T839829468_253, "$1 = #copyString($2);$n", 23); STRING_LITERAL(T839829468_254, "$3 = $1; $1 = #copyStringRC1($2);$n", 35); STRING_LITERAL(T839829468_255, "if ($1) #nimGCunrefNoCycle($1);$n", 33); STRING_LITERAL(T839829468_256, "#unsureAsgnRef((void**) $1, #copyString($2));$n", 47); STRING_LITERAL(T839829468_257, ".", 1); STRING_LITERAL(T839829468_258, "ClEnv", 5); STRING_LITERAL(T839829468_259, "$1.ClPrc = $2.ClPrc;$n", 22); STRING_LITERAL(T839829468_260, "Field$1", 
7); STRING_LITERAL(T839829468_261, "memcpy((void*)$1, (NIM_CONST void*)$2, sizeof($3));$n", 53); STRING_LITERAL(T839829468_262, "#genericShallowAssign((void*)$1, (void*)$2, $3);$n", 50); STRING_LITERAL(T839829468_263, "#genericAssign((void*)$1, (void*)$2, $3);$n", 43); STRING_LITERAL(T839829468_265, "compiler/ccgexprs.nim", 21); NIM_CONST TY201018 T839829468_264 = {((NimStringDesc*) &T839829468_265), ((NI) 320)} ; STRING_LITERAL(T839829468_266, "#genericAssignOpenArray((void*)$1, (void*)$2, $1Len0, $3);$n", 60); STRING_LITERAL(T839829468_267, "memcpy((void*)$1, (NIM_CONST void*)$2, sizeof($1[0])*$1Len0);$n", 63); STRING_LITERAL(T839829468_268, "memcpy((void*)$1, (NIM_CONST void*)$2, $3);$n", 45); STRING_LITERAL(T839829468_269, "genAssignment: ", 15); STRING_LITERAL(T839829468_270, "request to generate code for .compileTime proc: ", 48); STRING_LITERAL(T839829468_271, "expr: proc not init ", 20); STRING_LITERAL(T839829468_272, "NIM_CONST $1 $2 = $3;$n", 23); STRING_LITERAL(T839829468_273, "{$n", 3); STRING_LITERAL(T839829468_274, "0x$1,$n", 7); STRING_LITERAL(T839829468_275, "0x$1, ", 6); STRING_LITERAL(T839829468_276, "0x$1}$n", 7); STRING_LITERAL(T839829468_277, "{{$1, $1}", 9); STRING_LITERAL(T839829468_278, ", {", 3); STRING_LITERAL(T839829468_279, ",$n", 3); STRING_LITERAL(T839829468_280, "}", 1); STRING_LITERAL(T839829468_281, "NIM_CONST struct {$n #TGenericSeq Sup;$n $1 data[$2];$n} $3 =" " $4;$n", 69); STRING_LITERAL(T839829468_282, "(($1)&$2)", 9); STRING_LITERAL(T839829468_283, "$1,$n", 5); STRING_LITERAL(T839829468_284, "extern NIM_CONST $1 $2;$n", 25); STRING_LITERAL(T839829468_285, "expr: var not init ", 19); STRING_LITERAL(T839829468_286, "\011NimThreadVars* NimTV;$n", 24); STRING_LITERAL(T839829468_287, "\011NimTV = (NimThreadVars*) #GetThreadLocalVars();$n", 50); STRING_LITERAL(T839829468_288, "NimTV->", 7); STRING_LITERAL(T839829468_289, "expr: temp not init ", 20); STRING_LITERAL(T839829468_290, "expr: param not init ", 21); 
STRING_LITERAL(T839829468_291, "expr(", 5); STRING_LITERAL(T839829468_292, "); unknown symbol", 17); STRING_LITERAL(T839829468_293, "//", 2); STRING_LITERAL(T839829468_294, "#endb($1, $2);$n", 16); STRING_LITERAL(T839829468_295, "nimln($1, $2);$n", 16); STRING_LITERAL(T839829468_296, "LA", 2); STRING_LITERAL(T839829468_297, "if ($1) goto $2;$n", 18); STRING_LITERAL(T839829468_298, "if (!($1)) goto $2;$n", 21); STRING_LITERAL(T839829468_299, "$1: ;$n", 7); STRING_LITERAL(T839829468_300, "!($1)", 5); STRING_LITERAL(T839829468_301, "$1", 2); STRING_LITERAL(T839829468_302, "($3)((NU$2) ~($1))", 18); STRING_LITERAL(T839829468_303, "-($1)", 5); STRING_LITERAL(T839829468_304, "($1 > 0? ($1) : -($1))", 22); STRING_LITERAL(T839829468_305, "(($3)(NU)(NU8)($1))", 19); STRING_LITERAL(T839829468_306, "(($3)(NU64)(NU8)($1))", 21); STRING_LITERAL(T839829468_307, "(($3)(NU)(NU16)($1))", 20); STRING_LITERAL(T839829468_308, "(($3)(NU64)(NU16)($1))", 22); STRING_LITERAL(T839829468_309, "(($3)(NU64)(NU32)($1))", 22); STRING_LITERAL(T839829468_310, "(($3)(NU64)(NU)($1))", 20); STRING_LITERAL(T839829468_311, "(($3)(NU8)(NU)($1))", 19); STRING_LITERAL(T839829468_312, "(($3)(NU16)(NU)($1))", 20); STRING_LITERAL(T839829468_313, "(($3)(NU32)(NU64)($1))", 22); STRING_LITERAL(T839829468_314, "((double) ($1))", 15); STRING_LITERAL(T839829468_315, "float64ToInt32($1)", 18); STRING_LITERAL(T839829468_316, "float64ToInt64($1)", 18); NIM_CONST TY550655 unarithtab_550653_839829468 = {((NimStringDesc*) &T839829468_300), ((NimStringDesc*) &T839829468_301), ((NimStringDesc*) &T839829468_302), ((NimStringDesc*) &T839829468_301), ((NimStringDesc*) &T839829468_303), ((NimStringDesc*) &T839829468_304), ((NimStringDesc*) &T839829468_305), ((NimStringDesc*) &T839829468_306), ((NimStringDesc*) &T839829468_307), ((NimStringDesc*) &T839829468_308), ((NimStringDesc*) &T839829468_309), ((NimStringDesc*) &T839829468_310), ((NimStringDesc*) &T839829468_311), ((NimStringDesc*) &T839829468_312), ((NimStringDesc*) 
&T839829468_313), ((NimStringDesc*) &T839829468_314), ((NimStringDesc*) &T839829468_314), ((NimStringDesc*) &T839829468_315), ((NimStringDesc*) &T839829468_316)} ; STRING_LITERAL(T839829468_317, "if ($1 == $2) #raiseOverflow();$n", 33); STRING_LITERAL(T839829468_318, "((NI$2)-($1))", 13); NIM_CONST TY549642 opr_549640_839829468 = {((NimStringDesc*) &T839829468_318), ((NimStringDesc*) &T839829468_303), ((NimStringDesc*) &T839829468_304)} ; STRING_LITERAL(T839829468_319, "(($4)($2) $1 ($4)($3))", 22); STRING_LITERAL(T839829468_320, "+", 1); STRING_LITERAL(T839829468_321, "-", 1); STRING_LITERAL(T839829468_322, "/", 1); NIM_CONST TY554764 opr_554762_839829468 = {((NimStringDesc*) &T839829468_320), ((NimStringDesc*) &T839829468_321), ((NimStringDesc*) &T839829468_53), ((NimStringDesc*) &T839829468_322)} ; STRING_LITERAL(T839829468_323, "#nanCheck($1);$n", 16); STRING_LITERAL(T839829468_324, "#infCheck($1);$n", 16); STRING_LITERAL(T839829468_325, "(($4)($1) + ($4)($2))", 21); STRING_LITERAL(T839829468_326, "(($4)($1) - ($4)($2))", 21); STRING_LITERAL(T839829468_327, "(($4)($1) * ($4)($2))", 21); STRING_LITERAL(T839829468_328, "(($4)($1) / ($4)($2))", 21); STRING_LITERAL(T839829468_329, "($4)((NU$3)($1) >> (NU$3)($2))", 30); STRING_LITERAL(T839829468_330, "($4)((NU$3)($1) << (NU$3)($2))", 30); STRING_LITERAL(T839829468_331, "($4)($1 & $2)", 13); STRING_LITERAL(T839829468_332, "($4)($1 | $2)", 13); STRING_LITERAL(T839829468_333, "($4)($1 ^ $2)", 13); STRING_LITERAL(T839829468_334, "(($1 <= $2) ? $1 : $2)", 22); STRING_LITERAL(T839829468_335, "(($1 >= $2) ? 
$1 : $2)", 22); STRING_LITERAL(T839829468_336, "($4)((NU$3)($1) + (NU$3)($2))", 29); STRING_LITERAL(T839829468_337, "($4)((NU$3)($1) - (NU$3)($2))", 29); STRING_LITERAL(T839829468_338, "($4)((NU$3)($1) * (NU$3)($2))", 29); STRING_LITERAL(T839829468_339, "($4)((NU$3)($1) / (NU$3)($2))", 29); STRING_LITERAL(T839829468_340, "($4)((NU$3)($1) % (NU$3)($2))", 29); STRING_LITERAL(T839829468_341, "($1 == $2)", 10); STRING_LITERAL(T839829468_342, "($1 <= $2)", 10); STRING_LITERAL(T839829468_343, "($1 < $2)", 9); STRING_LITERAL(T839829468_344, "((NU$3)($1) <= (NU$3)($2))", 26); STRING_LITERAL(T839829468_345, "((NU$3)($1) < (NU$3)($2))", 25); STRING_LITERAL(T839829468_346, "((NU64)($1) <= (NU64)($2))", 26); STRING_LITERAL(T839829468_347, "((NU64)($1) < (NU64)($2))", 25); STRING_LITERAL(T839829468_348, "((NU8)($1) == (NU8)($2))", 24); STRING_LITERAL(T839829468_349, "((NU8)($1) <= (NU8)($2))", 24); STRING_LITERAL(T839829468_350, "((NU8)($1) < (NU8)($2))", 23); STRING_LITERAL(T839829468_351, "($1 != $2)", 10); NIM_CONST TY549828 binarithtab_549826_839829468 = {((NimStringDesc*) &T839829468_325), ((NimStringDesc*) &T839829468_326), ((NimStringDesc*) &T839829468_327), ((NimStringDesc*) &T839829468_328), ((NimStringDesc*) &T839829468_329), ((NimStringDesc*) &T839829468_330), ((NimStringDesc*) &T839829468_331), ((NimStringDesc*) &T839829468_332), ((NimStringDesc*) &T839829468_333), ((NimStringDesc*) &T839829468_334), ((NimStringDesc*) &T839829468_335), ((NimStringDesc*) &T839829468_334), ((NimStringDesc*) &T839829468_335), ((NimStringDesc*) &T839829468_336), ((NimStringDesc*) &T839829468_337), ((NimStringDesc*) &T839829468_338), ((NimStringDesc*) &T839829468_339), ((NimStringDesc*) &T839829468_340), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_342), ((NimStringDesc*) &T839829468_343), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_342), ((NimStringDesc*) &T839829468_343), ((NimStringDesc*) &T839829468_344), ((NimStringDesc*) &T839829468_345), 
((NimStringDesc*) &T839829468_346), ((NimStringDesc*) &T839829468_347), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_342), ((NimStringDesc*) &T839829468_343), ((NimStringDesc*) &T839829468_348), ((NimStringDesc*) &T839829468_349), ((NimStringDesc*) &T839829468_350), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_342), ((NimStringDesc*) &T839829468_343), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_341), ((NimStringDesc*) &T839829468_342), ((NimStringDesc*) &T839829468_343), ((NimStringDesc*) &T839829468_351)} ; STRING_LITERAL(T839829468_352, "($1.ClPrc == $2.ClPrc && $1.ClEnv == $2.ClEnv)", 46); STRING_LITERAL(T839829468_353, "($#)($# + $#)", 13); STRING_LITERAL(T839829468_354, "($#)($# - $#)", 13); STRING_LITERAL(T839829468_355, "($#)($# * $#)", 13); STRING_LITERAL(T839829468_356, "($#)($# / $#)", 13); STRING_LITERAL(T839829468_357, "($#)($# % $#)", 13); NIM_CONST TY549281 opr_549279_839829468 = {((NimStringDesc*) &T839829468_353), ((NimStringDesc*) &T839829468_354), ((NimStringDesc*) &T839829468_355), ((NimStringDesc*) &T839829468_356), ((NimStringDesc*) &T839829468_357), ((NimStringDesc*) &T839829468_353), ((NimStringDesc*) &T839829468_354)} ; STRING_LITERAL(T839829468_358, "((NU8)($1))", 11); STRING_LITERAL(T839829468_359, "if ($1 < $2 || $1 > $3) #raiseOverflow();$n", 43); STRING_LITERAL(T839829468_360, "$# = #addInt64($#, $#);$n", 25); STRING_LITERAL(T839829468_361, "$# = #subInt64($#, $#);$n", 25); STRING_LITERAL(T839829468_362, "$# = #mulInt64($#, $#);$n", 25); STRING_LITERAL(T839829468_363, "$# = #divInt64($#, $#);$n", 25); STRING_LITERAL(T839829468_364, "$# = #modInt64($#, $#);$n", 25); NIM_CONST TY549281 prc64_549274_839829468 = {((NimStringDesc*) &T839829468_360), ((NimStringDesc*) &T839829468_361), ((NimStringDesc*) &T839829468_362), ((NimStringDesc*) &T839829468_363), ((NimStringDesc*) &T839829468_364), ((NimStringDesc*) &T839829468_360), ((NimStringDesc*) &T839829468_361)} ; 
STRING_LITERAL(T839829468_365, "$# = #addInt($#, $#);$n", 23); STRING_LITERAL(T839829468_366, "$# = #subInt($#, $#);$n", 23); STRING_LITERAL(T839829468_367, "$# = #mulInt($#, $#);$n", 23); STRING_LITERAL(T839829468_368, "$# = #divInt($#, $#);$n", 23); STRING_LITERAL(T839829468_369, "$# = #modInt($#, $#);$n", 23); NIM_CONST TY549281 prc_549269_839829468 = {((NimStringDesc*) &T839829468_365), ((NimStringDesc*) &T839829468_366), ((NimStringDesc*) &T839829468_367), ((NimStringDesc*) &T839829468_368), ((NimStringDesc*) &T839829468_369), ((NimStringDesc*) &T839829468_365), ((NimStringDesc*) &T839829468_366)} ; STRING_LITERAL(T839829468_370, "($#)($#)", 8); STRING_LITERAL(T839829468_371, "#reprInt((NI64)$1)", 18); STRING_LITERAL(T839829468_372, "#reprFloat($1)", 14); STRING_LITERAL(T839829468_373, "#reprBool($1)", 13); STRING_LITERAL(T839829468_374, "#reprChar($1)", 13); STRING_LITERAL(T839829468_375, "#reprEnum((NI)$1, $2)", 21); STRING_LITERAL(T839829468_376, "#reprStr($1)", 12); STRING_LITERAL(T839829468_377, "#reprSet($1, $2)", 16); STRING_LITERAL(T839829468_378, "$1, $1Len0", 10); STRING_LITERAL(T839829468_379, "$1->data, $1->$2", 16); STRING_LITERAL(T839829468_380, "$1, $2", 6); STRING_LITERAL(T839829468_381, "genRepr()", 9); STRING_LITERAL(T839829468_382, "#reprOpenArray($1, $2)", 22); STRING_LITERAL(T839829468_383, "#reprAny($1, $2)", 16); STRING_LITERAL(T839829468_384, "\'repr\' doesn\'t support \'void\' type", 34); STRING_LITERAL(T839829468_385, "($1 - 1)", 8); STRING_LITERAL(T839829468_386, "#subInt($1, 1)", 14); STRING_LITERAL(T839829468_387, "binaryStmt", 10); STRING_LITERAL(T839829468_388, "$1 += $2;$n", 11); STRING_LITERAL(T839829468_389, "$1 -= $2;$n", 11); NIM_CONST TY555052 opr_555050_839829468 = {((NimStringDesc*) &T839829468_388), ((NimStringDesc*) &T839829468_389)} ; NIM_CONST TY555052 fun64_555055_839829468 = {((NimStringDesc*) &T839829468_360), ((NimStringDesc*) &T839829468_361)} ; NIM_CONST TY555052 fun_555060_839829468 = {((NimStringDesc*) 
&T839829468_365), ((NimStringDesc*) &T839829468_366)} ; STRING_LITERAL(T839829468_390, "#appendChar($1, $2);$n", 22); STRING_LITERAL(T839829468_391, "$1->$2 + ", 9); STRING_LITERAL(T839829468_392, "#appendString($1, $2);$n", 24); STRING_LITERAL(T839829468_393, "$1 = #rawNewString($2$3);$n", 27); STRING_LITERAL(T839829468_394, "$1 = #addChar($1, $2);$n", 24); STRING_LITERAL(T839829468_395, "$1 = #resizeString($1, $2$3);$n", 31); STRING_LITERAL(T839829468_396, "$1 = ($2) #incrSeqV2(&($1)->Sup, sizeof($3));$n", 47); STRING_LITERAL(T839829468_397, "$1 = ($2) #incrSeqV2($1, sizeof($3));$n", 39); STRING_LITERAL(T839829468_398, "$1->data[$1->$2]", 16); STRING_LITERAL(T839829468_399, "++$1->$2;$n", 11); STRING_LITERAL(T839829468_400, "(($1) && ($1)->$2 == 0)", 23); STRING_LITERAL(T839829468_401, "#eqStrings($1, $2)", 18); STRING_LITERAL(T839829468_402, "(#cmpStrings($1, $2) <= 0)", 26); STRING_LITERAL(T839829468_403, "(#cmpStrings($1, $2) < 0)", 25); STRING_LITERAL(T839829468_404, "$1.ClPrc == 0", 13); STRING_LITERAL(T839829468_405, "$1 == 0", 7); STRING_LITERAL(T839829468_406, "#nimIntToStr($1)", 16); STRING_LITERAL(T839829468_407, "#nimInt64ToStr($1)", 18); STRING_LITERAL(T839829468_408, "#nimBoolToStr($1)", 17); STRING_LITERAL(T839829468_409, "#nimCharToStr($1)", 17); STRING_LITERAL(T839829468_410, "#nimFloatToStr($1)", 18); STRING_LITERAL(T839829468_411, "#cstrToNimstr($1)", 17); STRING_LITERAL(T839829468_412, "no \'of\' operator available for pure objects", 43); STRING_LITERAL(T839829468_413, "(($1) && ($2))", 14); STRING_LITERAL(T839829468_414, "$1.m_type == $2", 15); STRING_LITERAL(T839829468_415, "Nim_OfCheck_CACHE", 17); STRING_LITERAL(T839829468_416, "static TNimType* $#[2];$n", 25); STRING_LITERAL(T839829468_417, "#isObjWithCache($#.m_type, $#, $#)", 34); STRING_LITERAL(T839829468_418, "($1)", 4); STRING_LITERAL(T839829468_419, "sizeof($1)", 10); STRING_LITERAL(T839829468_420, "if ($1) #nimGCunref($1);$n", 26); STRING_LITERAL(T839829468_421, "($1) #newObjRC1($2, 
$3)", 23); STRING_LITERAL(T839829468_422, "($1) #newObj($2, $3)", 20); STRING_LITERAL(T839829468_423, "$1->finalizer = (void*)$2;$n", 28); STRING_LITERAL(T839829468_424, "($1) #newObj($2, sizeof($3))", 28); STRING_LITERAL(T839829468_425, "($1) #newSeqRC1($2, $3)", 23); STRING_LITERAL(T839829468_426, "($1) #newSeq($2, $3)", 20); STRING_LITERAL(T839829468_427, "($1)#nimNewSeqOfCap($2, $3)", 27); STRING_LITERAL(T839829468_428, "((NI)sizeof($1))", 16); STRING_LITERAL(T839829468_429, "(*($1*) ($2))", 13); STRING_LITERAL(T839829468_430, "(($1) ($2))", 11); STRING_LITERAL(T839829468_431, "($1Len0-1)", 10); STRING_LITERAL(T839829468_432, "$1Len0", 6); STRING_LITERAL(T839829468_433, "($1 ? (strlen($1)-1) : -1)", 26); STRING_LITERAL(T839829468_434, "($1 ? strlen($1) : 0)", 21); STRING_LITERAL(T839829468_435, "($1 ? ($1->Sup.len-1) : -1)", 27); STRING_LITERAL(T839829468_436, "($1 ? $1->Sup.len : 0)", 22); STRING_LITERAL(T839829468_437, "($1 ? ($1->len-1) : -1)", 23); STRING_LITERAL(T839829468_438, "($1 ? 
$1->len : 0)", 18); STRING_LITERAL(T839829468_439, "genArrayLen()", 13); STRING_LITERAL(T839829468_440, "($1->Sup.len)", 13); STRING_LITERAL(T839829468_441, "$1->len", 7); STRING_LITERAL(T839829468_442, "unaryStmt", 9); STRING_LITERAL(T839829468_443, "#nimGCref($1);$n", 16); STRING_LITERAL(T839829468_444, "#nimGCunref($1);$n", 18); STRING_LITERAL(T839829468_445, "$1 = #setLengthStr($1, $2);$n", 29); STRING_LITERAL(T839829468_446, "$1 = ($3) #setLengthSeq(&($1)->Sup, sizeof($4), $2);$n", 54); STRING_LITERAL(T839829468_447, "$1 = ($3) #setLengthSeq($1, sizeof($4), $2);$n", 46); STRING_LITERAL(T839829468_448, "($1- $2)", 8); STRING_LITERAL(T839829468_449, "$1 |= ((", 8); STRING_LITERAL(T839829468_450, ")1)<<(($2)%(sizeof(", 19); STRING_LITERAL(T839829468_451, ")*8));$n", 8); STRING_LITERAL(T839829468_452, "$1 &= ~(((", 10); STRING_LITERAL(T839829468_453, ")1) << (($2) % (sizeof(", 23); STRING_LITERAL(T839829468_454, ")*8)));$n", 9); STRING_LITERAL(T839829468_455, "#countBits32($1)", 16); STRING_LITERAL(T839829468_456, "#countBits64($1)", 16); STRING_LITERAL(T839829468_457, "(($1 & ~ $2 ==0)&&($1 != $2))", 29); STRING_LITERAL(T839829468_458, "(($1 & ~ $2)==0)", 16); STRING_LITERAL(T839829468_459, "($1 & $2)", 9); STRING_LITERAL(T839829468_460, "($1 | $2)", 9); STRING_LITERAL(T839829468_461, "($1 & ~ $2)", 11); STRING_LITERAL(T839829468_462, "($1 ^ $2)", 9); STRING_LITERAL(T839829468_463, "fewCmps", 7); STRING_LITERAL(T839829468_464, "$1 >= $2 && $1 <= $3", 20); STRING_LITERAL(T839829468_465, "$1 == $2", 8); STRING_LITERAL(T839829468_466, " || ", 4); STRING_LITERAL(T839829468_467, "(($1 &(1U<<((NU)($2)&7U)))!=0)", 30); STRING_LITERAL(T839829468_468, "(($1 &(1U<<((NU)($2)&15U)))!=0)", 31); STRING_LITERAL(T839829468_469, "(($1 &(1U<<((NU)($2)&31U)))!=0)", 31); STRING_LITERAL(T839829468_470, "(($1 &((NU64)1<<((NU)($2)&63U)))!=0)", 36); STRING_LITERAL(T839829468_471, "(($1[(NU)($2)>>3] &(1U<<((NU)($2)&7U)))!=0)", 43); STRING_LITERAL(T839829468_472, "genSetOp()", 10); 
STRING_LITERAL(T839829468_473, "$1[(NU)($2)>>3] |=(1U<<($2&7U));$n", 34); STRING_LITERAL(T839829468_474, "$1[(NU)($2)>>3] &= ~(1U<<($2&7U));$n", 36); STRING_LITERAL(T839829468_475, "#cardSet($1, ", 13); STRING_LITERAL(T839829468_476, "for ($1 = 0; $1 < $2; $1++) { $n $3 = (($4[$1] & ~ $5[$1]) == " "0);$n if (!$3) break;}$n", 88); STRING_LITERAL(T839829468_477, "for ($1 = 0; $1 < $2; $1++) { $n $3 = (($4[$1] & ~ $5[$1]) == " "0);$n if (!$3) break;}$nif ($3) $3 = (memcmp($4, $5, $2) != 0);" "$n", 129); STRING_LITERAL(T839829468_478, "|", 1); STRING_LITERAL(T839829468_479, "& ~", 3); STRING_LITERAL(T839829468_480, "^", 1); NIM_CONST TY554428 lookupopr_554426_839829468 = {((NimStringDesc*) &T839829468_476), ((NimStringDesc*) &T839829468_477), ((NimStringDesc*) &T839829468_52), ((NimStringDesc*) &T839829468_478), ((NimStringDesc*) &T839829468_479), ((NimStringDesc*) &T839829468_480)} ; STRING_LITERAL(T839829468_481, "(memcmp($1, $2, ", 16); STRING_LITERAL(T839829468_482, ")==0)", 5); STRING_LITERAL(T839829468_483, "for ($1 = 0; $1 < $2; $1++) $n $3[$1] = $4[$1] $6 $5[$1];$n", 60); STRING_LITERAL(T839829468_484, "genSetOp", 8); STRING_LITERAL(T839829468_485, "$1->data", 8); STRING_LITERAL(T839829468_486, "($1)+($2), ($3)-($2)+1", 22); STRING_LITERAL(T839829468_487, "(*$1)->data+($2), ($3)-($2)+1", 29); STRING_LITERAL(T839829468_488, "$1->data+($2), ($3)-($2)+1", 26); STRING_LITERAL(T839829468_489, "openArrayLoc: ", 14); STRING_LITERAL(T839829468_490, "", 0); STRING_LITERAL(T839829468_491, "(*$1)->data, (*$1)->$2", 22); STRING_LITERAL(T839829468_492, "$1.ClPrc($3$1.ClEnv)", 20); STRING_LITERAL(T839829468_493, "$1.ClEnv? 
$1.ClPrc($3$1.ClEnv):(($4)($1.ClPrc))($2)", 51); STRING_LITERAL(T839829468_494, "$1 = 0;$n", 9); STRING_LITERAL(T839829468_495, "#chckNil((void*)$1);$n", 22); STRING_LITERAL(T839829468_496, "#genericReset((void*)$1, $2);$n", 31); STRING_LITERAL(T839829468_497, ";$n", 3); STRING_LITERAL(T839829468_499, "compiler/ccgcalls.nim", 21); NIM_CONST TY201018 T839829468_498 = {((NimStringDesc*) &T839829468_499), ((NI) 423)} ; static NIM_CONST char136Set T839829468_500 = { 0x00, 0x00, 0x00, 0x00, 0x88, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} ; STRING_LITERAL(T839829468_501, "wrong argument count", 20); STRING_LITERAL(T839829468_502, "call expression expected for C++ pattern", 40); NIM_CONST TY201018 T839829468_503 = {((NimStringDesc*) &T839829468_499), ((NI) 328)} ; STRING_LITERAL(T839829468_504, "->", 2); STRING_LITERAL(T839829468_505, ");$n", 4); STRING_LITERAL(T839829468_506, "[", 1); NIM_CONST TY201018 T839829468_507 = {((NimStringDesc*) &T839829468_499), ((NI) 472)} ; STRING_LITERAL(T839829468_508, "varargs for objective C method?", 31); STRING_LITERAL(T839829468_509, "Result: ", 8); STRING_LITERAL(T839829468_510, "];$n", 4); STRING_LITERAL(T839829468_511, "]", 1); NIM_CONST TY201018 T839829468_512 = {((NimStringDesc*) &T839829468_265), ((NI) 925)} ; STRING_LITERAL(T839829468_513, "<stdio.h>", 9); STRING_LITERAL(T839829468_514, ", \"nil\"", 7); STRING_LITERAL(T839829468_515, ", $1? 
($1)->data:\"nil\"", 22); STRING_LITERAL(T839829468_516, "printf($1$2);$n", 15); STRING_LITERAL(T839829468_517, "%s", 2); STRING_LITERAL(T839829468_518, "fflush(stdout);$n", 17); STRING_LITERAL(T839829468_519, "#genericDeepCopy((void*)$1, (void*)$2, $3);$n", 45); STRING_LITERAL(T839829468_520, "#genericSeqDeepCopy($1, $2, $3);$n", 34); STRING_LITERAL(T839829468_521, "#genericDeepCopyOpenArray((void*)$1, (void*)$2, $1Len0, $3);$n", 62); STRING_LITERAL(T839829468_522, "genDeepCopy: ", 13); STRING_LITERAL(T839829468_523, "genMagicExpr: ", 14); STRING_LITERAL(T839829468_524, "static NIM_CONST $1 $2 = $3;$n", 30); STRING_LITERAL(T839829468_525, "memset($1, 0, sizeof($1));$n", 28); STRING_LITERAL(T839829468_526, "for ($1 = $3; $1 <= $4; $1++) $n$2[(NU)($1)>>3] |=(1U<<((NU)($1" ")&7U));$n", 72); STRING_LITERAL(T839829468_527, "$1[(NU)($2)>>3] |=(1U<<((NU)($2)&7U));$n", 40); STRING_LITERAL(T839829468_528, "for ($1 = $3; $1 <= $4; $1++) $n$2 |=((", 39); STRING_LITERAL(T839829468_529, ")(1)<<(($1)%(sizeof(", 20); STRING_LITERAL(T839829468_530, "$1 |=((", 7); STRING_LITERAL(T839829468_531, ")(1)<<(($2)%(sizeof(", 20); STRING_LITERAL(T839829468_532, "genCheckedRecordField", 21); STRING_LITERAL(T839829468_533, "genObjConstr", 12); STRING_LITERAL(T839829468_534, "if ($1) #raiseFieldError(((#NimStringDesc*) &$2));$n", 52); STRING_LITERAL(T839829468_535, "if (!($1)) #raiseFieldError(((#NimStringDesc*) &$2));$n", 55); STRING_LITERAL(T839829468_536, "LOC$1.source", 12); STRING_LITERAL(T839829468_537, "union { $1 source; $2 dest; } LOC$3;$n", 38); STRING_LITERAL(T839829468_538, "LOC$#.dest", 10); STRING_LITERAL(T839829468_539, "if ((NU)($1) > (NU)($2)) #raiseIndexError();$n", 46); STRING_LITERAL(T839829468_540, "if ($1 < $2 || $1 > $3) #raiseIndexError();$n", 45); STRING_LITERAL(T839829468_541, "$1[($2)- $3]", 12); STRING_LITERAL(T839829468_542, "if ((NU)($1) >= (NU)($2Len0)) #raiseIndexError();$n", 51); STRING_LITERAL(T839829468_543, "if ((NU)($1) > (NU)($2->$3)) 
#raiseIndexError();$n", 50); STRING_LITERAL(T839829468_544, "if ((NU)($1) >= (NU)($2->$3)) #raiseIndexError();$n", 51); STRING_LITERAL(T839829468_545, "genTupleElem", 12); STRING_LITERAL(T839829468_546, ".Field$1", 8); STRING_LITERAL(T839829468_547, "expr(nkBracketExpr, ", 20); STRING_LITERAL(T839829468_548, "genDeref ", 9); STRING_LITERAL(T839829468_549, "genRecordFieldAux", 17); STRING_LITERAL(T839829468_550, "genRecordField 3", 16); STRING_LITERAL(T839829468_551, ".$1", 3); STRING_LITERAL(T839829468_552, "} $1: ;$n", 9); STRING_LITERAL(T839829468_553, "FR.len-=$1;$n", 13); STRING_LITERAL(T839829468_554, "FR.len+=$1;$n", 13); STRING_LITERAL(T839829468_555, "if (!$1) goto $2;$n", 19); STRING_LITERAL(T839829468_556, "goto $1;$n", 10); STRING_LITERAL(T839829468_557, "genIf()", 7); STRING_LITERAL(T839829468_558, "->Sup", 5); STRING_LITERAL(T839829468_559, "$1 = &$2;$n", 11); STRING_LITERAL(T839829468_560, "if ($1) #chckObj($2.m_type, $3);$n", 34); STRING_LITERAL(T839829468_561, "#chckObj($1.m_type, $2);$n", 26); STRING_LITERAL(T839829468_562, "(($1)#$5($2, $3, $4))", 21); STRING_LITERAL(T839829468_563, "chckRangeF", 10); STRING_LITERAL(T839829468_564, "chckRange64", 11); STRING_LITERAL(T839829468_565, "chckRange", 9); STRING_LITERAL(T839829468_566, "CNSTCLOSURE", 11); STRING_LITERAL(T839829468_567, "closure to closure created", 26); STRING_LITERAL(T839829468_568, "$1.ClPrc = $2; $1.ClEnv = $3;$n", 31); STRING_LITERAL(T839829468_569, "while (1) {$n", 13); STRING_LITERAL(T839829468_570, "case statement must be exhaustive for computed goto", 51); STRING_LITERAL(T839829468_571, "case statement has too many cases for computed goto", 51); STRING_LITERAL(T839829468_572, "case statement has to start at 0 for computed goto", 50); STRING_LITERAL(T839829468_573, "no case statement found for computed goto", 41); STRING_LITERAL(T839829468_574, "TMP$1", 5); STRING_LITERAL(T839829468_575, "static void* $#[$#] = {", 23); STRING_LITERAL(T839829468_576, "&&TMP$#, ", 9); 
STRING_LITERAL(T839829468_577, "&&TMP$#};$n", 11); STRING_LITERAL(T839829468_578, "goto *$#[$#];$n", 15); STRING_LITERAL(T839829468_579, "range notation not available for computed goto", 46); STRING_LITERAL(T839829468_580, "TMP$#:$n", 8); STRING_LITERAL(T839829468_581, "#nimProfile();$n", 16); STRING_LITERAL(T839829468_582, "\'goto\' target must be a literal value", 37); STRING_LITERAL(T839829468_583, "goto NIMSTATE_$#;$n", 19); STRING_LITERAL(T839829468_584, "$1 = ($2*) #nimGetProcAddr($3, $4);$n", 37); STRING_LITERAL(T839829468_585, "$2* $1;$n", 9); STRING_LITERAL(T839829468_586, "#dbgRegisterGlobal($1, &$2, $3);$n", 34); STRING_LITERAL(T839829468_587, "#nimGCvisit((void*)$1, 0);$n", 28); STRING_LITERAL(T839829468_588, "N_NIMCALL(void, $1)(void)", 25); STRING_LITERAL(T839829468_589, "#nimRegisterGlobalMarker($1);$n", 31); STRING_LITERAL(T839829468_590, "$#($#);$n", 9); STRING_LITERAL(T839829468_591, "$# = $#;$n", 10); STRING_LITERAL(T839829468_592, "genVarTuple", 11); STRING_LITERAL(T839829468_593, "genConstStmt", 12); STRING_LITERAL(T839829468_594, "for statement not eliminated", 28); STRING_LITERAL(T839829468_595, "if (#eqStrings($1, $2)) goto $3;$n", 34); STRING_LITERAL(T839829468_596, "switch (#hashString($1) & $2) {$n", 33); STRING_LITERAL(T839829468_597, "case $1: $n$2break;$n", 21); STRING_LITERAL(T839829468_598, "goto LA$1;$n", 12); STRING_LITERAL(T839829468_599, "LA$1: ;$n", 9); STRING_LITERAL(T839829468_600, "if ($1 >= $2 && $1 <= $3) goto $4;$n", 36); STRING_LITERAL(T839829468_601, "if ($1 == $2) goto $3;$n", 24); STRING_LITERAL(T839829468_602, "NIMSTATE_$#:$n", 14); STRING_LITERAL(T839829468_603, "switch ($1) {$n", 15); STRING_LITERAL(T839829468_604, "default: __assume(0);$n", 23); STRING_LITERAL(T839829468_605, "#popSafePoint();$n", 18); STRING_LITERAL(T839829468_606, "#popCurrentException();$n", 25); STRING_LITERAL(T839829468_607, "if ($1.status != 0) #popCurrentException();$n", 45); STRING_LITERAL(T839829468_608, "goto BeforeRet;$n", 17); 
STRING_LITERAL(T839829468_609, "no loop to break", 16); STRING_LITERAL(T839829468_610, "extern $1", 9); STRING_LITERAL(T839829468_611, "#FieldDiscriminantCheck((NI)(NU)($1), (NI)(NU)($2), $3, $4);$n", 62); STRING_LITERAL(T839829468_612, "genAsmOrEmitStmt()", 18); STRING_LITERAL(T839829468_613, "\"", 1); STRING_LITERAL(T839829468_614, "\\n\"\015\012", 5); STRING_LITERAL(T839829468_615, "Exception", 9); STRING_LITERAL(T839829468_616, "E_Base", 6); STRING_LITERAL(T839829468_617, "try {$n", 7); STRING_LITERAL(T839829468_618, "} catch (NimException& $1) {$n", 30); STRING_LITERAL(T839829468_619, "#setFrame((TFrame*)&FR);$n", 26); STRING_LITERAL(T839829468_620, "else ", 5); STRING_LITERAL(T839829468_621, "#isObj($1.exp->m_type, $2)", 26); STRING_LITERAL(T839829468_622, "if ($1) ", 8); STRING_LITERAL(T839829468_623, "throw;$n", 8); STRING_LITERAL(T839829468_624, "<setjmp.h>", 10); STRING_LITERAL(T839829468_625, "#TSafePoint $1;$n", 17); STRING_LITERAL(T839829468_626, "#pushSafePoint(&$1);$n", 22); STRING_LITERAL(T839829468_627, "nimStdSetjmp", 12); STRING_LITERAL(T839829468_628, "$1.status = setjmp($1.context);$n", 33); STRING_LITERAL(T839829468_629, "nimSigSetjmp", 12); STRING_LITERAL(T839829468_630, "$1.status = sigsetjmp($1.context, 0);$n", 39); STRING_LITERAL(T839829468_631, "nimRawSetjmp", 12); STRING_LITERAL(T839829468_632, "$1.status = _setjmp($1.context);$n", 34); STRING_LITERAL(T839829468_633, "if ($1.status == 0) {$n", 23); STRING_LITERAL(T839829468_634, "else {$n", 8); STRING_LITERAL(T839829468_635, "else", 4); STRING_LITERAL(T839829468_636, "$1.status = 0;$n", 16); STRING_LITERAL(T839829468_637, "#isObj(#getCurrentException()->Sup.m_type, $1)", 46); STRING_LITERAL(T839829468_638, "#isObj(#getCurrentException()->m_type, $1)", 42); STRING_LITERAL(T839829468_639, "if ($1) {$n", 11); STRING_LITERAL(T839829468_640, "if ($1.status != 0) #reraiseException();$n", 42); STRING_LITERAL(T839829468_641, "#raiseException((#Exception*)$1, $2);$n", 39); 
STRING_LITERAL(T839829468_642, "#reraiseException();$n", 22); STRING_LITERAL(T839829468_643, "/*TYPESECTION*/", 15); STRING_LITERAL(T839829468_644, "/*VARSECTION*/", 14); STRING_LITERAL(T839829468_645, "/*INCLUDESECTION*/", 18); STRING_LITERAL(T839829468_646, "bp", 2); STRING_LITERAL(T839829468_647, "#dbgRegisterBreakpoint($1, (NCSTRING)$2, (NCSTRING)$3);$n", 57); STRING_LITERAL(T839829468_648, "#dbgRegisterWatchpoint($1, (NCSTRING)$2, $3);$n", 47); STRING_LITERAL(T839829468_649, "#pragma omp parallel for $4$nfor ($1 = $2; $1 <= $3; ++$1)", 58); STRING_LITERAL(T839829468_651, "compiler/ccgstmts.nim", 21); NIM_CONST TY201018 T839829468_650 = {((NimStringDesc*) &T839829468_651), ((NI) 145)} ; STRING_LITERAL(T839829468_652, "STATE$1: ;$n", 12); STRING_LITERAL(T839829468_653, "case -1: goto BeforeRet;$n", 26); STRING_LITERAL(T839829468_654, "case $1: goto STATE$1;$n", 24); STRING_LITERAL(T839829468_655, "if (((NI*) $1)[0] < 0) break;$n", 31); STRING_LITERAL(T839829468_656, "if ((((NI*) $1.ClEnv)[0]) < 0) break;$n", 39); STRING_LITERAL(T839829468_657, "); unknown node kind", 20); NIM_CONST TY201018 T839829468_658 = {((NimStringDesc*) &T839829468_651), ((NI) 1122)} ; STRING_LITERAL(T839829468_659, "Init000", 7); STRING_LITERAL(T839829468_660, "DatInit000", 10); STRING_LITERAL(T839829468_661, "NIM_EXTERNC N_NOINLINE(void, $1)(void);$N", 41); STRING_LITERAL(T839829468_662, "\011$1();$N", 8); STRING_LITERAL(T839829468_663, "N_CDECL(void, NimMainInner)(void) {$N$1}$N$NN_CDECL(void, NimMa" "in)(void) {$N\011void (*volatile inner)();$N\011PreMain();$N\011inner = N" "imMainInner;$N$2\011(*inner)();$N}$N$N", 162); STRING_LITERAL(T839829468_664, "N_STDCALL(int, WinMain)(HINSTANCE hCurInstance, $N " " HINSTANCE hPrevInstance, $N LP" "STR lpCmdLine, int nCmdShow) {$N\011NimMain();$N\011return nim_program" "_result;$N}$N$N", 206); STRING_LITERAL(T839829468_665, "N_LIB_EXPORT N_CDECL(void, NimMainInner)(void) {$N$1}$N$NN_CDEC" "L(void, NimMain)(void) {$N\011void (*volatile 
inner)();$N\011PreMain()" ";$N\011inner = NimMainInner;$N$2\011(*inner)();$N}$N$N", 175); STRING_LITERAL(T839829468_666, "BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fwdreason, $N " " LPVOID lpvReserved) {$N\011if(fwdreason == DLL_PROC" "ESS_ATTACH) {$N\011NimMain();$N}$N\011return 1;$N}$N$N", 175); STRING_LITERAL(T839829468_667, "<windows.h>", 11); STRING_LITERAL(T839829468_668, "void NIM_POSIX_INIT NimMainInit(void) {$N\011NimMain();$N}$N$N", 59); STRING_LITERAL(T839829468_669, "int cmdCount;$Nchar** cmdLine;$Nchar** gEnv;$NN_CDECL(void, Nim" "MainInner)(void) {$N$1}$N$NN_CDECL(void, NimMain)(void) {$N\011void" " (*volatile inner)();$N\011PreMain();$N\011inner = NimMainInner;$N$2\011(" "*inner)();$N}$N$N", 208); STRING_LITERAL(T839829468_670, "int main(void) {$N\011NimMain();$N\011return 0;$N}$N$N", 48); STRING_LITERAL(T839829468_671, "int main(int argc, char** args, char** env) {$N\011cmdLine = args;" "$N\011cmdCount = argc;$N\011gEnv = env;$N\011NimMain();$N\011return nim_prog" "ram_result;$N}$N$N", 145); STRING_LITERAL(T839829468_672, "dbgRegisterBreakpoint", 21); STRING_LITERAL(T839829468_673, "dbgRegisterFilename", 19); STRING_LITERAL(T839829468_674, "dbgRegisterFilename($1);$N", 26); STRING_LITERAL(T839829468_675, "\011#initStackBottomWith((void *)&inner);$N", 40); STRING_LITERAL(T839829468_676, "void PreMainInner() {$N\011systemInit000();$N$1$2$3}$N$Nvoid PreMa" "in() {$N\011void (*volatile inner)();$N\011systemDatInit000();$N\011inner" " = PreMainInner;$N$4$5\011(*inner)();$N}$N$N", 168); STRING_LITERAL(T839829468_677, "\011#initThreadVarsEmulation();$N", 30); STRING_LITERAL(T839829468_678, "still forwarded: ", 17); STRING_LITERAL(T839829468_679, "NIM_EXTERNC N_NOINLINE(void, $1)(void) {$N", 42); STRING_LITERAL(T839829468_680, "static #TNimNode $1[$2];$n", 26); STRING_LITERAL(T839829468_681, "static #TNimType $1[$2];$n", 26); STRING_LITERAL(T839829468_682, "\011TFrame FR; FR.len = 0;$N", 25); STRING_LITERAL(T839829468_683, "}$N$N", 5); 
STRING_LITERAL(T839829468_684, "N_NIMCALL(void, nimLoadProcs$1)(void) {$2}$N$N", 46); STRING_LITERAL(T839829468_685, "/* Generated by Nim Compiler v$1 */$N/* (c) 2016 Andreas Rump" "f */$N/* The generated code is subject to the original license. " "*/$N", 131); STRING_LITERAL(T839829468_686, "0.15.0", 6); STRING_LITERAL(T839829468_687, "/* Generated by Nim Compiler v$1 */$N/* (c) 2016 Andreas Rump" "f */$N/* The generated code is subject to the original license. " "*/$N/* Compiled for: $2, $3, $4 */$N/* Command for C compiler:$n" " $5 */$N", 201); extern NIM_CONST TY175082 Os_175068_4151366050; extern NIM_CONST TY175510 Cpu_175496_4151366050; STRING_LITERAL(T839829468_688, "#define NIM_INTBITS $1", 22); STRING_LITERAL(T839829468_689, "typedef struct {$1} NimThreadVars;$n", 36); STRING_LITERAL(T839829468_690, "#include \"nimbase.h\"", 20); STRING_LITERAL(T839829468_691, "#include \"$1\"$N", 15); STRING_LITERAL(T839829468_692, "#include $1$N", 13); STRING_LITERAL(T839829468_693, "extern \"C\"", 10); STRING_LITERAL(T839829468_694, "$#NI NimThreadVarsSize(){return (NI)sizeof(NimThreadVars);}$n", 61); STRING_LITERAL(T839829468_695, "__$1__", 6); STRING_LITERAL(T839829468_696, "#ifndef $1$n#define $1$n", 24); STRING_LITERAL(T839829468_697, "N_CDECL(void, NimMain)(void);$n", 31); STRING_LITERAL(T839829468_698, "#endif /* $1 */$n", 17); Tcgen527027* generatedheader_530201_839829468; extern TNimType NTI527015; /* BModule */ Ropeobj177006* indent_530655_839829468; extern TNimType NTI177004; /* Rope */ extern Gcheap49418 gch_49458_1689653243; Ropeobj177006* nimtv_536656_839829468; Ttypeseq290836* nimtvdeps_536674_839829468; extern TNimType NTI290836; /* TTypeSeq */ Intset266030 nimtvdeclared_536675_839829468; extern TNimType NTI266030; /* IntSet */ NI breakpointid_546860_839829468; Ropeobj177006* gbreakpoints_546861_839829468; extern TY527153* gmodules_527170_3723162438; extern TNimType NTI527027; /* TCGen */ extern Debuginfo201009 gdebuginfo_201470_1926258066; extern 
Toption168009Set goptions_168128_2607990831; extern TNimType NTI290804; /* TSymSeq */ extern Tglobaloption168013Set gglobaloptions_168130_2607990831; extern NimStringDesc* headerfile_168138_2607990831; extern NimStringDesc* gprojectfull_168211_2607990831; extern Tcommands168076 gcmd_168132_2607990831; extern NI gerrorcounter_190072_155036129; extern Ropeobj177006* rnl_177903_2381377266; extern NI gforwardedprocscounter_527171_3723162438; extern TNimType NTI290244; /* TTypeKind */ extern TNimType NTI201017; /* seq[(string, int)] */ extern Tsystemcc271002 ccompiler_271431_2528170400; extern NimStringDesc* tnl_175644_4151366050; extern NI floatsize_175642_4151366050; extern Tgcmode168080 gselectedgc_168133_2607990831; extern TNimType NTI290020; /* TNodeKind */ extern TNimType NTI134602; /* seq[string] */ extern TNimType NTI290435; /* TSymKind */ extern TNimType NTI290816; /* TLoc */ extern NI intsize_175641_4151366050; extern TNimType NTI290524; /* TMagic */ extern TNimType NTI189350; /* seq[Rope] */ extern TNimType NTI290796; /* TNodeSeq */ extern Ropeobj177006* mainmodprocs_527148_3723162438; extern Ropeobj177006* maindatinit_527151_3723162438; extern Ropeobj177006* mainmodinit_527149_3723162438; extern Ropeobj177006* othermodsinit_527150_3723162438; extern Tsystemos175004 targetos_175629_4151366050; extern TY189612* fileinfos_189629_155036129; extern Tsystemcpu175452 targetcpu_175627_4151366050; extern Ropeobj177006* gmapping_527152_3723162438; N_NIMCALL(void, T839829468_2)(void) { nimGCvisit((void*)generatedheader_530201_839829468, 0); } N_NIMCALL(void, T839829468_3)(void) { nimGCvisit((void*)indent_530655_839829468, 0); } static N_INLINE(Cell46904*, usrtocell_51040_1689653243)(void* usr0) { Cell46904* result0; result0 = (Cell46904*)0; result0 = ((Cell46904*) ((NI)((NU32)(((NI) (usr0))) - (NU32)(((NI)sizeof(Cell46904)))))); return result0; } static N_INLINE(void, rtladdzct_52201_1689653243)(Cell46904* c0) { addzct_51017_1689653243((&gch_49458_1689653243.zct), c0); 
} static N_INLINE(void, asgnRefNoCycle)(void** dest0, void* src0) { { Cell46904* c0; if (!!((src0 == NIM_NIL))) goto LA3; c0 = usrtocell_51040_1689653243(src0); (*c0).refcount += ((NI) 8); } LA3: ; { Cell46904* c0; if (!!(((*dest0) == NIM_NIL))) goto LA7; c0 = usrtocell_51040_1689653243((*dest0)); { (*c0).refcount -= ((NI) 8); if (!((NU32)((*c0).refcount) < (NU32)(((NI) 8)))) goto LA11; rtladdzct_52201_1689653243(c0); } LA11: ; } LA7: ; (*dest0) = src0; } N_NIMCALL(void, T839829468_5)(void) { nimGCvisit((void*)nimtv_536656_839829468, 0); } N_NIMCALL(void, T839829468_6)(void) { nimGCvisit((void*)nimtvdeps_536674_839829468, 0); } static N_INLINE(void, nimGCunrefNoCycle)(void* p0) { Cell46904* c0; c0 = usrtocell_51040_1689653243(p0); { (*c0).refcount -= ((NI) 8); if (!((NU32)((*c0).refcount) < (NU32)(((NI) 8)))) goto LA3; rtladdzct_52201_1689653243(c0); } LA3: ; } N_NIMCALL(void, T839829468_7)(void) { nimGCvisit((void*)nimtvdeclared_536675_839829468.head, 0); nimGCvisit((void*)nimtvdeclared_536675_839829468.data, 0); } N_NIMCALL(void, T839829468_8)(void) { nimGCvisit((void*)gbreakpoints_546861_839829468, 0); } N_NIMCALL(Tcgen527027*, getcgenmodule_530226_839829468)(Tsym290834* s0) { Tcgen527027* result0; result0 = (Tcgen527027*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (((NI) 0) <= (*s0).position); if (!(LOC3)) goto LA4; LOC3 = ((*s0).position < (gmodules_527170_3723162438 ? 
gmodules_527170_3723162438->Sup.len : 0)); LA4: ; if (!LOC3) goto LA5; result0 = gmodules_527170_3723162438->data[(*s0).position]; } goto LA1; LA5: ; { result0 = NIM_NIL; } LA1: ; return result0; } static N_INLINE(void, copymem_7485_1689653243)(void* dest0, void* source0, NI size0) { void* LOC1; LOC1 = (void*)0; LOC1 = memcpy(dest0, source0, ((size_t) (size0))); } static N_INLINE(void, appendString)(NimStringDesc* dest0, NimStringDesc* src0) { copymem_7485_1689653243(((void*) ((&(*dest0).data[((*dest0).Sup.len)- 0]))), ((void*) ((*src0).data)), ((NI) ((NI)((*src0).Sup.len + ((NI) 1))))); (*dest0).Sup.len += (*src0).Sup.len; } N_NIMCALL(NU32, hashowner_530977_839829468)(Tsym290834* s0) { NU32 result0; Tsym290834* m0; Tsym290834* p0; result0 = (NU32)0; m0 = s0; { while (1) { if (!!(((*m0).kind == ((Tsymkind290435) 6)))) goto LA2; m0 = (*m0).owner; } LA2: ; } p0 = (*m0).owner; result0 = register_201121_1926258066((&gdebuginfo_201470_1926258066), (*(*p0).name).s, (*(*m0).name).s); return result0; } static N_INLINE(void, incref_53019_1689653243)(Cell46904* c0) { (*c0).refcount = (NI)((NU32)((*c0).refcount) + (NU32)(((NI) 8))); } static N_INLINE(void, decref_52601_1689653243)(Cell46904* c0) { { (*c0).refcount -= ((NI) 8); if (!((NU32)((*c0).refcount) < (NU32)(((NI) 8)))) goto LA3; rtladdzct_52201_1689653243(c0); } LA3: ; } static N_INLINE(void, asgnRef)(void** dest0, void* src0) { { Cell46904* LOC5; if (!!((src0 == NIM_NIL))) goto LA3; LOC5 = (Cell46904*)0; LOC5 = usrtocell_51040_1689653243(src0); incref_53019_1689653243(LOC5); } LA3: ; { Cell46904* LOC10; if (!!(((*dest0) == NIM_NIL))) goto LA8; LOC10 = (Cell46904*)0; LOC10 = usrtocell_51040_1689653243((*dest0)); decref_52601_1689653243(LOC10); } LA8: ; (*dest0) = src0; } N_NIMCALL(Toption168009Set, initprocoptions_560635_839829468)(Tcgen527027* m0) { Toption168009Set result0; memset((void*)(&result0), 0, sizeof(result0)); { if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag290184) 13))&31U)))!=0)) goto LA3; result0 = 
(goptions_168128_2607990831 & ~ 32768); } goto LA1; LA3: ; { result0 = goptions_168128_2607990831; } LA1: ; return result0; } N_NIMCALL(Tcproc527021*, newpreinitproc_560625_839829468)(Tcgen527027* m0) { Tcproc527021* result0; result0 = (Tcproc527021*)0; result0 = newproc_527206_3723162438(NIM_NIL, m0); (*result0).labels = ((NI) 100000); return result0; } N_NIMCALL(Tcproc527021*, newpostinitproc_560630_839829468)(Tcgen527027* m0) { Tcproc527021* result0; result0 = (Tcproc527021*)0; result0 = newproc_527206_3723162438(NIM_NIL, m0); (*result0).labels = ((NI) 200000); return result0; } N_NIMCALL(Ropeobj177006*, gettempname_531596_839829468)(Tcgen527027* m0) { Ropeobj177006* result0; Ropeobj177006* LOC1; result0 = (Ropeobj177006*)0; LOC1 = (Ropeobj177006*)0; LOC1 = rope_177401_2381377266(((NI64) ((*m0).labels))); result0 = HEX26_177418_2381377266((*m0).tmpbase, LOC1); (*m0).labels += ((NI) 1); return result0; } N_NIMCALL(Tcgen527027*, rawnewmodule_560663_839829468)(Tsym290834* module0, NimStringDesc* filename0) { Tcgen527027* result0; NimStringDesc* LOC1; NU32 LOC2; NimStringDesc* LOC3; NimStringDesc* LOC4; NimStringDesc* LOC5; result0 = (Tcgen527027*)0; result0 = (Tcgen527027*) newObj((&NTI527015), sizeof(Tcgen527027)); (*result0).Sup.Sup.m_type = (&NTI527027); LOC1 = (NimStringDesc*)0; LOC2 = (NU32)0; LOC2 = hashowner_530977_839829468(module0); LOC3 = (NimStringDesc*)0; LOC3 = HEX24_8401_1689653243(((NU64) (LOC2))); LOC1 = rawNewString(LOC3->Sup.len + 2); appendString(LOC1, ((NimStringDesc*) &T839829468_11)); appendString(LOC1, LOC3); appendString(LOC1, ((NimStringDesc*) &T839829468_12)); asgnRefNoCycle((void**) (&(*result0).tmpbase), rope_177277_2381377266(LOC1)); initlinkedlist_147031_3771138726((&(*result0).headerfiles)); initintset_266885_2627731572((&(*result0).declaredthings)); initintset_266885_2627731572((&(*result0).declaredprotos)); LOC4 = (NimStringDesc*)0; LOC4 = (*result0).cfilename; (*result0).cfilename = copyStringRC1(filename0); if (LOC4) 
nimGCunrefNoCycle(LOC4); LOC5 = (NimStringDesc*)0; LOC5 = (*result0).filename; (*result0).filename = copyStringRC1(filename0); if (LOC5) nimGCunrefNoCycle(LOC5); initidtable_294019_850551059((&(*result0).typecache)); initidtable_294019_850551059((&(*result0).forwtypecache)); asgnRefNoCycle((void**) (&(*result0).module), module0); initintset_266885_2627731572((&(*result0).typeinfomarker)); asgnRef((void**) (&(*result0).initproc), newproc_527206_3723162438(NIM_NIL, result0)); (*(*result0).initproc).options = initprocoptions_560635_839829468(result0); asgnRef((void**) (&(*result0).preinitproc), newpreinitproc_560625_839829468(result0)); asgnRef((void**) (&(*result0).postinitproc), newpostinitproc_560630_839829468(result0)); initnodetable_294085_850551059((&(*result0).datacache)); if ((*result0).typestack) nimGCunrefNoCycle((*result0).typestack); (*result0).typestack = (Ttypeseq290836*) newSeqRC1((&NTI290836), 0); if ((*result0).forwardedprocs) nimGCunrefNoCycle((*result0).forwardedprocs); (*result0).forwardedprocs = (Tsymseq290804*) newSeqRC1((&NTI290804), 0); asgnRefNoCycle((void**) (&(*result0).typenodesname), gettempname_531596_839829468(result0)); asgnRefNoCycle((void**) (&(*result0).nimtypesname), gettempname_531596_839829468(result0)); { if (!(((*module0).flags &(1U<<((NU)(((Tsymflag290184) 13))&31U)))!=0)) goto LA8; (*result0).flags |= ((NU8)1)<<((((Codegenflag527025) 0))%(sizeof(NU8)*8)); (*(*result0).preinitproc).options &= ~(((NU32)1) << ((((Toption168009) 15)) % (sizeof(NU32)*8))); (*(*result0).postinitproc).options &= ~(((NU32)1) << ((((Toption168009) 15)) % (sizeof(NU32)*8))); } LA8: ; return result0; } N_NIMCALL(Tcgen527027*, rawnewmodule_561038_839829468)(Tsym290834* module0) { Tcgen527027* result0; NimStringDesc* LOC1; result0 = (Tcgen527027*)0; LOC1 = (NimStringDesc*)0; LOC1 = tofullpath_190264_155036129(((NI32) ((*module0).position))); result0 = rawnewmodule_560663_839829468(module0, LOC1); return result0; } N_NIMCALL(Tcgen527027*, 
newmodule_561045_839829468)(Tsym290834* module0) { Tcgen527027* result0; result0 = (Tcgen527027*)0; { Tcgen527027* LOC3; NimStringDesc* LOC6; LOC3 = (Tcgen527027*)0; LOC3 = getcgenmodule_530226_839829468(module0); if (!!((LOC3 == NIM_NIL))) goto LA4; LOC6 = (NimStringDesc*)0; LOC6 = HEX24_194185_1689653243(T839829468_9); internalerror_194113_155036129(LOC6); } LA4: ; result0 = rawnewmodule_561038_839829468(module0); { if (!((gmodules_527170_3723162438 ? gmodules_527170_3723162438->Sup.len : 0) <= (*module0).position)) goto LA9; gmodules_527170_3723162438 = (TY527153*) setLengthSeq(&(gmodules_527170_3723162438)->Sup, sizeof(Tcgen527027*), ((NI) ((NI)((*module0).position + ((NI) 1))))); } LA9: ; asgnRef((void**) (&gmodules_527170_3723162438->data[(*module0).position]), result0); { if (!((gglobaloptions_168130_2607990831 &((NU64)1<<((NU)(((Tglobaloption168013) 2))&63U)))!=0)) goto LA13; { NimStringDesc* LOC19; NimStringDesc* LOC20; if (!(((*module0).flags &(1U<<((NU)(((Tsymflag290184) 25))&31U)))!=0)) goto LA17; LOC19 = (NimStringDesc*)0; LOC20 = (NimStringDesc*)0; LOC20 = tofilename_190260_155036129(((NI32) ((*module0).position))); LOC19 = rawNewString(LOC20->Sup.len + 28); appendString(LOC19, ((NimStringDesc*) &T839829468_13)); appendString(LOC19, LOC20); internalerror_194113_155036129(LOC19); } LA17: ; } LA13: ; return result0; } N_NIMCALL(Tpasscontext339002*, myopen_561115_839829468)(Tsym290834* module0) { Tpasscontext339002* result0; Tcgen527027* LOC1; result0 = (Tpasscontext339002*)0; LOC1 = (Tcgen527027*)0; LOC1 = newmodule_561045_839829468(module0); result0 = &LOC1->Sup; { NIM_BOOL LOC4; NimStringDesc* f0; NimStringDesc* LOC13; NimStringDesc* LOC14; LOC4 = (NIM_BOOL)0; LOC4 = ((gglobaloptions_168130_2607990831 &((NU64)1<<((NU)(((Tglobaloption168013) 27))&63U)))!=0); if (!(LOC4)) goto LA5; LOC4 = (generatedheader_530201_839829468 == NIM_NIL); LA5: ; if (!LOC4) goto LA6; { if (!(((NI) 0) < (headerfile_168138_2607990831 ? 
headerfile_168138_2607990831->Sup.len : 0))) goto LA10; f0 = headerfile_168138_2607990831; } goto LA8; LA10: ; { f0 = gprojectfull_168211_2607990831; } LA8: ; LOC13 = (NimStringDesc*)0; LOC13 = completecfilepath_271854_2528170400(f0, NIM_TRUE); LOC14 = (NimStringDesc*)0; LOC14 = noschangeFileExt(LOC13, ((NimStringDesc*) &T839829468_14)); asgnRef((void**) (&generatedheader_530201_839829468), rawnewmodule_560663_839829468(module0, LOC14)); (*generatedheader_530201_839829468).flags |= ((NU8)1)<<((((Codegenflag527025) 3))%(sizeof(NU8)*8)); } LA6: ; return result0; } N_NIMCALL(NimStringDesc*, getcfile_561204_839829468)(Tcgen527027* m0) { NimStringDesc* result0; NimStringDesc* ext0; NimStringDesc* LOC13; NimStringDesc* LOC14; result0 = (NimStringDesc*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA4: ; if (!LOC3) goto LA5; ext0 = copyString(((NimStringDesc*) &T839829468_15)); } goto LA1; LA5: ; { NIM_BOOL LOC8; LOC8 = (NIM_BOOL)0; LOC8 = (gcmd_168132_2607990831 == ((Tcommands168076) 3)); if (LOC8) goto LA9; LOC8 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag290184) 28))&31U)))!=0); LA9: ; if (!LOC8) goto LA10; ext0 = copyString(((NimStringDesc*) &T839829468_16)); } goto LA1; LA10: ; { ext0 = copyString(((NimStringDesc*) &T839829468_17)); } LA1: ; LOC13 = (NimStringDesc*)0; LOC13 = withpackagename_169065_2607990831((*m0).cfilename); LOC14 = (NimStringDesc*)0; LOC14 = completecfilepath_271854_2528170400(LOC13, NIM_TRUE); result0 = noschangeFileExt(LOC14, ext0); return result0; } N_NIMCALL(Tpasscontext339002*, myopencached_561249_839829468)(Tsym290834* module0, Trodreader330021* rd0) { Tpasscontext339002* result0; Tcgen527027* m0; NimStringDesc* LOC1; result0 = (Tpasscontext339002*)0; m0 = newmodule_561045_839829468(module0); LOC1 = (NimStringDesc*)0; LOC1 = getcfile_561204_839829468(m0); 
readmergeinfo_528613_2760143328(LOC1, m0); result0 = &m0->Sup; return result0; } static N_INLINE(NIM_BOOL, skipcodegen_339085_2355241294)(Tnode290802* n0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = (((NI) 0) < gerrorcounter_190072_155036129); return result0; } N_NIMCALL(void, fillloc_530282_839829468)(Tloc290816* a0, Tlockind290808 k0, Ttype290840* typ0, Ropeobj177006* r0, Tstorageloc290812 s0) { { if (!((*a0).k == ((Tlockind290808) 0))) goto LA3; (*a0).k = k0; unsureAsgnRef((void**) (&(*a0).t), typ0); (*a0).s = s0; { if (!((*a0).r == NIM_NIL)) goto LA7; unsureAsgnRef((void**) (&(*a0).r), r0); } LA7: ; } LA3: ; } N_NIMCALL(NIM_BOOL, iskeyword_530960_839829468)(Tident197010* w0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; switch ((*w0).Sup.id) { case ((NI) 200) ... ((NI) 262): case ((NI) 4) ... ((NI) 70): case ((NI) 138): { result0 = NIM_TRUE; goto BeforeRet; } break; default: { result0 = NIM_FALSE; goto BeforeRet; } break; } }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj177006*, manglename_531205_839829468)(Tsym290834* s0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; result0 = (*s0).loc.r; { NIM_BOOL keeporigname0; NIM_BOOL LOC5; NIM_BOOL LOC6; NIM_BOOL LOC9; NimStringDesc* LOC10; if (!(result0 == NIM_NIL)) goto LA3; LOC5 = (NIM_BOOL)0; LOC6 = (NIM_BOOL)0; LOC6 = ((2824 &(1U<<((NU)((*s0).kind)&31U)))!=0); if (!(LOC6)) goto LA7; LOC6 = ((IL64(2149580812) & (*s0).flags) == 0); LA7: ; LOC5 = LOC6; if (!(LOC5)) goto LA8; LOC9 = (NIM_BOOL)0; LOC9 = iskeyword_530960_839829468((*s0).name); LOC5 = !(LOC9); LA8: ; keeporigname0 = LOC5; LOC10 = (NimStringDesc*)0; LOC10 = mangle_526847_2036603609((*(*s0).name).s); result0 = rope_177277_2381377266(LOC10); { if (!keeporigname0) goto LA13; add_177487_2381377266(&result0, ((NimStringDesc*) &T839829468_18)); } goto LA11; LA13: ; { TY531289 LOC16; Ropeobj177006* LOC17; Ropeobj177006* LOC18; TY531289 LOC19; Ropeobj177006* LOC20; NU32 LOC21; Ropeobj177006* LOC22; memset((void*)LOC16, 0, sizeof(LOC16)); LOC17 = 
(Ropeobj177006*)0; LOC17 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_12), LOC16, 0); add_177482_2381377266(&result0, LOC17); LOC18 = (Ropeobj177006*)0; LOC18 = rope_177401_2381377266(((NI64) ((*s0).Sup.id))); add_177482_2381377266(&result0, LOC18); memset((void*)LOC19, 0, sizeof(LOC19)); LOC20 = (Ropeobj177006*)0; LOC20 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_12), LOC19, 0); add_177482_2381377266(&result0, LOC20); LOC21 = (NU32)0; LOC21 = hashowner_530977_839829468(s0); LOC22 = (Ropeobj177006*)0; LOC22 = rope_177401_2381377266(((NI64) (LOC21))); add_177482_2381377266(&result0, LOC22); } LA11: ; asgnRefNoCycle((void**) (&(*s0).loc.r), result0); } LA3: ; return result0; } N_NIMCALL(void, fillprocloc_537201_839829468)(Tsym290834* sym0) { { Ropeobj177006* LOC5; if (!((*sym0).loc.k == ((Tlockind290808) 0))) goto LA3; LOC5 = (Ropeobj177006*)0; LOC5 = manglename_531205_839829468(sym0); fillloc_530282_839829468((&(*sym0).loc), ((Tlockind290808) 7), (*sym0).typ, LOC5, ((Tstorageloc290812) 2)); } LA3: ; } N_NIMCALL(void, useheader_530369_839829468)(Tcgen527027* m0, Tsym290834* sym0) { { NimStringDesc* LOC5; NIM_BOOL LOC6; if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag290810) 6))&15U)))!=0)) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = getstr_295230_850551059((*(*sym0).annex).path); LOC6 = (NIM_BOOL)0; LOC6 = includestr_147249_3771138726((&(*m0).headerfiles), LOC5); } LA3: ; } static N_INLINE(void, appendChar)(NimStringDesc* dest0, NIM_CHAR c0) { (*dest0).data[((*dest0).Sup.len)- 0] = c0; (*dest0).data[((NI)((*dest0).Sup.len + ((NI) 1)))- 0] = 0; (*dest0).Sup.len += ((NI) 1); } N_NIMCALL(NIM_BOOL, isactivated_559431_839829468)(Tsym290834* prc0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = !(((*prc0).typ == NIM_NIL)); return result0; } N_NIMCALL(void, addforwardedproc_530203_839829468)(Tcgen527027* m0, Tsym290834* prc0) { (*m0).forwardedprocs = (Tsymseq290804*) incrSeqV2(&((*m0).forwardedprocs)->Sup, sizeof(Tsym290834*)); 
asgnRefNoCycle((void**) (&(*m0).forwardedprocs->data[(*m0).forwardedprocs->Sup.len]), prc0); ++(*m0).forwardedprocs->Sup.len; gforwardedprocscounter_527171_3723162438 += ((NI) 1); } N_NIMCALL(void, genclinedir_530725_839829468)(Ropeobj177006** r0, NimStringDesc* filename0, NI line0) { { TY530811 LOC5; NimStringDesc* LOC6; if (!((goptions_168128_2607990831 &(1U<<((NU)(((Toption168009) 10))&31U)))!=0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC6 = (NimStringDesc*)0; LOC6 = makesinglelinecstring_526835_2036603609(filename0); LOC5[0] = rope_177277_2381377266(LOC6); LOC5[1] = rope_177401_2381377266(((NI64) (line0))); addf_178205_2381377266(r0, ((NimStringDesc*) &T839829468_21), LOC5, 2); } LA3: ; } static N_INLINE(NI, tolinenumber_190415_155036129)(Tlineinfo189336 info0) { NI result0; result0 = (NI)0; result0 = ((NI) (info0.line)); return result0; } N_NIMCALL(NI, safelinenm_530721_839829468)(Tlineinfo189336 info0) { NI result0; result0 = (NI)0; result0 = tolinenumber_190415_155036129(info0); { if (!(result0 < ((NI) 0))) goto LA3; result0 = ((NI) 0); } LA3: ; return result0; } N_NIMCALL(void, genclinedir_530813_839829468)(Ropeobj177006** r0, Tlineinfo189336 info0) { NimStringDesc* LOC1; NI LOC2; LOC1 = (NimStringDesc*)0; LOC1 = tofullpath_190264_155036129(info0.fileindex); LOC2 = (NI)0; LOC2 = safelinenm_530721_839829468(info0); genclinedir_530725_839829468(r0, LOC1, LOC2); } N_NIMCALL(Tctypekind527007, mapsettype_531389_839829468)(Ttype290840* typ0) { Tctypekind527007 result0; NI64 LOC1; result0 = (Tctypekind527007)0; LOC1 = (NI64)0; LOC1 = getsize_318135_3876443242(typ0); switch (((NI) (LOC1))) { case ((NI) 1): { result0 = ((Tctypekind527007) 4); } break; case ((NI) 2): { result0 = ((Tctypekind527007) 5); } break; case ((NI) 4): { result0 = ((Tctypekind527007) 6); } break; case ((NI) 8): { result0 = ((Tctypekind527007) 7); } break; default: { result0 = ((Tctypekind527007) 17); } break; } return result0; } N_NIMCALL(Tctypekind527007, 
maptype_531393_839829468)(Ttype290840* typ0) { Tctypekind527007 result0; result0 = (Tctypekind527007)0; switch ((*typ0).kind) { case ((Ttypekind290244) 0): case ((Ttypekind290244) 7): { result0 = ((Tctypekind527007) 0); } break; case ((Ttypekind290244) 1): { result0 = ((Tctypekind527007) 2); } break; case ((Ttypekind290244) 2): { result0 = ((Tctypekind527007) 1); } break; case ((Ttypekind290244) 19): { result0 = mapsettype_531389_839829468(typ0); } break; case ((Ttypekind290244) 27): case ((Ttypekind290244) 4): case ((Ttypekind290244) 16): case ((Ttypekind290244) 48): { result0 = ((Tctypekind527007) 17); } break; case ((Ttypekind290244) 17): case ((Ttypekind290244) 18): { result0 = ((Tctypekind527007) 19); } break; case ((Ttypekind290244) 10): case ((Ttypekind290244) 11): case ((Ttypekind290244) 12): case ((Ttypekind290244) 13): case ((Ttypekind290244) 15): case ((Ttypekind290244) 46): case ((Ttypekind290244) 47): case ((Ttypekind290244) 49): case ((Ttypekind290244) 8): { Ttype290840* LOC8; LOC8 = (Ttype290840*)0; LOC8 = lastson_293377_850551059(typ0); result0 = maptype_531393_839829468(LOC8); } break; case ((Ttypekind290244) 14): { { NI64 LOC12; LOC12 = (NI64)0; LOC12 = firstord_318001_3876443242(typ0); if (!(LOC12 < IL64(0))) goto LA13; result0 = ((Tctypekind527007) 6); } goto LA10; LA13: ; { NI64 LOC16; LOC16 = (NI64)0; LOC16 = getsize_318135_3876443242(typ0); switch (((NI) (LOC16))) { case ((NI) 1): { result0 = ((Tctypekind527007) 13); } break; case ((NI) 2): { result0 = ((Tctypekind527007) 14); } break; case ((NI) 4): { result0 = ((Tctypekind527007) 6); } break; case ((NI) 8): { result0 = ((Tctypekind527007) 7); } break; default: { internalerror_194113_155036129(((NimStringDesc*) &T839829468_25)); } break; } } LA10: ; } break; case ((Ttypekind290244) 20): { result0 = maptype_531393_839829468((*typ0).sons->data[((NI) 0)]); } break; case ((Ttypekind290244) 21): case ((Ttypekind290244) 23): case ((Ttypekind290244) 22): { Ttype290840* base0; Ttype290840* LOC24; 
LOC24 = (Ttype290840*)0; LOC24 = lastson_293377_850551059(typ0); base0 = skiptypes_294099_850551059(LOC24, IL64(211106232576256)); switch ((*base0).kind) { case ((Ttypekind290244) 27): case ((Ttypekind290244) 4): case ((Ttypekind290244) 16): case ((Ttypekind290244) 48): { result0 = ((Tctypekind527007) 18); } break; default: { result0 = ((Tctypekind527007) 20); } break; } } break; case ((Ttypekind290244) 26): { result0 = ((Tctypekind527007) 20); } break; case ((Ttypekind290244) 24): { result0 = ((Tctypekind527007) 22); } break; case ((Ttypekind290244) 25): { { if (!!(((*typ0).callconv == ((Tcallingconvention290002) 8)))) goto LA32; result0 = ((Tctypekind527007) 23); } goto LA30; LA32: ; { result0 = ((Tctypekind527007) 19); } LA30: ; } break; case ((Ttypekind290244) 28): { result0 = ((Tctypekind527007) 21); } break; case ((Ttypekind290244) 29): { result0 = ((Tctypekind527007) 24); } break; case ((Ttypekind290244) 31) ... ((Ttypekind290244) 44): { result0 = ((Tctypekind527007) ((NI)(((NI) ((NI)(((NI) ((*typ0).kind)) - ((NI) 31)))) + ((NI) 3)))); } break; case ((Ttypekind290244) 59): { { Ttype290840* LOC43; if (!!(((*typ0).n == NIM_NIL))) goto LA41; LOC43 = (Ttype290840*)0; LOC43 = lastson_293377_850551059(typ0); result0 = maptype_531393_839829468(LOC43); } goto LA39; LA41: ; { internalerror_194113_155036129(((NimStringDesc*) &T839829468_25)); } LA39: ; } break; default: { internalerror_194113_155036129(((NimStringDesc*) &T839829468_25)); } break; } return result0; } N_NIMCALL(NIM_BOOL, isimportedcpptype_531476_839829468)(Ttype290840* t0) { NIM_BOOL result0; NIM_BOOL LOC1; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = !(((*t0).sym == NIM_NIL)); if (!(LOC1)) goto LA2; LOC1 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA2: ; result0 = LOC1; return result0; } N_NIMCALL(NIM_BOOL, needscomplexassignment_531509_839829468)(Ttype290840* typ0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = containsgarbagecollectedref_318117_3876443242(typ0); 
return result0; } static N_INLINE(NIM_BOOL, isobjlackingtypefield_531513_839829468)(Ttype290840* typ0) { NIM_BOOL result0; NIM_BOOL LOC1; NIM_BOOL LOC3; NIM_BOOL LOC4; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = ((*typ0).kind == ((Ttypekind290244) 17)); if (!(LOC1)) goto LA2; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag290431) 2))&31U)))!=0); if (!(LOC4)) goto LA5; LOC4 = ((*typ0).sons->data[((NI) 0)] == NIM_NIL); LA5: ; LOC3 = LOC4; if (LOC3) goto LA6; LOC3 = ispureobject_318138_3876443242(typ0); LA6: ; LOC1 = LOC3; LA2: ; result0 = LOC1; return result0; } N_NIMCALL(NIM_BOOL, isinvalidreturntype_531548_839829468)(Ttype290840* rettype0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; { if (!(rettype0 == NIM_NIL)) goto LA3; result0 = NIM_TRUE; } goto LA1; LA3: ; { Tctypekind527007 LOC6; LOC6 = (Tctypekind527007)0; LOC6 = maptype_531393_839829468(rettype0); switch (LOC6) { case ((Tctypekind527007) 17): { Ttype290840* LOC8; LOC8 = (Ttype290840*)0; LOC8 = skiptypes_294099_850551059(rettype0, IL64(211106232576256)); result0 = !(((*LOC8).kind == ((Ttypekind290244) 23) || (*LOC8).kind == ((Ttypekind290244) 22) || (*LOC8).kind == ((Ttypekind290244) 21))); } break; case ((Tctypekind527007) 19): { Ttype290840* t0; NIM_BOOL LOC16; NIM_BOOL LOC18; NIM_BOOL LOC20; t0 = skiptypes_294099_850551059(rettype0, IL64(211106232576256)); { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = isimportedcpptype_531476_839829468(rettype0); if (LOC12) goto LA13; LOC12 = isimportedcpptype_531476_839829468(t0); LA13: ; if (!LOC12) goto LA14; result0 = NIM_FALSE; goto BeforeRet; } LA14: ; LOC16 = (NIM_BOOL)0; LOC16 = needscomplexassignment_531509_839829468(t0); if (LOC16) goto LA17; LOC18 = (NIM_BOOL)0; LOC18 = ((*t0).kind == ((Ttypekind290244) 17)); if (!(LOC18)) goto LA19; LOC20 = (NIM_BOOL)0; LOC20 = isobjlackingtypefield_531513_839829468(t0); LOC18 = !(LOC20); LA19: ; LOC16 = LOC18; LA17: ; result0 = LOC16; } break; default: { result0 = NIM_FALSE; } 
break; } } LA1: ; }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj177006*, typename_531292_839829468)(Ttype290840* typ0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; { NimStringDesc* LOC5; if (!!(((*typ0).sym == NIM_NIL))) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = mangle_526847_2036603609((*(*(*typ0).sym).name).s); result0 = rope_177277_2381377266(LOC5); } goto LA1; LA3: ; { TY531289 LOC7; memset((void*)LOC7, 0, sizeof(LOC7)); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_28), LOC7, 0); } LA1: ; return result0; } N_NIMCALL(Ropeobj177006*, gettypename_531313_839829468)(Ttype290840* typ0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = !(((*typ0).sym == NIM_NIL)); if (!(LOC3)) goto LA4; LOC3 = !(((96 & (*(*typ0).sym).flags) == 0)); LA4: ; if (!LOC3) goto LA5; result0 = (*(*typ0).sym).loc.r; } goto LA1; LA5: ; { { Ropeobj177006* LOC12; Ropeobj177006* LOC13; if (!((*typ0).loc.r == NIM_NIL)) goto LA10; LOC12 = (Ropeobj177006*)0; LOC12 = typename_531292_839829468(typ0); LOC13 = (Ropeobj177006*)0; LOC13 = rope_177401_2381377266(((NI64) ((*typ0).Sup.id))); asgnRefNoCycle((void**) (&(*typ0).loc.r), HEX26_177418_2381377266(LOC12, LOC13)); } LA10: ; result0 = (*typ0).loc.r; } LA1: ; { NimStringDesc* LOC18; if (!(result0 == NIM_NIL)) goto LA16; LOC18 = (NimStringDesc*)0; LOC18 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI290244))->Sup.len + 13); appendString(LOC18, ((NimStringDesc*) &T839829468_29)); appendString(LOC18, reprEnum((NI)(*typ0).kind, (&NTI290244))); internalerror_194113_155036129(LOC18); } LA16: ; return result0; } N_NIMCALL(Ropeobj177006*, typenameorliteral_531898_839829468)(Ttype290840* t0, NimStringDesc* literal0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = !(((*t0).sym == NIM_NIL)); if (!(LOC4)) goto LA5; LOC4 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag290184) 5))&31U)))!=0); LA5: ; LOC3 
= LOC4; if (!(LOC3)) goto LA6; LOC3 = ((*(*t0).sym).magic == ((Tmagic290524) 0)); LA6: ; if (!LOC3) goto LA7; result0 = gettypename_531313_839829468(t0); } goto LA1; LA7: ; { result0 = rope_177277_2381377266(literal0); } LA1: ; return result0; } N_NIMCALL(Ropeobj177006*, getsimpletypedesc_531936_839829468)(Tcgen527027* m0, Ttype290840* typ0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; switch ((*typ0).kind) { case ((Ttypekind290244) 26): { result0 = typenameorliteral_531898_839829468(typ0, ((NimStringDesc*) &T839829468_30)); } break; case ((Ttypekind290244) 28): { Ropeobj177006* LOC3; LOC3 = (Ropeobj177006*)0; LOC3 = cgsym_530403_839829468(m0, ((NimStringDesc*) &T839829468_31)); result0 = typenameorliteral_531898_839829468(typ0, ((NimStringDesc*) &T839829468_32)); } break; case ((Ttypekind290244) 29): { result0 = typenameorliteral_531898_839829468(typ0, ((NimStringDesc*) &T839829468_33)); } break; case ((Ttypekind290244) 1): { result0 = typenameorliteral_531898_839829468(typ0, ((NimStringDesc*) &T839829468_34)); } break; case ((Ttypekind290244) 2): { result0 = typenameorliteral_531898_839829468(typ0, ((NimStringDesc*) &T839829468_35)); } break; case ((Ttypekind290244) 5): { result0 = typenameorliteral_531898_839829468(typ0, ((NimStringDesc*) &T839829468_18)); } break; case ((Ttypekind290244) 31) ... 
((Ttypekind290244) 44): { result0 = typenameorliteral_531898_839829468(typ0, Numericaltypetostr_531941_839829468[((*typ0).kind)- 31]); } break; case ((Ttypekind290244) 13): case ((Ttypekind290244) 20): case ((Ttypekind290244) 15): { result0 = getsimpletypedesc_531936_839829468(m0, (*typ0).sons->data[((NI) 0)]); } break; case ((Ttypekind290244) 59): { { Ttype290840* LOC15; if (!!(((*typ0).n == NIM_NIL))) goto LA13; LOC15 = (Ttype290840*)0; LOC15 = lastson_293377_850551059(typ0); result0 = getsimpletypedesc_531936_839829468(m0, LOC15); } goto LA11; LA13: ; { internalerror_194113_155036129(((NimStringDesc*) &T839829468_50)); } LA11: ; } break; case ((Ttypekind290244) 11): { Ttype290840* LOC18; LOC18 = (Ttype290840*)0; LOC18 = lastson_293377_850551059(typ0); result0 = getsimpletypedesc_531936_839829468(m0, LOC18); } break; default: { result0 = NIM_NIL; } break; } return result0; } N_NIMCALL(Ropeobj177006*, cachegettype_531591_839829468)(Tidtable290850 tab0, Ttype290840* key0) { Ropeobj177006* result0; Tidobj197004* LOC1; TNimObject* LOC2; result0 = (Ropeobj177006*)0; LOC1 = (Tidobj197004*)0; LOC1 = &key0->Sup; LOC2 = (TNimObject*)0; LOC2 = idtableget_297086_2984716966(tab0, LOC1); result0 = ((Ropeobj177006*) (LOC2)); return result0; } N_NIMCALL(Ropeobj177006*, gettypepre_531972_839829468)(Tcgen527027* m0, Ttype290840* typ0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; { if (!(typ0 == NIM_NIL)) goto LA3; result0 = rope_177277_2381377266(((NimStringDesc*) &T839829468_26)); } goto LA1; LA3: ; { result0 = getsimpletypedesc_531936_839829468(m0, typ0); { if (!(result0 == NIM_NIL)) goto LA8; result0 = cachegettype_531591_839829468((*m0).typecache, typ0); } LA8: ; } LA1: ; return result0; } N_NIMCALL(NIM_BOOL, isimportedtype_531449_839829468)(Ttype290840* t0) { NIM_BOOL result0; NIM_BOOL LOC1; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = !(((*t0).sym == NIM_NIL)); if (!(LOC1)) goto LA2; LOC1 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag290184) 5))&31U)))!=0); 
LA2: ; result0 = LOC1; return result0; } N_NIMCALL(NimStringDesc*, getforwardstructformat_532015_839829468)(Tcgen527027* m0) { NimStringDesc* result0; result0 = (NimStringDesc*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA4: ; if (!LOC3) goto LA5; result0 = copyString(((NimStringDesc*) &T839829468_54)); } goto LA1; LA5: ; { result0 = copyString(((NimStringDesc*) &T839829468_55)); } LA1: ; return result0; } N_NIMCALL(Ropeobj177006*, structorunion_532001_839829468)(Ttype290840* t0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; { if (!(((*t0).flags &(1U<<((NU)(((Ttypeflag290431) 1))&31U)))!=0)) goto LA3; result0 = rope_177277_2381377266(((NimStringDesc*) &T839829468_56)); } goto LA1; LA3: ; { result0 = rope_177277_2381377266(((NimStringDesc*) &T839829468_57)); } LA1: ; return result0; } N_NIMCALL(Ropeobj177006*, gettypeforward_532039_839829468)(Tcgen527027* m0, Ttype290840* typ0) { Ropeobj177006* result0; { result0 = (Ropeobj177006*)0; result0 = cachegettype_531591_839829468((*m0).forwtypecache, typ0); { if (!!((result0 == NIM_NIL))) goto LA3; goto BeforeRet; } LA3: ; result0 = gettypepre_531972_839829468(m0, typ0); { if (!!((result0 == NIM_NIL))) goto LA7; goto BeforeRet; } LA7: ; switch ((*typ0).kind) { case ((Ttypekind290244) 24): case ((Ttypekind290244) 18): case ((Ttypekind290244) 17): { Tidobj197004* LOC17; TNimObject* LOC18; result0 = gettypename_531313_839829468(typ0); { NIM_BOOL LOC12; NimStringDesc* LOC15; TY530811 LOC16; LOC12 = (NIM_BOOL)0; LOC12 = isimportedtype_531449_839829468(typ0); if (!!(LOC12)) goto LA13; LOC15 = (NimStringDesc*)0; LOC15 = getforwardstructformat_532015_839829468(m0); memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = structorunion_532001_839829468(typ0); LOC16[1] = result0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 2))- 0], LOC15, LOC16, 2); } LA13: ; LOC17 = 
(Tidobj197004*)0; LOC17 = &typ0->Sup; LOC18 = (TNimObject*)0; LOC18 = &result0->Sup; idtableput_297094_2984716966((&(*m0).forwtypecache), LOC17, LOC18); } break; default: { NimStringDesc* LOC20; LOC20 = (NimStringDesc*)0; LOC20 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI290244))->Sup.len + 16); appendString(LOC20, ((NimStringDesc*) &T839829468_58)); appendString(LOC20, reprEnum((NI)(*typ0).kind, (&NTI290244))); appendChar(LOC20, 41); internalerror_194113_155036129(LOC20); } break; } }BeforeRet: ; return result0; } N_NIMCALL(void, pushtype_531958_839829468)(Tcgen527027* m0, Ttype290840* typ0) { (*m0).typestack = (Ttypeseq290836*) incrSeqV2(&((*m0).typestack)->Sup, sizeof(Ttype290840*)); asgnRefNoCycle((void**) (&(*m0).typestack->data[(*m0).typestack->Sup.len]), typ0); ++(*m0).typestack->Sup.len; } N_NIMCALL(Ropeobj177006*, gettypedescweak_532079_839829468)(Tcgen527027* m0, Ttype290840* t0, Intset266030* check0) { Ropeobj177006* result0; Ttype290840* etb0; result0 = (Ropeobj177006*)0; etb0 = skiptypes_294099_850551059(t0, IL64(211106232576256)); switch ((*etb0).kind) { case ((Ttypekind290244) 17): case ((Ttypekind290244) 18): { { NIM_BOOL LOC4; LOC4 = (NIM_BOOL)0; LOC4 = isimportedcpptype_531476_839829468(etb0); if (!(LOC4)) goto LA5; LOC4 = ((*t0).kind == ((Ttypekind290244) 11)); LA5: ; if (!LOC4) goto LA6; result0 = gettypedescaux_531503_839829468(m0, t0, check0); } goto LA2; LA6: ; { Ttype290840* x0; x0 = getuniquetype_526640_2036603609(etb0); result0 = gettypeforward_532039_839829468(m0, x0); pushtype_531958_839829468(m0, x0); } LA2: ; } break; case ((Ttypekind290244) 24): { Ttype290840* x0; Ropeobj177006* LOC10; x0 = getuniquetype_526640_2036603609(etb0); LOC10 = (Ropeobj177006*)0; LOC10 = gettypeforward_532039_839829468(m0, x0); result0 = HEX26_177447_2381377266(LOC10, ((NimStringDesc*) &T839829468_53)); pushtype_531958_839829468(m0, x0); } break; default: { result0 = gettypedescaux_531503_839829468(m0, t0, check0); } break; } return result0; } static 
N_INLINE(NI, len_291081_850551059)(Tnode290802* n0) { NI result0; result0 = (NI)0; { if (!(*n0).kindU.S6.sons == 0) goto LA3; result0 = ((NI) 0); } goto LA1; LA3: ; { result0 = ((*n0).kindU.S6.sons ? (*n0).kindU.S6.sons->Sup.len : 0); } LA1: ; return result0; } N_NIMCALL(void, appcg_530632_839829468)(Tcgen527027* m0, Ropeobj177006** c0, NimStringDesc* frmt0, Ropeobj177006** args0, NI args0Len0) { Ropeobj177006* LOC1; LOC1 = (Ropeobj177006*)0; LOC1 = ropecg_530407_839829468(m0, frmt0, args0, args0Len0); add_177482_2381377266(c0, LOC1); } N_NIMCALL(NIM_BOOL, scancppgenericslot_532827_839829468)(NimStringDesc* pat0, NI* cursor0, NI* outidx0, NI* outstars0) { NIM_BOOL result0; NI begin0; { result0 = (NIM_BOOL)0; (*cursor0) += ((NI) 1); begin0 = (*cursor0); { while (1) { if (!((NU8)(pat0->data[(*cursor0)]) == (NU8)(42))) goto LA2; (*cursor0) += ((NI) 1); } LA2: ; } { if (!(((NU8)(pat0->data[(*cursor0)])) >= ((NU8)(48)) && ((NU8)(pat0->data[(*cursor0)])) <= ((NU8)(57)))) goto LA5; (*outidx0) = ((NI) ((NI)(((NI) (((NU8)(pat0->data[(*cursor0)])))) - ((NI) 48)))); (*outstars0) = (NI)((*cursor0) - begin0); (*cursor0) += ((NI) 1); result0 = NIM_TRUE; goto BeforeRet; } goto LA3; LA5: ; { result0 = NIM_FALSE; goto BeforeRet; } LA3: ; }BeforeRet: ; return result0; } N_NIMCALL(Ttype290840*, resolvestarsincpptype_532891_839829468)(Ttype290840* typ0, NI idx0, NI stars0) { Ttype290840* result0; result0 = (Ttype290840*)0; { NI LOC3; LOC3 = (NI)0; LOC3 = len_293339_850551059(typ0); if (!(LOC3 <= idx0)) goto LA4; internalerror_194113_155036129(((NimStringDesc*) &T839829468_81)); } LA4: ; result0 = (*typ0).sons->data[idx0]; { NI i_532906_839829468; NI res_532931_839829468; i_532906_839829468 = (NI)0; res_532931_839829468 = ((NI) 1); { while (1) { if (!(res_532931_839829468 <= stars0)) goto LA8; i_532906_839829468 = res_532931_839829468; { NIM_BOOL LOC11; NI LOC13; LOC11 = (NIM_BOOL)0; LOC11 = !((result0 == NIM_NIL)); if (!(LOC11)) goto LA12; LOC13 = (NI)0; LOC13 = 
len_293339_850551059(result0); LOC11 = (((NI) 0) < LOC13); LA12: ; if (!LOC11) goto LA14; { if (!((*result0).kind == ((Ttypekind290244) 11))) goto LA18; result0 = (*result0).sons->data[((NI) 1)]; } goto LA16; LA18: ; { result0 = elemtype_318394_3876443242(result0); } LA16: ; } LA14: ; res_532931_839829468 += ((NI) 1); } LA8: ; } } return result0; } N_NIMCALL(NimStringDesc*, manglefield_530973_839829468)(Tident197010* name0) { NimStringDesc* result0; result0 = (NimStringDesc*)0; result0 = mangle_526847_2036603609((*name0).s); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = iskeyword_530960_839829468(name0); if (!LOC3) goto LA4; result0->data[((NI) 0)] = nsuToUpperAsciiChar(result0->data[((NI) 0)]); } LA4: ; return result0; } N_NIMCALL(Ropeobj177006*, manglerecfieldname_532361_839829468)(Tsym290834* field0, Ttype290840* rectype0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = !(((*rectype0).sym == NIM_NIL)); if (!(LOC3)) goto LA4; LOC3 = !(((96 & (*(*rectype0).sym).flags) == 0)); LA4: ; if (!LOC3) goto LA5; result0 = (*field0).loc.r; } goto LA1; LA5: ; { NimStringDesc* LOC8; LOC8 = (NimStringDesc*)0; LOC8 = manglefield_530973_839829468((*field0).name); result0 = rope_177277_2381377266(LOC8); } LA1: ; { if (!(result0 == NIM_NIL)) goto LA11; internalerror_194100_155036129((*field0).info, ((NimStringDesc*) &T839829468_96)); } LA11: ; return result0; } N_NIMCALL(Ropeobj177006*, genrecordfieldsaux_532421_839829468)(Tcgen527027* m0, Tnode290802* n0, Ropeobj177006* accessexpr0, Ttype290840* rectype0, Intset266030* check0) { Ropeobj177006* result0; Ropeobj177006* ae0; Ropeobj177006* uname0; Ropeobj177006* sname0; Ropeobj177006* a0; Tnode290802* k0; Tsym290834* field0; { result0 = (Ropeobj177006*)0; ae0 = (Ropeobj177006*)0; uname0 = (Ropeobj177006*)0; sname0 = (Ropeobj177006*)0; a0 = (Ropeobj177006*)0; k0 = (Tnode290802*)0; field0 = (Tsym290834*)0; result0 = NIM_NIL; switch ((*n0).kind) { case ((Tnodekind290020) 138): { { NI 
i_532447_839829468; NI HEX3Atmp_532620_839829468; NI LOC3; NI res_532623_839829468; i_532447_839829468 = (NI)0; HEX3Atmp_532620_839829468 = (NI)0; LOC3 = (NI)0; LOC3 = sonslen_293351_850551059(n0); HEX3Atmp_532620_839829468 = (NI)(LOC3 - ((NI) 1)); res_532623_839829468 = ((NI) 0); { while (1) { Ropeobj177006* LOC6; if (!(res_532623_839829468 <= HEX3Atmp_532620_839829468)) goto LA5; i_532447_839829468 = res_532623_839829468; LOC6 = (Ropeobj177006*)0; LOC6 = genrecordfieldsaux_532421_839829468(m0, (*n0).kindU.S6.sons->data[i_532447_839829468], accessexpr0, rectype0, check0); add_177482_2381377266(&result0, LOC6); res_532623_839829468 += ((NI) 1); } LA5: ; } } } break; case ((Tnodekind290020) 139): { Ropeobj177006* LOC12; NimStringDesc* LOC13; NimStringDesc* LOC14; Ropeobj177006* unionbody0; { if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 3)))) goto LA10; internalerror_194100_155036129((*n0).info, ((NimStringDesc*) &T839829468_89)); } LA10: ; LOC12 = (Ropeobj177006*)0; LOC12 = genrecordfieldsaux_532421_839829468(m0, (*n0).kindU.S6.sons->data[((NI) 0)], accessexpr0, rectype0, check0); add_177482_2381377266(&result0, LOC12); LOC13 = (NimStringDesc*)0; LOC14 = (NimStringDesc*)0; LOC14 = mangle_526847_2036603609((*(*(*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).name).s); LOC13 = rawNewString(LOC14->Sup.len + 1); appendString(LOC13, LOC14); appendChar(LOC13, 85); uname0 = rope_177277_2381377266(LOC13); { TY530811 LOC19; if (!!((accessexpr0 == NIM_NIL))) goto LA17; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = accessexpr0; LOC19[1] = uname0; ae0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_90), LOC19, 2); } goto LA15; LA17: ; { ae0 = uname0; } LA15: ; unionbody0 = NIM_NIL; { NI i_532491_839829468; NI HEX3Atmp_532629_839829468; NI LOC22; NI res_532632_839829468; i_532491_839829468 = (NI)0; HEX3Atmp_532629_839829468 = (NI)0; LOC22 = (NI)0; LOC22 = sonslen_293351_850551059(n0); HEX3Atmp_532629_839829468 = (NI)(LOC22 - ((NI) 
1)); res_532632_839829468 = ((NI) 1); { while (1) { if (!(res_532632_839829468 <= HEX3Atmp_532629_839829468)) goto LA24; i_532491_839829468 = res_532632_839829468; switch ((*(*n0).kindU.S6.sons->data[i_532491_839829468]).kind) { case ((Tnodekind290020) 85): case ((Tnodekind290020) 88): { k0 = lastson_293364_850551059((*n0).kindU.S6.sons->data[i_532491_839829468]); { Ropeobj177006* LOC30; TY530811 LOC31; Ropeobj177006* LOC32; if (!!(((*k0).kind == ((Tnodekind290020) 3)))) goto LA28; LOC30 = (Ropeobj177006*)0; LOC30 = rope_177401_2381377266(((NI64) (i_532491_839829468))); sname0 = HEX26_177452_2381377266(((NimStringDesc*) &T839829468_91), LOC30); memset((void*)LOC31, 0, sizeof(LOC31)); LOC31[0] = ae0; LOC31[1] = sname0; LOC32 = (Ropeobj177006*)0; LOC32 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_90), LOC31, 2); a0 = genrecordfieldsaux_532421_839829468(m0, k0, LOC32, rectype0, check0); { TY177507 LOC37; if (!!((a0 == NIM_NIL))) goto LA35; add_177487_2381377266(&unionbody0, ((NimStringDesc*) &T839829468_92)); add_177482_2381377266(&unionbody0, a0); memset((void*)LOC37, 0, sizeof(LOC37)); LOC37[0] = sname0; addf_178205_2381377266(&unionbody0, ((NimStringDesc*) &T839829468_93), LOC37, 1); } LA35: ; } goto LA26; LA28: ; { Ropeobj177006* LOC39; LOC39 = (Ropeobj177006*)0; LOC39 = genrecordfieldsaux_532421_839829468(m0, k0, ae0, rectype0, check0); add_177482_2381377266(&unionbody0, LOC39); } LA26: ; } break; default: { internalerror_194113_155036129(((NimStringDesc*) &T839829468_94)); } break; } res_532632_839829468 += ((NI) 1); } LA24: ; } } { TY530811 LOC45; if (!!((unionbody0 == NIM_NIL))) goto LA43; memset((void*)LOC45, 0, sizeof(LOC45)); LOC45[0] = unionbody0; LOC45[1] = uname0; addf_178205_2381377266(&result0, ((NimStringDesc*) &T839829468_95), LOC45, 2); } LA43: ; } break; case ((Tnodekind290020) 3): { field0 = (*n0).kindU.S4.sym; { if (!((*(*field0).typ).kind == ((Ttypekind290244) 62))) goto LA49; goto BeforeRet; } LA49: ; sname0 = 
manglerecfieldname_532361_839829468(field0, rectype0); { TY530811 LOC55; if (!!((accessexpr0 == NIM_NIL))) goto LA53; memset((void*)LOC55, 0, sizeof(LOC55)); LOC55[0] = accessexpr0; LOC55[1] = sname0; ae0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_90), LOC55, 2); } goto LA51; LA53: ; { ae0 = sname0; } LA51: ; fillloc_530282_839829468((&(*field0).loc), ((Tlockind290808) 5), (*field0).typ, ae0, ((Tstorageloc290812) 0)); { NIM_BOOL LOC59; Ttype290840* fieldtype0; LOC59 = (NIM_BOOL)0; LOC59 = isimportedcpptype_531476_839829468(rectype0); if (!!(LOC59)) goto LA60; fieldtype0 = skiptypes_294099_850551059((*field0).loc.t, IL64(211106232576256)); { NIM_BOOL LOC64; TY530811 LOC68; Ttype290840* LOC69; LOC64 = (NIM_BOOL)0; LOC64 = ((*fieldtype0).kind == ((Ttypekind290244) 16)); if (!(LOC64)) goto LA65; LOC64 = (((*fieldtype0).flags &(1U<<((NU)(((Ttypeflag290431) 0))&31U)))!=0); LA65: ; if (!LOC64) goto LA66; memset((void*)LOC68, 0, sizeof(LOC68)); LOC69 = (Ttype290840*)0; LOC69 = elemtype_318394_3876443242(fieldtype0); LOC68[0] = gettypedescaux_531503_839829468(m0, LOC69, check0); LOC68[1] = sname0; addf_178205_2381377266(&result0, ((NimStringDesc*) &T839829468_97), LOC68, 2); } goto LA62; LA66: ; { TY530811 LOC73; if (!((*fieldtype0).kind == ((Ttypekind290244) 24))) goto LA71; memset((void*)LOC73, 0, sizeof(LOC73)); LOC73[0] = gettypedescweak_532079_839829468(m0, (*field0).loc.t, check0); LOC73[1] = sname0; addf_178205_2381377266(&result0, ((NimStringDesc*) &T839829468_54), LOC73, 2); } goto LA62; LA71: ; { TY533238 LOC77; NimStringDesc* LOC78; if (!!(((*field0).kindU.S4.bitsize == ((NI) 0)))) goto LA75; memset((void*)LOC77, 0, sizeof(LOC77)); LOC77[0] = gettypedescaux_531503_839829468(m0, (*field0).loc.t, check0); LOC77[1] = sname0; LOC78 = (NimStringDesc*)0; LOC78 = nimIntToStr((*field0).kindU.S4.bitsize); LOC77[2] = rope_177277_2381377266(LOC78); addf_178205_2381377266(&result0, ((NimStringDesc*) &T839829468_98), LOC77, 3); } goto LA62; LA75: ; { TY530811 
LOC80; memset((void*)LOC80, 0, sizeof(LOC80)); LOC80[0] = gettypedescaux_531503_839829468(m0, (*field0).loc.t, check0); LOC80[1] = sname0; addf_178205_2381377266(&result0, ((NimStringDesc*) &T839829468_54), LOC80, 2); } LA62: ; } LA60: ; } break; default: { internalerror_194100_155036129((*n0).info, ((NimStringDesc*) &T839829468_99)); } break; } }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj177006*, getrecordfields_532636_839829468)(Tcgen527027* m0, Ttype290840* typ0, Intset266030* check0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; result0 = genrecordfieldsaux_532421_839829468(m0, (*typ0).n, NIM_NIL, typ0, check0); return result0; } N_NIMCALL(Ropeobj177006*, getrecorddesc_532643_839829468)(Tcgen527027* m0, Ttype290840* typ0, Ropeobj177006* name0, Intset266030* check0) { Ropeobj177006* result0; NIM_BOOL hasfield0; Ropeobj177006* attribute0; TY533238 LOC6; Ropeobj177006* desc0; NimStringDesc* LOC46; result0 = (Ropeobj177006*)0; hasfield0 = NIM_FALSE; { if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag290431) 21))&31U)))!=0)) goto LA3; attribute0 = rope_177277_2381377266(Cc_271413_2528170400[(ccompiler_271431_2528170400)- 1].Field19); } goto LA1; LA3: ; { attribute0 = NIM_NIL; } LA1: ; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = structorunion_532001_839829468(typ0); LOC6[1] = name0; LOC6[2] = attribute0; result0 = ropecg_530407_839829468(m0, Cc_271413_2528170400[(ccompiler_271431_2528170400)- 1].Field18, LOC6, 3); { if (!((*typ0).kind == ((Ttypekind290244) 17))) goto LA9; { if (!((*typ0).sons->data[((NI) 0)] == NIM_NIL)) goto LA13; { NIM_BOOL LOC17; NIM_BOOL LOC18; TY531289 LOC23; LOC17 = (NIM_BOOL)0; LOC18 = (NIM_BOOL)0; LOC18 = !(((*typ0).sym == NIM_NIL)); if (!(LOC18)) goto LA19; LOC18 = (((*(*typ0).sym).flags &(1U<<((NU)(((Tsymflag290184) 9))&31U)))!=0); LA19: ; LOC17 = LOC18; if (LOC17) goto LA20; LOC17 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag290431) 2))&31U)))!=0); LA20: ; if (!LOC17) goto LA21; memset((void*)LOC23, 0, sizeof(LOC23)); 
appcg_530632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_85), LOC23, 0); } goto LA15; LA21: ; { TY530811 LOC25; memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = name0; LOC25[1] = attribute0; appcg_530632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_86), LOC25, 2); hasfield0 = NIM_TRUE; } LA15: ; } goto LA11; LA13: ; { NIM_BOOL LOC27; TY177507 LOC31; Ttype290840* LOC32; LOC27 = (NIM_BOOL)0; LOC27 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC27) goto LA28; LOC27 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA28: ; if (!LOC27) goto LA29; memset((void*)LOC31, 0, sizeof(LOC31)); LOC32 = (Ttype290840*)0; LOC32 = skiptypes_294099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106247215360)); LOC31[0] = gettypedescaux_531503_839829468(m0, LOC32, check0); appcg_530632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_87), LOC31, 1); hasfield0 = NIM_TRUE; } goto LA11; LA29: ; { TY177507 LOC34; Ttype290840* LOC35; memset((void*)LOC34, 0, sizeof(LOC34)); LOC35 = (Ttype290840*)0; LOC35 = skiptypes_294099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106247215360)); LOC34[0] = gettypedescaux_531503_839829468(m0, LOC35, check0); appcg_530632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_88), LOC34, 1); hasfield0 = NIM_TRUE; } LA11: ; } goto LA7; LA9: ; { TY177507 LOC37; memset((void*)LOC37, 0, sizeof(LOC37)); LOC37[0] = name0; addf_178205_2381377266(&result0, ((NimStringDesc*) &T839829468_85), LOC37, 1); } LA7: ; desc0 = getrecordfields_532636_839829468(m0, typ0, check0); { NIM_BOOL LOC40; TY531289 LOC44; LOC40 = (NIM_BOOL)0; LOC40 = (desc0 == NIM_NIL); if (!(LOC40)) goto LA41; LOC40 = !(hasfield0); LA41: ; if (!LOC40) goto LA42; memset((void*)LOC44, 0, sizeof(LOC44)); addf_178205_2381377266(&result0, ((NimStringDesc*) &T839829468_100), LOC44, 0); } goto LA38; LA42: ; { add_177482_2381377266(&result0, desc0); } LA38: ; LOC46 = (NimStringDesc*)0; LOC46 = rawNewString(tnl_175644_4151366050->Sup.len + 2); 
appendString(LOC46, ((NimStringDesc*) &T839829468_101)); appendString(LOC46, tnl_175644_4151366050); add_177487_2381377266(&result0, LOC46); return result0; } N_NIMCALL(Ropeobj177006*, gettupledesc_532777_839829468)(Tcgen527027* m0, Ttype290840* typ0, Ropeobj177006* name0, Intset266030* check0) { Ropeobj177006* result0; TY530811 LOC1; Ropeobj177006* desc0; NimStringDesc* LOC13; result0 = (Ropeobj177006*)0; memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = structorunion_532001_839829468(typ0); LOC1[1] = name0; result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_102), LOC1, 2); desc0 = NIM_NIL; { NI i_532799_839829468; NI HEX3Atmp_532820_839829468; NI LOC3; NI res_532823_839829468; i_532799_839829468 = (NI)0; HEX3Atmp_532820_839829468 = (NI)0; LOC3 = (NI)0; LOC3 = sonslen_293327_850551059(typ0); HEX3Atmp_532820_839829468 = (NI)(LOC3 - ((NI) 1)); res_532823_839829468 = ((NI) 0); { while (1) { TY530811 LOC6; if (!(res_532823_839829468 <= HEX3Atmp_532820_839829468)) goto LA5; i_532799_839829468 = res_532823_839829468; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = gettypedescaux_531503_839829468(m0, (*typ0).sons->data[i_532799_839829468], check0); LOC6[1] = rope_177401_2381377266(((NI64) (i_532799_839829468))); addf_178205_2381377266(&desc0, ((NimStringDesc*) &T839829468_103), LOC6, 2); res_532823_839829468 += ((NI) 1); } LA5: ; } } { NimStringDesc* LOC11; if (!(desc0 == NIM_NIL)) goto LA9; LOC11 = (NimStringDesc*)0; LOC11 = rawNewString(tnl_175644_4151366050->Sup.len + 11); appendString(LOC11, ((NimStringDesc*) &T839829468_104)); appendString(LOC11, tnl_175644_4151366050); add_177487_2381377266(&result0, LOC11); } goto LA7; LA9: ; { add_177482_2381377266(&result0, desc0); } LA7: ; LOC13 = (NimStringDesc*)0; LOC13 = rawNewString(tnl_175644_4151366050->Sup.len + 2); appendString(LOC13, ((NimStringDesc*) &T839829468_101)); appendString(LOC13, tnl_175644_4151366050); add_177487_2381377266(&result0, LOC13); return result0; } N_NIMCALL(Ropeobj177006*, 
gettypedescaux_531503_839829468)(Tcgen527027* m0, Ttype290840* typ0, Intset266030* check0) { Ropeobj177006* result0; Ttype290840* t_532942_839829468; { result0 = (Ropeobj177006*)0; t_532942_839829468 = getuniquetype_526640_2036603609(typ0); { if (!(t_532942_839829468 == NIM_NIL)) goto LA3; internalerror_194113_155036129(((NimStringDesc*) &T839829468_27)); } LA3: ; { if (!!(((*t_532942_839829468).sym == NIM_NIL))) goto LA7; useheader_530369_839829468(m0, (*t_532942_839829468).sym); } LA7: ; result0 = gettypepre_531972_839829468(m0, t_532942_839829468); { if (!!((result0 == NIM_NIL))) goto LA11; goto BeforeRet; } LA11: ; { NIM_BOOL LOC15; LOC15 = (NIM_BOOL)0; LOC15 = containsorincl_266862_2627731572(check0, (*t_532942_839829468).Sup.id); if (!LOC15) goto LA16; { NIM_BOOL LOC20; NimStringDesc* LOC24; NimStringDesc* LOC25; LOC20 = (NIM_BOOL)0; LOC20 = isimportedcpptype_531476_839829468(typ0); if (LOC20) goto LA21; LOC20 = isimportedcpptype_531476_839829468(t_532942_839829468); LA21: ; if (!!(LOC20)) goto LA22; LOC24 = (NimStringDesc*)0; LOC25 = (NimStringDesc*)0; LOC25 = typetostring_318017_3876443242(typ0, ((Tprefereddesc318011) 0)); LOC24 = rawNewString(LOC25->Sup.len + 28); appendString(LOC24, ((NimStringDesc*) &T839829468_51)); appendString(LOC24, LOC25); internalerror_194113_155036129(LOC24); } LA22: ; } LA16: ; switch ((*t_532942_839829468).kind) { case ((Ttypekind290244) 22): case ((Ttypekind290244) 21): case ((Ttypekind290244) 23): { NimStringDesc* star0; Ttype290840* et0; Ttype290840* LOC38; Ttype290840* etb0; { NIM_BOOL LOC29; NIM_BOOL LOC30; NIM_BOOL LOC33; LOC29 = (NIM_BOOL)0; LOC30 = (NIM_BOOL)0; LOC30 = ((*t_532942_839829468).kind == ((Ttypekind290244) 23)); if (!(LOC30)) goto LA31; LOC30 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag290431) 18))&31U)))!=0)); LA31: ; LOC29 = LOC30; if (!(LOC29)) goto LA32; LOC33 = (NIM_BOOL)0; LOC33 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC33) goto LA34; LOC33 = (((*(*m0).module).flags 
&(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA34: ; LOC29 = LOC33; LA32: ; if (!LOC29) goto LA35; star0 = copyString(((NimStringDesc*) &T839829468_52)); } goto LA27; LA35: ; { star0 = copyString(((NimStringDesc*) &T839829468_53)); } LA27: ; LOC38 = (Ttype290840*)0; LOC38 = skiptypes_294099_850551059(typ0, IL64(211106232576256)); et0 = lastson_293377_850551059(LOC38); etb0 = skiptypes_294099_850551059(et0, IL64(211106232576256)); { if (!((*etb0).kind == ((Ttypekind290244) 4) || (*etb0).kind == ((Ttypekind290244) 16) || (*etb0).kind == ((Ttypekind290244) 27) || (*etb0).kind == ((Ttypekind290244) 48))) goto LA41; et0 = elemtype_318394_3876443242(etb0); etb0 = skiptypes_294099_850551059(et0, IL64(211106232576256)); star0->data[((NI) 0)] = 42; } LA41: ; switch ((*etb0).kind) { case ((Ttypekind290244) 17): case ((Ttypekind290244) 18): { { NIM_BOOL LOC46; Ropeobj177006* LOC50; LOC46 = (NIM_BOOL)0; LOC46 = isimportedcpptype_531476_839829468(etb0); if (!(LOC46)) goto LA47; LOC46 = ((*et0).kind == ((Ttypekind290244) 11)); LA47: ; if (!LOC46) goto LA48; LOC50 = (Ropeobj177006*)0; LOC50 = gettypedescaux_531503_839829468(m0, et0, check0); result0 = HEX26_177447_2381377266(LOC50, star0); } goto LA44; LA48: ; { Ttype290840* x0; Ropeobj177006* name0; Tidobj197004* LOC52; TNimObject* LOC53; x0 = getuniquetype_526640_2036603609(etb0); name0 = gettypeforward_532039_839829468(m0, x0); result0 = HEX26_177447_2381377266(name0, star0); LOC52 = (Tidobj197004*)0; LOC52 = &t_532942_839829468->Sup; LOC53 = (TNimObject*)0; LOC53 = &result0->Sup; idtableput_297094_2984716966((&(*m0).typecache), LOC52, LOC53); pushtype_531958_839829468(m0, x0); } LA44: ; } break; case ((Ttypekind290244) 24): { Ttype290840* x0; Ropeobj177006* name0; Ropeobj177006* LOC55; Tidobj197004* LOC56; TNimObject* LOC57; x0 = getuniquetype_526640_2036603609(etb0); name0 = gettypeforward_532039_839829468(m0, x0); LOC55 = (Ropeobj177006*)0; LOC55 = HEX26_177447_2381377266(name0, ((NimStringDesc*) &T839829468_53)); result0 
= HEX26_177447_2381377266(LOC55, star0); LOC56 = (Tidobj197004*)0; LOC56 = &t_532942_839829468->Sup; LOC57 = (TNimObject*)0; LOC57 = &result0->Sup; idtableput_297094_2984716966((&(*m0).typecache), LOC56, LOC57); pushtype_531958_839829468(m0, x0); } break; default: { Ropeobj177006* LOC59; Tidobj197004* LOC60; TNimObject* LOC61; LOC59 = (Ropeobj177006*)0; LOC59 = gettypedescaux_531503_839829468(m0, et0, check0); result0 = HEX26_177447_2381377266(LOC59, star0); LOC60 = (Tidobj197004*)0; LOC60 = &t_532942_839829468->Sup; LOC61 = (TNimObject*)0; LOC61 = &result0->Sup; idtableput_297094_2984716966((&(*m0).typecache), LOC60, LOC61); } break; } } break; case ((Ttypekind290244) 27): case ((Ttypekind290244) 48): { Ropeobj177006* LOC63; Tidobj197004* LOC64; TNimObject* LOC65; LOC63 = (Ropeobj177006*)0; LOC63 = gettypedescweak_532079_839829468(m0, (*t_532942_839829468).sons->data[((NI) 0)], check0); result0 = HEX26_177447_2381377266(LOC63, ((NimStringDesc*) &T839829468_53)); LOC64 = (Tidobj197004*)0; LOC64 = &t_532942_839829468->Sup; LOC65 = (TNimObject*)0; LOC65 = &result0->Sup; idtableput_297094_2984716966((&(*m0).typecache), LOC64, LOC65); } break; case ((Ttypekind290244) 20): case ((Ttypekind290244) 14): { Ttype290840* t0; { if (!((*t_532942_839829468).kind == ((Ttypekind290244) 20))) goto LA69; t0 = lastson_293377_850551059(t_532942_839829468); } goto LA67; LA69: ; { t0 = t_532942_839829468; } LA67: ; result0 = cachegettype_531591_839829468((*m0).typecache, t0); { if (!(result0 == NIM_NIL)) goto LA74; result0 = gettypename_531313_839829468(t0); { NIM_BOOL LOC78; NIM_BOOL LOC80; Tidobj197004* LOC84; TNimObject* LOC85; NI size0; NU32 owner0; LOC78 = (NIM_BOOL)0; LOC78 = isimportedcpptype_531476_839829468(t0); if (LOC78) goto LA79; LOC80 = (NIM_BOOL)0; LOC80 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag290184) 5))&31U)))!=0); if (!(LOC80)) goto LA81; LOC80 = ((*(*t0).sym).magic == ((Tmagic290524) 0)); LA81: ; LOC78 = LOC80; LA79: ; if (!!(LOC78)) goto LA82; LOC84 = 
(Tidobj197004*)0; LOC84 = &t0->Sup; LOC85 = (TNimObject*)0; LOC85 = &result0->Sup; idtableput_297094_2984716966((&(*m0).typecache), LOC84, LOC85); size0 = (NI)0; { NI64 LOC88; TY177507 LOC91; LOC88 = (NI64)0; LOC88 = firstord_318001_3876443242(t0); if (!(LOC88 < IL64(0))) goto LA89; memset((void*)LOC91, 0, sizeof(LOC91)); LOC91[0] = result0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 3))- 0], ((NimStringDesc*) &T839829468_59), LOC91, 1); size0 = ((NI) 4); } goto LA86; LA89: ; { NI64 LOC93; LOC93 = (NI64)0; LOC93 = getsize_318135_3876443242(t0); size0 = ((NI) (LOC93)); switch (size0) { case ((NI) 1): { TY177507 LOC95; memset((void*)LOC95, 0, sizeof(LOC95)); LOC95[0] = result0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 3))- 0], ((NimStringDesc*) &T839829468_60), LOC95, 1); } break; case ((NI) 2): { TY177507 LOC97; memset((void*)LOC97, 0, sizeof(LOC97)); LOC97[0] = result0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 3))- 0], ((NimStringDesc*) &T839829468_61), LOC97, 1); } break; case ((NI) 4): { TY177507 LOC99; memset((void*)LOC99, 0, sizeof(LOC99)); LOC99[0] = result0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 3))- 0], ((NimStringDesc*) &T839829468_59), LOC99, 1); } break; case ((NI) 8): { TY177507 LOC101; memset((void*)LOC101, 0, sizeof(LOC101)); LOC101[0] = result0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 3))- 0], ((NimStringDesc*) &T839829468_62), LOC101, 1); } break; default: { internalerror_194100_155036129((*(*t0).sym).info, ((NimStringDesc*) &T839829468_63)); } break; } } LA86: ; owner0 = hashowner_530977_839829468((*t0).sym); { NIM_BOOL LOC105; TY201017* vals0; Enumdesc201007 LOC114; LOC105 = (NIM_BOOL)0; LOC105 = hasenum_201230_1926258066(gdebuginfo_201470_1926258066, (*(*(*t0).sym).name).s, ((NI) ((*(*t0).sym).info.line)), owner0); if (!!(LOC105)) goto LA106; vals0 = (TY201017*) newSeq((&NTI201017), 0); { NI i_533144_839829468; NI HEX3Atmp_533648_839829468; NI LOC109; NI 
res_533651_839829468; i_533144_839829468 = (NI)0; HEX3Atmp_533648_839829468 = (NI)0; LOC109 = (NI)0; LOC109 = len_291081_850551059((*t0).n); HEX3Atmp_533648_839829468 = (NI)(LOC109 - ((NI) 1)); res_533651_839829468 = ((NI) 0); { while (1) { Tsym290834* field0; TY201018 LOC112; NimStringDesc* LOC113; if (!(res_533651_839829468 <= HEX3Atmp_533648_839829468)) goto LA111; i_533144_839829468 = res_533651_839829468; field0 = (*(*(*t0).n).kindU.S6.sons->data[i_533144_839829468]).kindU.S4.sym; memset((void*)(&LOC112), 0, sizeof(LOC112)); LOC112.Field0 = copyString((*(*field0).name).s); LOC112.Field1 = (*field0).position; vals0 = (TY201017*) incrSeqV2(&(vals0)->Sup, sizeof(TY201018)); LOC113 = (NimStringDesc*)0; LOC113 = vals0->data[vals0->Sup.len].Field0; vals0->data[vals0->Sup.len].Field0 = copyStringRC1(LOC112.Field0); if (LOC113) nimGCunrefNoCycle(LOC113); vals0->data[vals0->Sup.len].Field1 = LOC112.Field1; ++vals0->Sup.len; res_533651_839829468 += ((NI) 1); } LA111: ; } } memset((void*)(&LOC114), 0, sizeof(LOC114)); memset((void*)(&LOC114), 0, sizeof(LOC114)); LOC114.size = size0; LOC114.owner = owner0; LOC114.id = (*(*t0).sym).Sup.id; LOC114.name = copyString((*(*(*t0).sym).name).s); genericSeqAssign((&LOC114.values), vals0, (&NTI201017)); registerenum_201419_1926258066((&gdebuginfo_201470_1926258066), (&LOC114)); } LA106: ; } LA82: ; } LA74: ; } break; case ((Ttypekind290244) 25): { Tidobj197004* LOC116; TNimObject* LOC117; Ropeobj177006* rettype0; Ropeobj177006* desc0; result0 = gettypename_531313_839829468(t_532942_839829468); LOC116 = (Tidobj197004*)0; LOC116 = &t_532942_839829468->Sup; LOC117 = (TNimObject*)0; LOC117 = &result0->Sup; idtableput_297094_2984716966((&(*m0).typecache), LOC116, LOC117); rettype0 = (Ropeobj177006*)0; desc0 = (Ropeobj177006*)0; genprocparams_532115_839829468(m0, t_532942_839829468, &rettype0, &desc0, check0, NIM_TRUE, NIM_TRUE); { NIM_BOOL LOC120; LOC120 = (NIM_BOOL)0; LOC120 = isimportedtype_531449_839829468(t_532942_839829468); if 
(!!(LOC120)) goto LA121; { TY533235 LOC127; if (!!(((*t_532942_839829468).callconv == ((Tcallingconvention290002) 8)))) goto LA125; memset((void*)LOC127, 0, sizeof(LOC127)); LOC127[0] = rope_177277_2381377266(Callingconvtostr_531585_839829468[((*t_532942_839829468).callconv)- 0]); LOC127[1] = rettype0; LOC127[2] = result0; LOC127[3] = desc0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 3))- 0], ((NimStringDesc*) &T839829468_64), LOC127, 4); } goto LA123; LA125: ; { TY533238 LOC129; memset((void*)LOC129, 0, sizeof(LOC129)); LOC129[0] = result0; LOC129[1] = rettype0; LOC129[2] = desc0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 3))- 0], ((NimStringDesc*) &T839829468_75), LOC129, 3); } LA123: ; } LA121: ; } break; case ((Ttypekind290244) 24): { Tidobj197004* LOC144; Ropeobj177006* LOC145; TNimObject* LOC146; result0 = cachegettype_531591_839829468((*m0).forwtypecache, t_532942_839829468); { Tidobj197004* LOC142; TNimObject* LOC143; if (!(result0 == NIM_NIL)) goto LA133; result0 = gettypename_531313_839829468(t_532942_839829468); { NIM_BOOL LOC137; NimStringDesc* LOC140; TY530811 LOC141; LOC137 = (NIM_BOOL)0; LOC137 = isimportedtype_531449_839829468(t_532942_839829468); if (!!(LOC137)) goto LA138; LOC140 = (NimStringDesc*)0; LOC140 = getforwardstructformat_532015_839829468(m0); memset((void*)LOC141, 0, sizeof(LOC141)); LOC141[0] = structorunion_532001_839829468(t_532942_839829468); LOC141[1] = result0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 2))- 0], LOC140, LOC141, 2); } LA138: ; LOC142 = (Tidobj197004*)0; LOC142 = &t_532942_839829468->Sup; LOC143 = (TNimObject*)0; LOC143 = &result0->Sup; idtableput_297094_2984716966((&(*m0).forwtypecache), LOC142, LOC143); } LA133: ; LOC144 = (Tidobj197004*)0; LOC144 = &t_532942_839829468->Sup; LOC145 = (Ropeobj177006*)0; LOC145 = HEX26_177447_2381377266(result0, ((NimStringDesc*) &T839829468_53)); LOC146 = (TNimObject*)0; LOC146 = &LOC145->Sup; idtableput_297094_2984716966((&(*m0).typecache), 
LOC144, LOC146); { NIM_BOOL LOC149; LOC149 = (NIM_BOOL)0; LOC149 = isimportedtype_531449_839829468(t_532942_839829468); if (!!(LOC149)) goto LA150; { Ttype290840* LOC154; NimStringDesc* LOC157; NimStringDesc* LOC158; TY530811 LOC166; LOC154 = (Ttype290840*)0; LOC154 = skiptypes_294099_850551059((*t_532942_839829468).sons->data[((NI) 0)], IL64(211106232576256)); if (!!(((*LOC154).kind == ((Ttypekind290244) 3)))) goto LA155; LOC157 = (NimStringDesc*)0; LOC158 = (NimStringDesc*)0; { NIM_BOOL LOC161; LOC161 = (NIM_BOOL)0; LOC161 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC161) goto LA162; LOC161 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA162: ; if (!LOC161) goto LA163; LOC158 = copyString(((NimStringDesc*) &T839829468_76)); } goto LA159; LA163: ; { LOC158 = copyString(((NimStringDesc*) &T839829468_77)); } LA159: ; LOC157 = rawNewString(LOC158->Sup.len + 31); appendString(LOC157, LOC158); appendString(LOC157, ((NimStringDesc*) &T839829468_78)); memset((void*)LOC166, 0, sizeof(LOC166)); LOC166[0] = gettypedescaux_531503_839829468(m0, (*t_532942_839829468).sons->data[((NI) 0)], check0); LOC166[1] = result0; appcg_530632_839829468(m0, &(*m0).s[(((Tcfilesection527005) 4))- 0], LOC157, LOC166, 2); } goto LA152; LA155: ; { result0 = rope_177277_2381377266(((NimStringDesc*) &T839829468_79)); } LA152: ; } LA150: ; add_177487_2381377266(&result0, ((NimStringDesc*) &T839829468_53)); } break; case ((Ttypekind290244) 4): case ((Ttypekind290244) 16): { NI64 n0; Tidobj197004* LOC173; TNimObject* LOC174; n0 = lengthord_318007_3876443242(t_532942_839829468); { if (!(n0 <= IL64(0))) goto LA171; n0 = IL64(1); } LA171: ; result0 = gettypename_531313_839829468(t_532942_839829468); LOC173 = (Tidobj197004*)0; LOC173 = &t_532942_839829468->Sup; LOC174 = (TNimObject*)0; LOC174 = &result0->Sup; idtableput_297094_2984716966((&(*m0).typecache), LOC173, LOC174); { NIM_BOOL LOC177; Ropeobj177006* foo0; TY533238 LOC180; LOC177 = (NIM_BOOL)0; LOC177 = 
isimportedtype_531449_839829468(t_532942_839829468); if (!!(LOC177)) goto LA178; foo0 = gettypedescaux_531503_839829468(m0, (*t_532942_839829468).sons->data[((NI) 1)], check0); memset((void*)LOC180, 0, sizeof(LOC180)); LOC180[0] = foo0; LOC180[1] = result0; LOC180[2] = rope_177401_2381377266(n0); addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 3))- 0], ((NimStringDesc*) &T839829468_80), LOC180, 3); } LA178: ; } break; case ((Ttypekind290244) 17): case ((Ttypekind290244) 18): { { NIM_BOOL LOC184; Ropeobj177006* cppname0; NI i0; NI chunkstart0; Ropeobj177006* LOC226; LOC184 = (NIM_BOOL)0; LOC184 = isimportedcpptype_531476_839829468(t_532942_839829468); if (!(LOC184)) goto LA185; LOC184 = ((*typ0).kind == ((Ttypekind290244) 11)); LA185: ; if (!LOC184) goto LA186; cppname0 = gettypename_531313_839829468(t_532942_839829468); i0 = ((NI) 0); chunkstart0 = ((NI) 0); { while (1) { if (!(i0 < ((*cppname0).data ? (*cppname0).data->Sup.len : 0))) goto LA189; { NI chunkend0; NI idx0; NI stars0; if (!((NU8)((*cppname0).data->data[i0]) == (NU8)(39))) goto LA192; chunkend0 = (i0 - 1); idx0 = (NI)0; stars0 = (NI)0; { NIM_BOOL LOC196; NimStringDesc* LOC199; Ttype290840* typeinslot0; LOC196 = (NIM_BOOL)0; LOC196 = scancppgenericslot_532827_839829468((*cppname0).data, (&i0), (&idx0), (&stars0)); if (!LOC196) goto LA197; LOC199 = (NimStringDesc*)0; LOC199 = copyStrLast((*cppname0).data, chunkstart0, chunkend0); add_177487_2381377266(&result0, LOC199); chunkstart0 = i0; typeinslot0 = resolvestarsincpptype_532891_839829468(typ0, (NI)(idx0 + ((NI) 1)), stars0); { NIM_BOOL LOC202; TY531289 LOC206; Ropeobj177006* LOC207; LOC202 = (NIM_BOOL)0; LOC202 = (typeinslot0 == NIM_NIL); if (LOC202) goto LA203; LOC202 = ((*typeinslot0).kind == ((Ttypekind290244) 62)); LA203: ; if (!LOC202) goto LA204; memset((void*)LOC206, 0, sizeof(LOC206)); LOC207 = (Ropeobj177006*)0; LOC207 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_26), LOC206, 0); add_177482_2381377266(&result0, LOC207); } 
goto LA200; LA204: ; { Ropeobj177006* LOC209; LOC209 = (Ropeobj177006*)0; LOC209 = gettypedescaux_531503_839829468(m0, typeinslot0, check0); add_177482_2381377266(&result0, LOC209); } LA200: ; } LA197: ; } goto LA190; LA192: ; { i0 += ((NI) 1); } LA190: ; } LA189: ; } { NimStringDesc* LOC215; if (!!((chunkstart0 == ((NI) 0)))) goto LA213; LOC215 = (NimStringDesc*)0; LOC215 = copyStr((*cppname0).data, chunkstart0); add_177487_2381377266(&result0, LOC215); } goto LA211; LA213: ; { result0 = HEX26_177447_2381377266(cppname0, ((NimStringDesc*) &T839829468_82)); { NI i_533516_839829468; NI HEX3Atmp_533664_839829468; NI LOC218; NI res_533667_839829468; i_533516_839829468 = (NI)0; HEX3Atmp_533664_839829468 = (NI)0; LOC218 = (NI)0; LOC218 = len_293339_850551059(typ0); HEX3Atmp_533664_839829468 = (NI)(LOC218 - ((NI) 2)); res_533667_839829468 = ((NI) 1); { while (1) { Ropeobj177006* LOC225; if (!(res_533667_839829468 <= HEX3Atmp_533664_839829468)) goto LA220; i_533516_839829468 = res_533667_839829468; { if (!(((NI) 1) < i_533516_839829468)) goto LA223; add_177487_2381377266(&result0, ((NimStringDesc*) &T839829468_83)); } LA223: ; LOC225 = (Ropeobj177006*)0; LOC225 = gettypedescaux_531503_839829468(m0, (*typ0).sons->data[i_533516_839829468], check0); add_177482_2381377266(&result0, LOC225); res_533667_839829468 += ((NI) 1); } LA220: ; } } add_177487_2381377266(&result0, ((NimStringDesc*) &T839829468_84)); } LA211: ; LOC226 = (Ropeobj177006*)0; LOC226 = getrecorddesc_532643_839829468(m0, t_532942_839829468, result0, check0); } goto LA182; LA186: ; { Tidobj197004* LOC241; TNimObject* LOC242; Ropeobj177006* recdesc0; result0 = cachegettype_531591_839829468((*m0).forwtypecache, t_532942_839829468); { Tidobj197004* LOC239; TNimObject* LOC240; if (!(result0 == NIM_NIL)) goto LA230; result0 = gettypename_531313_839829468(t_532942_839829468); { NIM_BOOL LOC234; NimStringDesc* LOC237; TY530811 LOC238; LOC234 = (NIM_BOOL)0; LOC234 = isimportedtype_531449_839829468(t_532942_839829468); 
if (!!(LOC234)) goto LA235; LOC237 = (NimStringDesc*)0; LOC237 = getforwardstructformat_532015_839829468(m0); memset((void*)LOC238, 0, sizeof(LOC238)); LOC238[0] = structorunion_532001_839829468(t_532942_839829468); LOC238[1] = result0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 2))- 0], LOC237, LOC238, 2); } LA235: ; LOC239 = (Tidobj197004*)0; LOC239 = &t_532942_839829468->Sup; LOC240 = (TNimObject*)0; LOC240 = &result0->Sup; idtableput_297094_2984716966((&(*m0).forwtypecache), LOC239, LOC240); } LA230: ; LOC241 = (Tidobj197004*)0; LOC241 = &t_532942_839829468->Sup; LOC242 = (TNimObject*)0; LOC242 = &result0->Sup; idtableput_297094_2984716966((&(*m0).typecache), LOC241, LOC242); { if (!!(((*t_532942_839829468).kind == ((Ttypekind290244) 18)))) goto LA245; recdesc0 = getrecorddesc_532643_839829468(m0, t_532942_839829468, result0, check0); } goto LA243; LA245: ; { recdesc0 = gettupledesc_532777_839829468(m0, t_532942_839829468, result0, check0); } LA243: ; { NIM_BOOL LOC250; LOC250 = (NIM_BOOL)0; LOC250 = isimportedtype_531449_839829468(t_532942_839829468); if (!!(LOC250)) goto LA251; add_177482_2381377266(&(*m0).s[(((Tcfilesection527005) 3))- 0], recdesc0); } LA251: ; } LA182: ; } break; case ((Ttypekind290244) 19): { Ttype290840* LOC254; Ropeobj177006* LOC255; Tidobj197004* LOC256; TNimObject* LOC257; LOC254 = (Ttype290840*)0; LOC254 = lastson_293377_850551059(t_532942_839829468); LOC255 = (Ropeobj177006*)0; LOC255 = gettypename_531313_839829468(LOC254); result0 = HEX26_177447_2381377266(LOC255, ((NimStringDesc*) &T839829468_105)); LOC256 = (Tidobj197004*)0; LOC256 = &t_532942_839829468->Sup; LOC257 = (TNimObject*)0; LOC257 = &result0->Sup; idtableput_297094_2984716966((&(*m0).typecache), LOC256, LOC257); { NIM_BOOL LOC260; NI s0; NI64 LOC263; LOC260 = (NIM_BOOL)0; LOC260 = isimportedtype_531449_839829468(t_532942_839829468); if (!!(LOC260)) goto LA261; LOC263 = (NI64)0; LOC263 = getsize_318135_3876443242(t_532942_839829468); s0 = ((NI) (LOC263)); 
switch (s0) { case ((NI) 1): case ((NI) 2): case ((NI) 4): case ((NI) 8): { TY530811 LOC265; memset((void*)LOC265, 0, sizeof(LOC265)); LOC265[0] = result0; LOC265[1] = rope_177401_2381377266(((NI64) ((NI)(s0 * ((NI) 8))))); addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 3))- 0], ((NimStringDesc*) &T839829468_106), LOC265, 2); } break; default: { TY530811 LOC267; NI64 LOC268; memset((void*)LOC267, 0, sizeof(LOC267)); LOC267[0] = result0; LOC268 = (NI64)0; LOC268 = getsize_318135_3876443242(t_532942_839829468); LOC267[1] = rope_177401_2381377266(LOC268); addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 3))- 0], ((NimStringDesc*) &T839829468_107), LOC267, 2); } break; } } LA261: ; } break; case ((Ttypekind290244) 11): case ((Ttypekind290244) 13): case ((Ttypekind290244) 15): case ((Ttypekind290244) 46): case ((Ttypekind290244) 47): case ((Ttypekind290244) 49): case ((Ttypekind290244) 8): { Ttype290840* LOC270; LOC270 = (Ttype290840*)0; LOC270 = lastson_293377_850551059(t_532942_839829468); result0 = gettypedescaux_531503_839829468(m0, LOC270, check0); } break; default: { NimStringDesc* LOC272; LOC272 = (NimStringDesc*)0; LOC272 = rawNewString(reprEnum((NI)(*t_532942_839829468).kind, (&NTI290244))->Sup.len + 16); appendString(LOC272, ((NimStringDesc*) &T839829468_108)); appendString(LOC272, reprEnum((NI)(*t_532942_839829468).kind, (&NTI290244))); appendChar(LOC272, 41); internalerror_194113_155036129(LOC272); result0 = NIM_NIL; } break; } excl_266841_2627731572(check0, (*t_532942_839829468).Sup.id); }BeforeRet: ; return result0; } static N_INLINE(NIM_BOOL, iscompiletimeonly_326706_3876443242)(Ttype290840* t0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = ((*t0).kind == ((Ttypekind290244) 8) || (*t0).kind == ((Ttypekind290244) 59)); return result0; } N_NIMCALL(Tstorageloc290812, paramstorageloc_532098_839829468)(Tsym290834* param0) { Tstorageloc290812 result0; result0 = (Tstorageloc290812)0; { Ttype290840* LOC3; LOC3 = (Ttype290840*)0; LOC3 = 
skiptypes_294099_850551059((*param0).typ, 8388864); if (!!(((*LOC3).kind == ((Ttypekind290244) 16) || (*LOC3).kind == ((Ttypekind290244) 27) || (*LOC3).kind == ((Ttypekind290244) 48) || (*LOC3).kind == ((Ttypekind290244) 4)))) goto LA4; result0 = ((Tstorageloc290812) 2); } goto LA1; LA4: ; { result0 = ((Tstorageloc290812) 0); } LA1: ; return result0; } N_NIMCALL(NIM_BOOL, ccgintroducedptr_531609_839829468)(Tsym290834* s0) { NIM_BOOL result0; Ttype290840* pt0; { result0 = (NIM_BOOL)0; pt0 = skiptypes_294099_850551059((*s0).typ, IL64(211106232576256)); { if (!(((*pt0).flags &(1U<<((NU)(((Ttypeflag290431) 13))&31U)))!=0)) goto LA3; result0 = NIM_TRUE; goto BeforeRet; } goto LA1; LA3: ; { if (!(((*pt0).flags &(1U<<((NU)(((Ttypeflag290431) 12))&31U)))!=0)) goto LA6; result0 = NIM_FALSE; goto BeforeRet; } goto LA1; LA6: ; LA1: ; switch ((*pt0).kind) { case ((Ttypekind290244) 17): { { NIM_BOOL LOC11; NI64 LOC13; LOC11 = (NIM_BOOL)0; LOC11 = (((*s0).options &(1U<<((NU)(((Toption168009) 18))&31U)))!=0); if (LOC11) goto LA12; LOC13 = (NI64)0; LOC13 = getsize_318135_3876443242(pt0); LOC11 = (((NI64) ((NI)(floatsize_175642_4151366050 * ((NI) 2)))) < LOC13); LA12: ; if (!LOC11) goto LA14; result0 = NIM_TRUE; } goto LA9; LA14: ; { NIM_BOOL LOC17; LOC17 = (NIM_BOOL)0; LOC17 = (((*pt0).flags &(1U<<((NU)(((Ttypeflag290431) 2))&31U)))!=0); if (!(LOC17)) goto LA18; LOC17 = ((*pt0).sons->data[((NI) 0)] == NIM_NIL); LA18: ; if (!LOC17) goto LA19; result0 = NIM_FALSE; } goto LA9; LA19: ; { result0 = NIM_TRUE; } LA9: ; } break; case ((Ttypekind290244) 18): { NIM_BOOL LOC23; NI64 LOC24; LOC23 = (NIM_BOOL)0; LOC24 = (NI64)0; LOC24 = getsize_318135_3876443242(pt0); LOC23 = (((NI64) ((NI)(floatsize_175642_4151366050 * ((NI) 2)))) < LOC24); if (LOC23) goto LA25; LOC23 = (((*s0).options &(1U<<((NU)(((Toption168009) 18))&31U)))!=0); LA25: ; result0 = LOC23; } break; default: { result0 = NIM_FALSE; } break; } }BeforeRet: ; return result0; } N_NIMCALL(Tctypekind527007, 
mapreturntype_531445_839829468)(Ttype290840* typ0) { Tctypekind527007 result0; result0 = (Tctypekind527007)0; result0 = maptype_531393_839829468(typ0); return result0; } N_NIMCALL(void, genprocparams_532115_839829468)(Tcgen527027* m0, Ttype290840* t0, Ropeobj177006** rettype0, Ropeobj177006** params0, Intset266030* check0, NIM_BOOL declareenvironment0, NIM_BOOL weakdep0) { unsureAsgnRef((void**) (&(*params0)), NIM_NIL); { NIM_BOOL LOC3; TY531289 LOC7; LOC3 = (NIM_BOOL)0; LOC3 = ((*t0).sons->data[((NI) 0)] == NIM_NIL); if (LOC3) goto LA4; LOC3 = isinvalidreturntype_531548_839829468((*t0).sons->data[((NI) 0)]); LA4: ; if (!LOC3) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); unsureAsgnRef((void**) (&(*rettype0)), HEX25_177905_2381377266(((NimStringDesc*) &T839829468_26), LOC7, 0)); } goto LA1; LA5: ; { unsureAsgnRef((void**) (&(*rettype0)), gettypedescaux_531503_839829468(m0, (*t0).sons->data[((NI) 0)], check0)); } LA1: ; { NI i_532152_839829468; NI HEX3Atmp_532353_839829468; NI LOC10; NI res_532356_839829468; i_532152_839829468 = (NI)0; HEX3Atmp_532353_839829468 = (NI)0; LOC10 = (NI)0; LOC10 = sonslen_293351_850551059((*t0).n); HEX3Atmp_532353_839829468 = (NI)(LOC10 - ((NI) 1)); res_532356_839829468 = ((NI) 1); { while (1) { if (!(res_532356_839829468 <= HEX3Atmp_532353_839829468)) goto LA12; i_532152_839829468 = res_532356_839829468; { Tsym290834* param0; Ropeobj177006* LOC29; Tstorageloc290812 LOC30; TY531289 LOC45; Ropeobj177006* LOC46; Ttype290840* arr0; NI j0; { if (!!(((*(*(*t0).n).kindU.S6.sons->data[i_532152_839829468]).kind == ((Tnodekind290020) 3)))) goto LA16; internalerror_194100_155036129((*(*t0).n).info, ((NimStringDesc*) &T839829468_109)); } LA16: ; param0 = (*(*(*t0).n).kindU.S6.sons->data[i_532152_839829468]).kindU.S4.sym; { NIM_BOOL LOC20; LOC20 = (NIM_BOOL)0; LOC20 = iscompiletimeonly_326706_3876443242((*param0).typ); if (!LOC20) goto LA21; goto LA13; } LA21: ; { TY531289 LOC27; Ropeobj177006* LOC28; if (!!(((*params0) == NIM_NIL))) goto LA25; 
memset((void*)LOC27, 0, sizeof(LOC27)); LOC28 = (Ropeobj177006*)0; LOC28 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_110), LOC27, 0); add_177482_2381377266(params0, LOC28); } LA25: ; LOC29 = (Ropeobj177006*)0; LOC29 = manglename_531205_839829468(param0); LOC30 = (Tstorageloc290812)0; LOC30 = paramstorageloc_532098_839829468(param0); fillloc_530282_839829468((&(*param0).loc), ((Tlockind290808) 4), (*param0).typ, LOC29, LOC30); { NIM_BOOL LOC33; Ropeobj177006* LOC36; TY531289 LOC37; Ropeobj177006* LOC38; LOC33 = (NIM_BOOL)0; LOC33 = ccgintroducedptr_531609_839829468(param0); if (!LOC33) goto LA34; LOC36 = (Ropeobj177006*)0; LOC36 = gettypedescweak_532079_839829468(m0, (*param0).typ, check0); add_177482_2381377266(params0, LOC36); memset((void*)LOC37, 0, sizeof(LOC37)); LOC38 = (Ropeobj177006*)0; LOC38 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_53), LOC37, 0); add_177482_2381377266(params0, LOC38); (*param0).loc.flags |= ((NU16)1)<<((((Tlocflag290810) 0))%(sizeof(NU16)*8)); (*param0).loc.s = ((Tstorageloc290812) 0); } goto LA31; LA34: ; { Ropeobj177006* LOC42; if (!weakdep0) goto LA40; LOC42 = (Ropeobj177006*)0; LOC42 = gettypedescweak_532079_839829468(m0, (*param0).typ, check0); add_177482_2381377266(params0, LOC42); } goto LA31; LA40: ; { Ropeobj177006* LOC44; LOC44 = (Ropeobj177006*)0; LOC44 = gettypedescaux_531503_839829468(m0, (*param0).typ, check0); add_177482_2381377266(params0, LOC44); } LA31: ; memset((void*)LOC45, 0, sizeof(LOC45)); LOC46 = (Ropeobj177006*)0; LOC46 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_111), LOC45, 0); add_177482_2381377266(params0, LOC46); add_177482_2381377266(params0, (*param0).loc.r); arr0 = (*param0).typ; { if (!((*arr0).kind == ((Ttypekind290244) 23))) goto LA49; arr0 = (*arr0).sons->data[((NI) 0)]; } LA49: ; j0 = ((NI) 0); { while (1) { TY530811 LOC57; if (!((*arr0).kind == ((Ttypekind290244) 27) || (*arr0).kind == ((Ttypekind290244) 48))) goto LA52; { if (!((*(*param0).typ).kind == 
((Ttypekind290244) 23))) goto LA55; (*param0).loc.s = ((Tstorageloc290812) 0); } LA55: ; memset((void*)LOC57, 0, sizeof(LOC57)); LOC57[0] = (*param0).loc.r; LOC57[1] = rope_177401_2381377266(((NI64) (j0))); addf_178205_2381377266(params0, ((NimStringDesc*) &T839829468_112), LOC57, 2); j0 += ((NI) 1); arr0 = (*arr0).sons->data[((NI) 0)]; } LA52: ; } } LA13: ; res_532356_839829468 += ((NI) 1); } LA12: ; } } { NIM_BOOL LOC60; Ttype290840* arr0; TY531289 LOC76; LOC60 = (NIM_BOOL)0; LOC60 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL)); if (!(LOC60)) goto LA61; LOC60 = isinvalidreturntype_531548_839829468((*t0).sons->data[((NI) 0)]); LA61: ; if (!LOC60) goto LA62; arr0 = (*t0).sons->data[((NI) 0)]; { if (!!(((*params0) == NIM_NIL))) goto LA66; add_177487_2381377266(params0, ((NimStringDesc*) &T839829468_110)); } LA66: ; { Tctypekind527007 LOC70; Ropeobj177006* LOC73; LOC70 = (Tctypekind527007)0; LOC70 = mapreturntype_531445_839829468((*t0).sons->data[((NI) 0)]); if (!!((LOC70 == ((Tctypekind527007) 17)))) goto LA71; LOC73 = (Ropeobj177006*)0; LOC73 = gettypedescweak_532079_839829468(m0, arr0, check0); add_177482_2381377266(params0, LOC73); add_177487_2381377266(params0, ((NimStringDesc*) &T839829468_53)); } goto LA68; LA71: ; { Ropeobj177006* LOC75; LOC75 = (Ropeobj177006*)0; LOC75 = gettypedescaux_531503_839829468(m0, arr0, check0); add_177482_2381377266(params0, LOC75); } LA68: ; memset((void*)LOC76, 0, sizeof(LOC76)); addf_178205_2381377266(params0, ((NimStringDesc*) &T839829468_113), LOC76, 0); } LA62: ; { NIM_BOOL LOC79; LOC79 = (NIM_BOOL)0; LOC79 = ((*t0).callconv == ((Tcallingconvention290002) 8)); if (!(LOC79)) goto LA80; LOC79 = declareenvironment0; LA80: ; if (!LOC79) goto LA81; { if (!!(((*params0) == NIM_NIL))) goto LA85; add_177487_2381377266(params0, ((NimStringDesc*) &T839829468_110)); } LA85: ; add_177487_2381377266(params0, ((NimStringDesc*) &T839829468_114)); } LA81: ; { if (!(((*t0).flags &(1U<<((NU)(((Ttypeflag290431) 0))&31U)))!=0)) goto LA89; { if 
(!!(((*params0) == NIM_NIL))) goto LA93; add_177487_2381377266(params0, ((NimStringDesc*) &T839829468_110)); } LA93: ; add_177487_2381377266(params0, ((NimStringDesc*) &T839829468_115)); } LA89: ; { if (!((*params0) == NIM_NIL)) goto LA97; add_177487_2381377266(params0, ((NimStringDesc*) &T839829468_116)); } goto LA95; LA97: ; { add_177487_2381377266(params0, ((NimStringDesc*) &T839829468_117)); } LA95: ; unsureAsgnRef((void**) (&(*params0)), HEX26_177452_2381377266(((NimStringDesc*) &T839829468_118), (*params0))); } N_NIMCALL(Ropeobj177006*, genprocheader_533867_839829468)(Tcgen527027* m0, Tsym290834* prc0) { Ropeobj177006* result0; Ropeobj177006* rettype0; Ropeobj177006* params0; Intset266030 check0; Ropeobj177006* LOC13; result0 = (Ropeobj177006*)0; rettype0 = (Ropeobj177006*)0; params0 = (Ropeobj177006*)0; genclinedir_530813_839829468(&result0, (*prc0).info); { if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag290810) 5))&15U)))!=0)) goto LA3; { if (!(((*m0).flags &(1U<<((NU)(((Codegenflag527025) 3))&7U)))!=0)) goto LA7; add_177487_2381377266(&result0, ((NimStringDesc*) &T839829468_22)); } goto LA5; LA7: ; { add_177487_2381377266(&result0, ((NimStringDesc*) &T839829468_23)); } LA5: ; } goto LA1; LA3: ; { if (!((*(*prc0).typ).callconv == ((Tcallingconvention290002) 5))) goto LA11; add_177487_2381377266(&result0, ((NimStringDesc*) &T839829468_24)); } goto LA1; LA11: ; LA1: ; memset((void*)(&check0), 0, sizeof(check0)); chckNil((void*)(&check0)); memset((void*)(&check0), 0, sizeof(check0)); initintset_266885_2627731572((&check0)); LOC13 = (Ropeobj177006*)0; LOC13 = manglename_531205_839829468(prc0); fillloc_530282_839829468((&(*prc0).loc), ((Tlockind290808) 7), (*prc0).typ, LOC13, ((Tstorageloc290812) 0)); genprocparams_532115_839829468(m0, (*prc0).typ, &rettype0, &params0, (&check0), NIM_TRUE, NIM_FALSE); { TY533235 LOC18; if (!(*prc0).constraint == 0) goto LA16; memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = 
rope_177277_2381377266(Callingconvtostr_531585_839829468[((*(*prc0).typ).callconv)- 0]); LOC18[1] = rettype0; LOC18[2] = (*prc0).loc.r; LOC18[3] = params0; addf_178205_2381377266(&result0, ((NimStringDesc*) &T839829468_119), LOC18, 4); } goto LA14; LA16: ; { TY533238 LOC20; memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = rettype0; LOC20[1] = (*prc0).loc.r; LOC20[2] = params0; result0 = HEX25_177905_2381377266((*(*prc0).constraint).kindU.S3.strval, LOC20, 3); } LA14: ; return result0; } static N_INLINE(Tnode290802*, HEX5BHEX5D_291238_850551059)(Tnode290802* n0, NI i0) { Tnode290802* result0; result0 = (Tnode290802*)0; result0 = (*n0).kindU.S6.sons->data[i0]; return result0; } N_NIMCALL(Tnode290802*, easyresultasgn_558191_839829468)(Tnode290802* n0) { Tnode290802* result0; { result0 = (Tnode290802*)0; switch ((*n0).kind) { case ((Tnodekind290020) 115): case ((Tnodekind290020) 126): { NI i0; i0 = ((NI) 0); { while (1) { NIM_BOOL LOC4; NI LOC5; Tnode290802* LOC7; LOC4 = (NIM_BOOL)0; LOC5 = (NI)0; LOC5 = len_291081_850551059(n0); LOC4 = (i0 < LOC5); if (!(LOC4)) goto LA6; LOC7 = (Tnode290802*)0; LOC7 = HEX5BHEX5D_291238_850551059(n0, i0); LOC4 = ((*LOC7).kind == ((Tnodekind290020) 1) || (*LOC7).kind >= ((Tnodekind290020) 79) && (*LOC7).kind <= ((Tnodekind290020) 81) || (*LOC7).kind == ((Tnodekind290020) 84) || (*LOC7).kind == ((Tnodekind290020) 98) || (*LOC7).kind == ((Tnodekind290020) 101) || (*LOC7).kind == ((Tnodekind290020) 125)); LA6: ; if (!LOC4) goto LA3; i0 += ((NI) 1); } LA3: ; } { NI LOC10; Tnode290802* LOC13; LOC10 = (NI)0; LOC10 = len_291081_850551059(n0); if (!(i0 < LOC10)) goto LA11; LOC13 = (Tnode290802*)0; LOC13 = HEX5BHEX5D_291238_850551059(n0, i0); result0 = easyresultasgn_558191_839829468(LOC13); } LA11: ; } break; case ((Tnodekind290020) 73): case ((Tnodekind290020) 74): { { NIM_BOOL LOC17; Tnode290802* LOC18; Tnode290802* LOC20; LOC17 = (NIM_BOOL)0; LOC18 = (Tnode290802*)0; LOC18 = HEX5BHEX5D_291238_850551059(n0, ((NI) 0)); LOC17 = ((*LOC18).kind 
== ((Tnodekind290020) 3)); if (!(LOC17)) goto LA19; LOC20 = (Tnode290802*)0; LOC20 = HEX5BHEX5D_291238_850551059(n0, ((NI) 0)); LOC17 = (((Tsymkind290435) 11) == (*(*LOC20).kindU.S4.sym).kind); LA19: ; if (!LOC17) goto LA21; (*n0).flags |= ((NU16)1)<<((((Tnodeflag290427) 14))%(sizeof(NU16)*8)); result0 = HEX5BHEX5D_291238_850551059(n0, ((NI) 1)); goto BeforeRet; } LA21: ; } break; case ((Tnodekind290020) 109): { { NI LOC26; Tnode290802* LOC29; LOC26 = (NI)0; LOC26 = len_291081_850551059(n0); if (!(((NI) 0) < LOC26)) goto LA27; LOC29 = (Tnode290802*)0; LOC29 = HEX5BHEX5D_291238_850551059(n0, ((NI) 0)); result0 = easyresultasgn_558191_839829468(LOC29); { if (!!((result0 == NIM_NIL))) goto LA32; (*n0).flags |= ((NU16)1)<<((((Tnodeflag290427) 14))%(sizeof(NU16)*8)); } LA32: ; } LA27: ; } break; default: { } break; } }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj177006*, gettypedesc_533671_839829468)(Tcgen527027* m0, Ttype290840* typ0) { Ropeobj177006* result0; Intset266030 check0; result0 = (Ropeobj177006*)0; memset((void*)(&check0), 0, sizeof(check0)); chckNil((void*)(&check0)); memset((void*)(&check0), 0, sizeof(check0)); initintset_266885_2627731572((&check0)); result0 = gettypedescaux_531503_839829468(m0, typ0, (&check0)); return result0; } N_NIMCALL(Ropeobj177006*, localvardecl_536532_839829468)(Tcproc527021* p0, Tsym290834* s0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; { Ropeobj177006* LOC5; if (!((*s0).loc.k == ((Tlockind290808) 0))) goto LA3; LOC5 = (Ropeobj177006*)0; LOC5 = manglename_531205_839829468(s0); fillloc_530282_839829468((&(*s0).loc), ((Tlockind290808) 2), (*s0).typ, LOC5, ((Tstorageloc290812) 2)); { if (!((*s0).kind == ((Tsymkind290435) 9))) goto LA8; (*s0).loc.flags |= ((NU16)1)<<((((Tlocflag290810) 2))%(sizeof(NU16)*8)); } LA8: ; } LA3: ; result0 = gettypedesc_533671_839829468((*p0).module, (*s0).loc.t); { if (!(*s0).constraint == 0) goto LA12; { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag290184) 8))&31U)))!=0)) goto LA16; 
add_177487_2381377266(&result0, ((NimStringDesc*) &T839829468_121)); } LA16: ; { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag290184) 7))&31U)))!=0)) goto LA20; add_177487_2381377266(&result0, ((NimStringDesc*) &T839829468_122)); } LA20: ; add_177487_2381377266(&result0, ((NimStringDesc*) &T839829468_111)); add_177482_2381377266(&result0, (*s0).loc.r); } goto LA10; LA12: ; { TY530811 LOC23; memset((void*)LOC23, 0, sizeof(LOC23)); LOC23[0] = result0; LOC23[1] = (*s0).loc.r; result0 = HEX25_177905_2381377266((*(*s0).constraint).kindU.S3.strval, LOC23, 2); } LA10: ; return result0; } N_NIMCALL(void, initloc_530273_839829468)(Tloc290816* result0, Tlockind290808 k0, Ttype290840* typ0, Tstorageloc290812 s0) { (*result0).k = k0; (*result0).s = s0; unsureAsgnRef((void**) (&(*result0).t), typ0); unsureAsgnRef((void**) (&(*result0).r), NIM_NIL); (*result0).flags = 0; } N_NIMCALL(void, initlocexprsingleuse_537289_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* result0) { initloc_530273_839829468(result0, ((Tlockind290808) 0), (*e0).typ, ((Tstorageloc290812) 0)); (*result0).flags |= ((NU16)1)<<((((Tlocflag290810) 8))%(sizeof(NU16)*8)); expr_537248_839829468(p0, e0, result0); } static N_INLINE(Ropeobj177006**, s_527179_3723162438)(Tcproc527021* p0, Tcprocsection527011 s0) { Ropeobj177006** result0; result0 = (Ropeobj177006**)0; result0 = &(*p0).blocks->data[(NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1))].sections[(s0)- 0]; return result0; } N_NIMCALL(Ropeobj177006*, indentline_530656_839829468)(Tcproc527021* p0, Ropeobj177006* r0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; result0 = r0; { NI i_530680_839829468; NI HEX3Atmp_530683_839829468; NI res_530686_839829468; i_530680_839829468 = (NI)0; HEX3Atmp_530683_839829468 = (NI)0; HEX3Atmp_530683_839829468 = (NI)(((*p0).blocks ? 
(*p0).blocks->Sup.len : 0) - ((NI) 1)); res_530686_839829468 = ((NI) 0); { while (1) { if (!(res_530686_839829468 <= HEX3Atmp_530683_839829468)) goto LA3; i_530680_839829468 = res_530686_839829468; prepend_177893_2381377266(&result0, indent_530655_839829468); res_530686_839829468 += ((NI) 1); } LA3: ; } } return result0; } N_NIMCALL(void, linefmt_530714_839829468)(Tcproc527021* p0, Tcprocsection527011 s0, NimStringDesc* frmt0, Ropeobj177006** args0, NI args0Len0) { Ropeobj177006** LOC1; Ropeobj177006* LOC2; Ropeobj177006* LOC3; LOC1 = (Ropeobj177006**)0; LOC1 = s_527179_3723162438(p0, s0); LOC2 = (Ropeobj177006*)0; LOC2 = ropecg_530407_839829468((*p0).module, frmt0, args0, args0Len0); LOC3 = (Ropeobj177006*)0; LOC3 = indentline_530656_839829468(p0, LOC2); add_177482_2381377266(LOC1, LOC3); } N_NIMCALL(Ropeobj177006*, rdloc_536188_839829468)(Tloc290816 a0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; result0 = a0.r; { TY177507 LOC5; if (!((a0.flags &(1U<<((NU)(((Tlocflag290810) 0))&15U)))!=0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = result0; result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_124), LOC5, 1); } LA3: ; return result0; } N_NIMCALL(void, line_530690_839829468)(Tcproc527021* p0, Tcprocsection527011 s0, Ropeobj177006* r0) { Ropeobj177006** LOC1; Ropeobj177006* LOC2; LOC1 = (Ropeobj177006**)0; LOC1 = s_527179_3723162438(p0, s0); LOC2 = (Ropeobj177006*)0; LOC2 = indentline_530656_839829468(p0, r0); add_177482_2381377266(LOC1, LOC2); } N_NIMCALL(void, linef_530700_839829468)(Tcproc527021* p0, Tcprocsection527011 s0, NimStringDesc* frmt0, Ropeobj177006** args0, NI args0Len0) { Ropeobj177006** LOC1; Ropeobj177006* LOC2; Ropeobj177006* LOC3; LOC1 = (Ropeobj177006**)0; LOC1 = s_527179_3723162438(p0, s0); LOC2 = (Ropeobj177006*)0; LOC2 = HEX25_177905_2381377266(frmt0, args0, args0Len0); LOC3 = (Ropeobj177006*)0; LOC3 = indentline_530656_839829468(p0, LOC2); add_177482_2381377266(LOC1, LOC3); } N_NIMCALL(void, 
gentypeinfoauxbase_533960_839829468)(Tcgen527027* m0, Ttype290840* typ0, Ttype290840* origtype0, Ropeobj177006* name0, Ropeobj177006* base0) { NI nimtypekind0; Ropeobj177006* size0; TY533235 LOC17; NI flags0; Ropeobj177006* LOC33; TY530811 LOC34; NimStringDesc* LOC35; nimtypekind0 = (NI)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = isobjlackingtypefield_531513_839829468(typ0); if (!LOC3) goto LA4; nimtypekind0 = ((NI) 18); } goto LA1; LA4: ; { nimtypekind0 = ((NI) ((*typ0).kind)); } LA1: ; size0 = (Ropeobj177006*)0; { if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag290431) 0))&31U)))!=0)) goto LA9; size0 = rope_177277_2381377266(((NimStringDesc*) &T839829468_133)); } goto LA7; LA9: ; { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC12) goto LA13; LOC12 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA13: ; if (!LOC12) goto LA14; size0 = gettypedesc_533671_839829468(m0, origtype0); } goto LA7; LA14: ; { size0 = gettypedesc_533671_839829468(m0, typ0); } LA7: ; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = name0; LOC17[1] = size0; LOC17[2] = rope_177401_2381377266(((NI64) (nimtypekind0))); LOC17[3] = base0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_134), LOC17, 4); flags0 = ((NI) 0); { NIM_BOOL LOC20; LOC20 = (NIM_BOOL)0; LOC20 = containsgarbagecollectedref_318117_3876443242(typ0); if (!!(LOC20)) goto LA21; flags0 = (NI)(flags0 | ((NI) 1)); } LA21: ; { NIM_BOOL LOC25; LOC25 = (NIM_BOOL)0; LOC25 = canformacycle_318123_3876443242(typ0); if (!!(LOC25)) goto LA26; flags0 = (NI)(flags0 | ((NI) 2)); } LA26: ; { TY530811 LOC32; if (!!((flags0 == ((NI) 0)))) goto LA30; memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = name0; LOC32[1] = rope_177401_2381377266(((NI64) (flags0))); addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_135), LOC32, 2); } LA30: ; LOC33 = (Ropeobj177006*)0; LOC33 = 
cgsym_530403_839829468(m0, ((NimStringDesc*) &T839829468_129)); memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = name0; LOC35 = (NimStringDesc*)0; LOC35 = typetostring_318017_3876443242(typ0, ((Tprefereddesc318011) 0)); LOC34[1] = rope_177277_2381377266(LOC35); addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 9))- 0], ((NimStringDesc*) &T839829468_136), LOC34, 2); } N_NIMCALL(Ropeobj177006*, getnimnode_533945_839829468)(Tcgen527027* m0) { Ropeobj177006* result0; TY530811 LOC1; result0 = (Ropeobj177006*)0; memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = (*m0).typenodesname; LOC1[1] = rope_177401_2381377266(((NI64) ((*m0).typenodes))); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_138), LOC1, 2); (*m0).typenodes += ((NI) 1); return result0; } N_NIMCALL(void, gentupleinfo_534549_839829468)(Tcgen527027* m0, Ttype290840* typ0, Ropeobj177006* name0) { Ropeobj177006* LOC1; Ropeobj177006* expr0; NI length0; TY530811 LOC15; LOC1 = (Ropeobj177006*)0; LOC1 = rope_177277_2381377266(((NimStringDesc*) &T839829468_18)); gentypeinfoauxbase_533960_839829468(m0, typ0, typ0, name0, LOC1); expr0 = getnimnode_533945_839829468(m0); length0 = sonslen_293327_850551059(typ0); { Ropeobj177006* tmp0; TY530811 LOC6; TY533238 LOC12; if (!(((NI) 0) < length0)) goto LA4; tmp0 = gettempname_531596_839829468(m0); memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = tmp0; LOC6[1] = rope_177401_2381377266(((NI64) (length0))); addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC6, 2); { NI i_534571_839829468; NI HEX3Atmp_534590_839829468; NI res_534593_839829468; i_534571_839829468 = (NI)0; HEX3Atmp_534590_839829468 = (NI)0; HEX3Atmp_534590_839829468 = (NI)(length0 - ((NI) 1)); res_534593_839829468 = ((NI) 0); { while (1) { Ttype290840* a0; Ropeobj177006* tmp20; TY533238 LOC10; TY533235 LOC11; if (!(res_534593_839829468 <= HEX3Atmp_534590_839829468)) goto LA9; i_534571_839829468 = res_534593_839829468; a0 = 
/* NOTE(review): This file is machine-generated C emitted by the Nim compiler's C
 * backend (mangled names such as gentypeinfo_533941_839829468, N_NIMCALL, Ropeobj
 * rope buffers). Do not hand-edit the logic: it will be clobbered when the Nim
 * sources are regenerated. Comments below only annotate function boundaries. */
/* Tail of a function that begins before this chunk (appears to emit RTTI node
 * initialisation into C file section 14) — left exactly as generated. */
(*typ0).sons->data[i_534571_839829468]; tmp20 = getnimnode_533945_839829468(m0); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = tmp0; LOC10[1] = rope_177401_2381377266(((NI64) (i_534571_839829468))); LOC10[2] = tmp20; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC10, 3); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = tmp20; LOC11[1] = gettypedesc_533671_839829468(m0, typ0); LOC11[2] = rope_177401_2381377266(((NI64) (i_534571_839829468))); LOC11[3] = gentypeinfo_533941_839829468(m0, a0); addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_141), LOC11, 4); res_534593_839829468 += ((NI) 1); } LA9: ; } } memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = expr0; LOC12[1] = rope_177401_2381377266(((NI64) (length0))); LOC12[2] = tmp0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_142), LOC12, 3); } goto LA2; LA4: ; { TY530811 LOC14; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = expr0; LOC14[1] = rope_177401_2381377266(((NI64) (length0))); addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_143), LOC14, 2); } LA2: ; memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = name0; LOC15[1] = expr0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_144), LOC15, 2); }
/* fakeclosuretype: builds a synthetic Nim type node tree (newtype/rawaddson with
 * hard-coded Ttypekind constants 18/26/22) owned by `owner0`. The exact kinds'
 * meaning is defined in the Nim compiler sources — not visible here. */
N_NIMCALL(Ttype290840*, fakeclosuretype_535010_839829468)(Tsym290834* owner0) { Ttype290840* result0; Ttype290840* LOC1; Ttype290840* r0; Ttype290840* LOC2; result0 = (Ttype290840*)0; result0 = newtype_293107_850551059(((Ttypekind290244) 18), owner0); LOC1 = (Ttype290840*)0; LOC1 = newtype_293107_850551059(((Ttypekind290244) 26), owner0); rawaddson_294394_850551059(result0, LOC1); r0 = newtype_293107_850551059(((Ttypekind290244) 22), owner0); LOC2 = (Ttype290840*)0; LOC2 = newtype_293107_850551059(((Ttypekind290244) 18), owner0); rawaddson_294394_850551059(r0, LOC2); rawaddson_294394_850551059(result0, r0); return result0; }
/* gentypeinfoaux: computes the RTTI `base` rope for `typ0` — recurses into
 * sons[0] when present (skipping wrapper types for kind 17, presumably object),
 * otherwise uses the literal T..._18 — then delegates to gentypeinfoauxbase. */
N_NIMCALL(void, gentypeinfoaux_534027_839829468)(Tcgen527027* m0, Ttype290840* typ0, Ttype290840* origtype0, Ropeobj177006* name0) { Ropeobj177006* base0; base0 = (Ropeobj177006*)0; { NIM_BOOL LOC3; NI LOC4; Ttype290840* x0; LOC3 = (NIM_BOOL)0; LOC4 = (NI)0; LOC4 = sonslen_293327_850551059(typ0); LOC3 = (((NI) 0) < LOC4); if (!(LOC3)) goto LA5; LOC3 = !(((*typ0).sons->data[((NI) 0)] == NIM_NIL)); LA5: ; if (!LOC3) goto LA6; x0 = (*typ0).sons->data[((NI) 0)]; { if (!((*typ0).kind == ((Ttypekind290244) 17))) goto LA10; x0 = skiptypes_294099_850551059(x0, IL64(211106247215360)); } LA10: ; base0 = gentypeinfo_533941_839829468(m0, x0); } goto LA1; LA6: ; { base0 = rope_177277_2381377266(((NimStringDesc*) &T839829468_18)); } LA1: ; gentypeinfoauxbase_533960_839829468(m0, typ0, origtype0, name0, base0); }
/* iscomplexvaluetype: true when the type kind is one of {16,4,19,18,17} or a
 * kind-25 (proc?) type with calling convention 8 (closure? — TODO confirm
 * against Nim's Ttypekind/Tcallingconvention enums). */
static N_INLINE(NIM_BOOL, iscomplexvaluetype_536317_839829468)(Ttype290840* t0) { NIM_BOOL result0; NIM_BOOL LOC1; NIM_BOOL LOC3; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = ((*t0).kind == ((Ttypekind290244) 16) || (*t0).kind == ((Ttypekind290244) 4) || (*t0).kind == ((Ttypekind290244) 19) || (*t0).kind == ((Ttypekind290244) 18) || (*t0).kind == ((Ttypekind290244) 17)); if (LOC1) goto LA2; LOC3 = (NIM_BOOL)0; LOC3 = ((*t0).kind == ((Ttypekind290244) 25)); if (!(LOC3)) goto LA4; LOC3 = ((*t0).callconv == ((Tcallingconvention290002) 8)); LA4: ; LOC1 = LOC3; LA2: ; result0 = LOC1; return result0; }
/* usestringh: idempotently records a header-file inclusion (guarded by flag
 * bit 4 of the module) — presumably <string.h>, judging by the name. */
N_NIMCALL(void, usestringh_530345_839829468)(Tcgen527027* m0) { { NIM_BOOL LOC5; if (!!((((*m0).flags &(1U<<((NU)(((Codegenflag527025) 4))&7U)))!=0))) goto LA3; (*m0).flags |= ((NU8)1)<<((((Codegenflag527025) 4))%(sizeof(NU8)*8)); LOC5 = (NIM_BOOL)0; LOC5 = includestr_147249_3771138726((&(*m0).headerfiles), ((NimStringDesc*) &T839829468_151)); } LA3: ; }
/* addrloc: renders the C address-of expression for a location; wraps the rope
 * in "&(...)" (via string constants) unless loc flag bit 0 is set or the mapped
 * C type is kind 17. */
N_NIMCALL(Ropeobj177006*, addrloc_536204_839829468)(Tloc290816 a0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; result0 = a0.r; { NIM_BOOL LOC3; Tctypekind527007 LOC5; Ropeobj177006* LOC8; LOC3 = (NIM_BOOL)0; LOC3 = !(((a0.flags &(1U<<((NU)(((Tlocflag290810) 0))&15U)))!=0)); if (!(LOC3)) goto LA4; LOC5 = (Tctypekind527007)0; LOC5 = maptype_531393_839829468(a0.t); LOC3 = !((LOC5 == ((Tctypekind527007) 17))); LA4: ; if (!LOC3) goto LA6; LOC8 = (Ropeobj177006*)0; LOC8 = HEX26_177452_2381377266(((NimStringDesc*) &T839829468_128), result0); result0 = HEX26_177447_2381377266(LOC8, ((NimStringDesc*) &T839829468_117)); } LA6: ; return result0; }
/* genobjectinit: emits code that initialises an object's hidden type field,
 * switching on analyseobjectwithtypefield (0 = nothing, 1 = single type field
 * — walks to the root base unless compiling in mode gcmd==2 or the module has
 * symbol-flag 27, 2 = init via helper taking an address). */
N_NIMCALL(void, genobjectinit_536242_839829468)(Tcproc527021* p0, Tcprocsection527011 section0, Ttype290840* t0, Tloc290816 a0, NIM_BOOL takeaddr0) { Ttypefieldresult318145 LOC1; LOC1 = (Ttypefieldresult318145)0; LOC1 = analyseobjectwithtypefield_318149_3876443242(t0); switch (LOC1) { case ((Ttypefieldresult318145) 0): { } break; case ((Ttypefieldresult318145) 1): { Ropeobj177006* r0; Ttype290840* s0; TY530811 LOC19; r0 = rdloc_536188_839829468(a0); { TY177507 LOC8; if (!!(takeaddr0)) goto LA6; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = r0; r0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_124), LOC8, 1); } LA6: ; s0 = skiptypes_294099_850551059(t0, IL64(211106232576256)); { NIM_BOOL LOC11; LOC11 = (NIM_BOOL)0; LOC11 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC11) goto LA12; LOC11 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA12: ; if (!!(LOC11)) goto LA13; { while (1) { NIM_BOOL LOC17; LOC17 = (NIM_BOOL)0; LOC17 = ((*s0).kind == ((Ttypekind290244) 17)); if (!(LOC17)) goto LA18; LOC17 = !(((*s0).sons->data[((NI) 0)] == NIM_NIL)); LA18: ; if (!LOC17) goto LA16; add_177487_2381377266(&r0, ((NimStringDesc*) &T839829468_153)); s0 = skiptypes_294099_850551059((*s0).sons->data[((NI) 0)], IL64(211106247215360)); } LA16: ; } } LA13: ; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = r0; LOC19[1] = gentypeinfo_533941_839829468((*p0).module, t0); linefmt_530714_839829468(p0, section0, ((NimStringDesc*) &T839829468_154), LOC19, 2); } break; case ((Ttypefieldresult318145) 2): { Ropeobj177006* r0; TY530811 LOC26; { if (!takeaddr0) goto LA23; r0 = addrloc_536204_839829468(a0); } goto LA21; LA23: ; { r0 = rdloc_536188_839829468(a0); } LA21: ; memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = r0; LOC26[1] = gentypeinfo_533941_839829468((*p0).module, t0); linefmt_530714_839829468(p0, section0, ((NimStringDesc*) &T839829468_155), LOC26, 2); } break; } }
/* constructloc: emits default construction for a location — a simple formatted
 * assignment for non-complex value types; otherwise (unless it is a temp without
 * GC'd refs) a memset-style clear (via usestringh) followed by genobjectinit. */
N_NIMCALL(void, constructloc_536388_839829468)(Tcproc527021* p0, Tloc290816 loc0, NIM_BOOL istemp0) { Ttype290840* typ0; typ0 = skiptypes_294099_850551059(loc0.t, IL64(211106233624832)); { NIM_BOOL LOC3; TY530811 LOC6; LOC3 = (NIM_BOOL)0; LOC3 = iscomplexvaluetype_536317_839829468(typ0); if (!!(LOC3)) goto LA4; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = rdloc_536188_839829468(loc0); LOC6[1] = gettypedesc_533671_839829468((*p0).module, typ0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_150), LOC6, 2); } goto LA1; LA4: ; { { NIM_BOOL LOC10; LOC10 = (NIM_BOOL)0; LOC10 = !(istemp0); if (LOC10) goto LA11; LOC10 = containsgarbagecollectedref_318117_3876443242(loc0.t); LA11: ; if (!LOC10) goto LA12; { NIM_BOOL LOC16; TY530811 LOC19; LOC16 = (NIM_BOOL)0; LOC16 = isimportedcpptype_531476_839829468(typ0); if (!!(LOC16)) goto LA17; usestringh_530345_839829468((*p0).module); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = addrloc_536204_839829468(loc0); LOC19[1] = rdloc_536188_839829468(loc0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_152), LOC19, 2); } LA17: ; } LA12: ; genobjectinit_536242_839829468(p0, ((Tcprocsection527011) 2), loc0.t, loc0, NIM_TRUE); } LA1: ; }
/* gettemp: allocates a fresh temporary C variable for type `t0` in proc `p0`
 * (unique name from p0->labels), emits its declaration into section 0, fills in
 * the out-param `result0` loc, and default-constructs it via constructloc. */
N_NIMCALL(void, gettemp_535032_839829468)(Tcproc527021* p0, Ttype290840* t0, Tloc290816* result0, NIM_BOOL needsinit0) { Ropeobj177006* LOC1; TY530811 LOC2; (*p0).labels += ((NI) 1); LOC1 = (Ropeobj177006*)0; LOC1 = rope_177401_2381377266(((NI64) ((*p0).labels))); unsureAsgnRef((void**) (&(*result0).r), HEX26_177452_2381377266(((NimStringDesc*) &T839829468_149), LOC1)); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = gettypedesc_533671_839829468((*p0).module, t0); LOC2[1] = (*result0).r; linefmt_530714_839829468(p0, ((Tcprocsection527011) 0), ((NimStringDesc*) &T839829468_54), LOC2, 2); (*result0).k = ((Tlockind290808) 1); unsureAsgnRef((void**) (&(*result0).t), t0); (*result0).s = ((Tstorageloc290812) 2); (*result0).flags = 0; constructloc_536388_839829468(p0, (*result0), !(needsinit0)); }
/* parentobj: wraps `accessor0` in a parent-object access pattern (format
 * T..._161) unless gcmd==2 or module symbol-flag 27 is set, in which case the
 * accessor is returned unchanged. */
static N_INLINE(Ropeobj177006*, parentobj_535257_839829468)(Ropeobj177006* accessor0, Tcgen527027* m0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; { NIM_BOOL LOC3; TY177507 LOC7; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA4: ; if (!!(LOC3)) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = accessor0; result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_161), LOC7, 1); } goto LA1; LA5: ; { result0 = accessor0; } LA1: ; return result0; }
/* intliteral: renders an int literal rope — plain decimal inside the int32
 * range; special formats for INT32_MIN, values above INT64_MIN, and INT64_MIN
 * itself (which cannot be written as a plain C literal). */
N_NIMCALL(Ropeobj177006*, intliteral_537270_839829468)(NI64 i0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (IL64(-2147483648) < i0); if (!(LOC3)) goto LA4; LOC3 = (i0 <= IL64(2147483647)); LA4: ; if (!LOC3) goto LA5; result0 = rope_177401_2381377266(i0); } goto LA1; LA5: ; { TY531289 LOC10; if (!(i0 == IL64(-2147483648))) goto LA8; memset((void*)LOC10, 0, sizeof(LOC10)); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_166), LOC10, 0); } goto LA1; LA8: ; { TY177507 LOC14; if (!((IL64(-9223372036854775807) - IL64(1)) < i0)) goto LA12; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rope_177401_2381377266(i0); result0 = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_167), LOC14, 1); } goto LA1; LA12: ; { TY531289 LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_168), LOC16, 0); } LA1: ; return result0; }
/* int64literal: like intliteral but always uses the 64-bit literal format,
 * with the INT64_MIN special case. */
N_NIMCALL(Ropeobj177006*, int64literal_547430_839829468)(NI64 i0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; { TY177507 LOC5; if (!((IL64(-9223372036854775807) - IL64(1)) < i0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rope_177401_2381377266(i0); result0 = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_167), LOC5, 1); } goto LA1; LA3: ; { TY531289 LOC7; memset((void*)LOC7, 0, sizeof(LOC7)); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_168), LOC7, 0); } LA1: ; return result0; }
/* uint64literal: formats an unsigned 64-bit literal as <digits> plus a 3-char
 * suffix string constant (T..._171 — presumably "ULL"). */
N_NIMCALL(Ropeobj177006*, uint64literal_547442_839829468)(NU64 i0) { Ropeobj177006* result0; NimStringDesc* LOC1; NimStringDesc* LOC2; result0 = (Ropeobj177006*)0; LOC1 = (NimStringDesc*)0; LOC2 = (NimStringDesc*)0; LOC2 = HEX24_8401_1689653243(i0); LOC1 = rawNewString(LOC2->Sup.len + 3); appendString(LOC1, LOC2); appendString(LOC1, ((NimStringDesc*) &T839829468_171)); result0 = rope_177277_2381377266(LOC1); return result0; }
/* getstrlit: interns a Nim string literal — emits a named C string constant
 * (escaped via makecstring, with its length) into file section 8 and returns
 * the generated temp name. */
N_NIMCALL(Ropeobj177006*, getstrlit_547468_839829468)(Tcgen527027* m0, NimStringDesc* s0) { Ropeobj177006* result0; Ropeobj177006* LOC1; TY533238 LOC2; result0 = (Ropeobj177006*)0; LOC1 = (Ropeobj177006*)0; LOC1 = cgsym_530403_839829468(m0, ((NimStringDesc*) &T839829468_79)); result0 = gettempname_531596_839829468(m0); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = result0; LOC2[1] = makecstring_189638_155036129(s0); LOC2[2] = rope_177401_2381377266(((NI64) ((s0 ? s0->Sup.len : 0)))); addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 8))- 0], ((NimStringDesc*) &T839829468_177), LOC2, 3); return result0; }
/* genliteral (3-arg): renders a literal AST node of type `ty0` as a C
 * expression rope — integer/bool/char kinds, nil/closure literals (cached in
 * the module's datacache), string/cstring literals, float literals; anything
 * else is an internal error. */
N_NIMCALL(Ropeobj177006*, genliteral_547476_839829468)(Tcproc527021* p0, Tnode290802* n0, Ttype290840* ty0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; { if (!(ty0 == NIM_NIL)) goto LA3; internalerror_194100_155036129((*n0).info, ((NimStringDesc*) &T839829468_165)); } LA3: ; switch ((*n0).kind) { case ((Tnodekind290020) 5) ... ((Tnodekind290020) 15): { Ttype290840* LOC6; LOC6 = (Ttype290840*)0; LOC6 = skiptypes_294099_850551059(ty0, IL64(211106242013440)); switch ((*LOC6).kind) { case ((Ttypekind290244) 2): case ((Ttypekind290244) 5): { result0 = intliteral_537270_839829468((*n0).kindU.S1.intval); } break; case ((Ttypekind290244) 1): { { TY531289 LOC13; if (!!(((*n0).kindU.S1.intval == IL64(0)))) goto LA11; memset((void*)LOC13, 0, sizeof(LOC13)); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_169), LOC13, 0); } goto LA9; LA11: ; { TY531289 LOC15; memset((void*)LOC15, 0, sizeof(LOC15)); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_170), LOC15, 0); } LA9: ; } break; case ((Ttypekind290244) 35): { result0 = int64literal_547430_839829468((*n0).kindU.S1.intval); } break; case ((Ttypekind290244) 44): { result0 = uint64literal_547442_839829468(((NU64) ((*n0).kindU.S1.intval))); } break; default: { TY530811 LOC19; Ttype290840* LOC20; memset((void*)LOC19, 0, sizeof(LOC19)); LOC20 = (Ttype290840*)0; LOC20 = skiptypes_294099_850551059(ty0, IL64(211106242013440)); LOC19[0] = gettypedesc_533671_839829468((*p0).module, LOC20); LOC19[1] = intliteral_537270_839829468((*n0).kindU.S1.intval); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_172), LOC19, 2); } break; } } break; case ((Tnodekind290020) 23): { Ttype290840* t0; t0 = skiptypes_294099_850551059(ty0, IL64(211106242013440)); { NIM_BOOL LOC24; NI id0; Ropeobj177006* LOC28; LOC24 = (NIM_BOOL)0; LOC24 = ((*t0).kind == ((Ttypekind290244) 25)); if (!(LOC24)) goto LA25; LOC24 = ((*t0).callconv == ((Tcallingconvention290002) 8)); LA25: ; if (!LOC24) goto LA26; id0 = nodetabletestorset_340682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels))); LOC28 = (Ropeobj177006*)0; LOC28 = rope_177401_2381377266(((NI64) (id0))); result0 = HEX26_177418_2381377266((*(*p0).module).tmpbase, LOC28); { TY530811 LOC33; if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA31; (*(*p0).module).labels += ((NI) 1); memset((void*)LOC33, 0, sizeof(LOC33)); LOC33[0] = gettypedesc_533671_839829468((*p0).module, t0); LOC33[1] = result0; addf_178205_2381377266(&(*(*p0).module).s[(((Tcfilesection527005) 8))- 0], ((NimStringDesc*) &T839829468_173), LOC33, 2); } LA31: ; } goto LA22; LA26: ; { result0 = rope_177277_2381377266(((NimStringDesc*) &T839829468_174)); } LA22: ; } break; case ((Tnodekind290020) 20) ... ((Tnodekind290020) 22): { { TY531289 LOC40; if (!(*n0).kindU.S3.strval == 0) goto LA38; memset((void*)LOC40, 0, sizeof(LOC40)); result0 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_175), LOC40, 0); } goto LA36; LA38: ; { Ttype290840* LOC42; NI id0; LOC42 = (Ttype290840*)0; LOC42 = skiptypes_294099_850551059(ty0, IL64(211106242013440)); if (!((*LOC42).kind == ((Ttypekind290244) 28))) goto LA43; id0 = nodetabletestorset_340682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels))); { TY177507 LOC49; if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA47; memset((void*)LOC49, 0, sizeof(LOC49)); LOC49[0] = getstrlit_547468_839829468((*p0).module, (*n0).kindU.S3.strval); result0 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_176), LOC49, 1); } goto LA45; LA47: ; { TY530811 LOC51; memset((void*)LOC51, 0, sizeof(LOC51)); LOC51[0] = (*(*p0).module).tmpbase; LOC51[1] = rope_177401_2381377266(((NI64) (id0))); result0 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_178), LOC51, 2); } LA45: ; } goto LA36; LA43: ; { result0 = makecstring_189638_155036129((*n0).kindU.S3.strval); } LA36: ; } break; case ((Tnodekind290020) 16) ... ((Tnodekind290020) 18): { NimStringDesc* LOC54; LOC54 = (NimStringDesc*)0; LOC54 = tostrmaxprecision_296007_3471544153((*n0).kindU.S2.floatval); result0 = rope_177277_2381377266(LOC54); } break; default: { NimStringDesc* LOC56; LOC56 = (NimStringDesc*)0; LOC56 = rawNewString(reprEnum((NI)(*n0).kind, (&NTI290020))->Sup.len + 12); appendString(LOC56, ((NimStringDesc*) &T839829468_179)); appendString(LOC56, reprEnum((NI)(*n0).kind, (&NTI290020))); appendChar(LOC56, 41); internalerror_194100_155036129((*n0).info, LOC56); result0 = NIM_NIL; } break; } return result0; }
/* genliteral (2-arg): convenience overload that takes the type from the node. */
N_NIMCALL(Ropeobj177006*, genliteral_537273_839829468)(Tcproc527021* p0, Tnode290802* n0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; result0 = genliteral_547476_839829468(p0, n0, (*n0).typ); return result0; }
/* gencaserange: emits C `case` labels for one Nim case branch — GNU-style
 * range labels when the C compiler supports them (Cc table property bit 0),
 * otherwise one label per value in the range; plain labels for non-ranges. */
N_NIMCALL(void, gencaserange_535028_839829468)(Tcproc527021* p0, Tnode290802* branch0) { NI length0; length0 = len_291081_850551059(branch0); { NI j_545676_839829468; NI HEX3Atmp_545717_839829468; NI res_545720_839829468; j_545676_839829468 = (NI)0; HEX3Atmp_545717_839829468 = (NI)0; HEX3Atmp_545717_839829468 = (NI)(length0 - ((NI) 2)); res_545720_839829468 = ((NI) 0); { while (1) { if (!(res_545720_839829468 <= HEX3Atmp_545717_839829468)) goto LA3; j_545676_839829468 = res_545720_839829468; { Tnode290802* LOC6; LOC6 = (Tnode290802*)0; LOC6 = HEX5BHEX5D_291238_850551059(branch0, j_545676_839829468); if (!((*LOC6).kind == ((Tnodekind290020) 44))) goto LA7; { TY530811 LOC13; Tnode290802* LOC14; Tnode290802* LOC15; Tnode290802* LOC16; Tnode290802* LOC17; if (!((Cc_271413_2528170400[(ccompiler_271431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop271004) 0))&7U)))!=0)) goto LA11; memset((void*)LOC13, 0, sizeof(LOC13)); LOC14 = (Tnode290802*)0; LOC14 = HEX5BHEX5D_291238_850551059(branch0, j_545676_839829468); LOC15 = (Tnode290802*)0; LOC15 = HEX5BHEX5D_291238_850551059(LOC14, ((NI) 0)); LOC13[0] = genliteral_537273_839829468(p0, LOC15); LOC16 = (Tnode290802*)0; LOC16 = HEX5BHEX5D_291238_850551059(branch0, j_545676_839829468); LOC17 = (Tnode290802*)0; LOC17 = HEX5BHEX5D_291238_850551059(LOC16, ((NI) 1)); LOC13[1] = genliteral_537273_839829468(p0, LOC17); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_164), LOC13, 2); } goto LA9; LA11: ; { Tnode290802* v0; Tnode290802* LOC19; Tnode290802* LOC20; LOC19 = (Tnode290802*)0; LOC19 = HEX5BHEX5D_291238_850551059(branch0, j_545676_839829468); LOC20 = (Tnode290802*)0; LOC20 = HEX5BHEX5D_291238_850551059(LOC19, ((NI) 0)); v0 = copynode_294528_850551059(LOC20); { while (1) { Tnode290802* LOC23; Tnode290802* LOC24; TY177507 LOC25; LOC23 = (Tnode290802*)0; LOC23 = HEX5BHEX5D_291238_850551059(branch0, j_545676_839829468); LOC24 = (Tnode290802*)0; LOC24 = HEX5BHEX5D_291238_850551059(LOC23, ((NI) 1)); if (!((*v0).kindU.S1.intval <= (*LOC24).kindU.S1.intval)) goto LA22; memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = genliteral_537273_839829468(p0, v0); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_180), LOC25, 1); (*v0).kindU.S1.intval += ((NI) 1); } LA22: ; } } LA9: ; } goto LA4; LA7: ; { TY177507 LOC27; Tnode290802* LOC28; memset((void*)LOC27, 0, sizeof(LOC27)); LOC28 = (Tnode290802*)0; LOC28 = HEX5BHEX5D_291238_850551059(branch0, j_545676_839829468); LOC27[0] = genliteral_537273_839829468(p0, LOC28); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_180), LOC27, 1); } LA4: ; res_545720_839829468 += ((NI) 1); } LA3: ; } } }
/* gentraverseproc (node variant): walks an object's record AST (node kinds
 * 138 = record list, 139 = record case, 3 = symbol/field) emitting GC-traversal
 * code for each field; unknown node kinds are an internal error. */
N_NIMCALL(void, gentraverseproc_535039_839829468)(Ttraversalclosure535019* c0, Ropeobj177006* accessor0, Tnode290802* n0) { { { if (!(n0 == NIM_NIL)) goto LA3; goto BeforeRet; } LA3: ; switch ((*n0).kind) { case ((Tnodekind290020) 138): { { NI i_535068_839829468; NI HEX3Atmp_535239_839829468; NI LOC7; NI res_535242_839829468; i_535068_839829468 = (NI)0; HEX3Atmp_535239_839829468 = (NI)0; LOC7 = (NI)0; LOC7 = sonslen_293351_850551059(n0); HEX3Atmp_535239_839829468 = (NI)(LOC7 - ((NI) 1)); res_535242_839829468 = ((NI) 0); { while (1) { if (!(res_535242_839829468 <= HEX3Atmp_535239_839829468)) goto LA9; i_535068_839829468 = res_535242_839829468; gentraverseproc_535039_839829468(c0, accessor0, (*n0).kindU.S6.sons->data[i_535068_839829468]); res_535242_839829468 += ((NI) 1); } LA9: ; } } } break; case ((Tnodekind290020) 139): { Tcproc527021* p0; Tsym290834* disc0; TY530811 LOC15; TY531289 LOC28; { if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 3)))) goto LA13; internalerror_194100_155036129((*n0).info, ((NimStringDesc*) &T839829468_162)); } LA13: ; p0 = (*c0).p; disc0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = accessor0; LOC15[1] = (*disc0).loc.r; linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_163), LOC15, 2); { NI i_535098_839829468; NI HEX3Atmp_535249_839829468; NI LOC17; NI res_535252_839829468; i_535098_839829468 = (NI)0; HEX3Atmp_535249_839829468 = (NI)0; LOC17 = (NI)0; LOC17 = sonslen_293351_850551059(n0); HEX3Atmp_535249_839829468 = (NI)(LOC17 - ((NI) 1)); res_535252_839829468 = ((NI) 1); { while (1) { Tnode290802* branch0; Tnode290802* LOC26; TY531289 LOC27; if (!(res_535252_839829468 <= HEX3Atmp_535249_839829468)) goto LA19; i_535098_839829468 = res_535252_839829468; branch0 = (*n0).kindU.S6.sons->data[i_535098_839829468]; { if (!((*branch0).kind == ((Tnodekind290020) 85))) goto LA22; gencaserange_535028_839829468((*c0).p, branch0); } goto LA20; LA22: ; { TY531289 LOC25; memset((void*)LOC25, 0, sizeof(LOC25)); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_181), LOC25, 0); } LA20: ; LOC26 = (Tnode290802*)0; LOC26 = lastson_293364_850551059(branch0); gentraverseproc_535039_839829468(c0, accessor0, LOC26); memset((void*)LOC27, 0, sizeof(LOC27)); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_182), LOC27, 0); res_535252_839829468 += ((NI) 1); } LA19: ; } } memset((void*)LOC28, 0, sizeof(LOC28)); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_183), LOC28, 0); } break; case ((Tnodekind290020) 3): { Tsym290834* field0; TY530811 LOC34; Ropeobj177006* LOC35; field0 = (*n0).kindU.S4.sym; { if (!((*field0).loc.t == NIM_NIL)) goto LA32; internalerror_194100_155036129((*n0).info, ((NimStringDesc*) &T839829468_184)); } LA32: ; memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = accessor0; LOC34[1] = (*field0).loc.r; LOC35 = (Ropeobj177006*)0; LOC35 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_90), LOC34, 2); gentraverseproc_535022_839829468(c0, LOC35, (*field0).loc.t); } break; default: { internalerror_194100_155036129((*n0).info, ((NimStringDesc*) &T839829468_184)); } break; } }BeforeRet: ; }
/* linecg: formats `frmt0` with args through ropecg, indents it, and appends to
 * the requested proc section. */
N_NIMCALL(void, linecg_530707_839829468)(Tcproc527021* p0, Tcprocsection527011 s0, NimStringDesc* frmt0, Ropeobj177006** args0, NI args0Len0) { Ropeobj177006** LOC1; Ropeobj177006* LOC2; Ropeobj177006* LOC3; LOC1 = (Ropeobj177006**)0; LOC1 = s_527179_3723162438(p0, s0); LOC2 = (Ropeobj177006*)0; LOC2 = ropecg_530407_839829468((*p0).module, frmt0, args0, args0Len0); LOC3 = (Ropeobj177006*)0; LOC3 = indentline_530656_839829468(p0, LOC2); add_177482_2381377266(LOC1, LOC3); }
/* gentraverseproc (type variant): dispatches on the unique type's kind to emit
 * GC-traversal code — wrappers recurse into lastson; arrays emit an index loop;
 * objects walk sons plus the record AST; tuples index each field; ref/seq/string
 * kinds call the closure's visitor format; closure procs visit their env. */
N_NIMCALL(void, gentraverseproc_535022_839829468)(Ttraversalclosure535019* c0, Ropeobj177006* accessor0, Ttype290840* typ_535027_839829468) { Ttype290840* typ_535302_839829468; Tcproc527021* p0; { { if (!(typ_535027_839829468 == NIM_NIL)) goto LA3; goto BeforeRet; } LA3: ; typ_535302_839829468 = getuniquetype_526640_2036603609(typ_535027_839829468); p0 = (*c0).p; switch ((*typ_535302_839829468).kind) { case ((Ttypekind290244) 11): case ((Ttypekind290244) 10): case ((Ttypekind290244) 8): { Ttype290840* LOC6; LOC6 = (Ttype290840*)0; LOC6 = lastson_293377_850551059(typ_535302_839829468); gentraverseproc_535022_839829468(c0, accessor0, LOC6); } break; case ((Ttypekind290244) 4): case ((Ttypekind290244) 16): { NI64 arraysize0; Tloc290816 i0; Ttype290840* LOC8; TY530811 LOC9; TY530811 LOC10; Ropeobj177006* LOC11; TY531289 LOC12; arraysize0 = lengthord_318007_3876443242((*typ_535302_839829468).sons->data[((NI) 0)]); memset((void*)(&i0), 0, sizeof(i0)); LOC8 = (Ttype290840*)0; LOC8 = getsystype_336150_3937434831(((Ttypekind290244) 31)); gettemp_535032_839829468(p0, LOC8, (&i0), NIM_FALSE); memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = i0.r; LOC9[1] = rope_177401_2381377266(arraysize0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_159), LOC9, 2); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = accessor0; LOC10[1] = i0.r; LOC11 = (Ropeobj177006*)0; LOC11 = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC10, 2); gentraverseproc_535022_839829468(c0, LOC11, (*typ_535302_839829468).sons->data[((NI) 1)]); memset((void*)LOC12, 0, sizeof(LOC12)); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_160), LOC12, 0); } break; case ((Ttypekind290244) 17): { { NI i_535325_839829468; NI HEX3Atmp_535384_839829468; NI LOC15; NI res_535387_839829468; i_535325_839829468 = (NI)0; HEX3Atmp_535384_839829468 = (NI)0; LOC15 = (NI)0; LOC15 = sonslen_293327_850551059(typ_535302_839829468); HEX3Atmp_535384_839829468 = (NI)(LOC15 - ((NI) 1)); res_535387_839829468 = ((NI) 0); { while (1) { Ttype290840* x0; Ropeobj177006* LOC22; if (!(res_535387_839829468 <= HEX3Atmp_535384_839829468)) goto LA17; i_535325_839829468 = res_535387_839829468; x0 = (*typ_535302_839829468).sons->data[i_535325_839829468]; { if (!!((x0 == NIM_NIL))) goto LA20; x0 = skiptypes_294099_850551059(x0, IL64(211106247215360)); } LA20: ; LOC22 = (Ropeobj177006*)0; LOC22 = parentobj_535257_839829468(accessor0, (*(*c0).p).module); gentraverseproc_535022_839829468(c0, LOC22, x0); res_535387_839829468 += ((NI) 1); } LA17: ; } } { if (!!(((*typ_535302_839829468).n == NIM_NIL))) goto LA25; gentraverseproc_535039_839829468(c0, accessor0, (*typ_535302_839829468).n); } LA25: ; } break; case ((Ttypekind290244) 18): { Ttype290840* typ0; typ0 = getuniquetype_526640_2036603609(typ_535302_839829468); { NI i_535363_839829468; NI HEX3Atmp_535392_839829468; NI LOC29; NI res_535395_839829468; i_535363_839829468 = (NI)0; HEX3Atmp_535392_839829468 = (NI)0; LOC29 = (NI)0; LOC29 = sonslen_293327_850551059(typ0); HEX3Atmp_535392_839829468 = (NI)(LOC29 - ((NI) 1)); res_535395_839829468 = ((NI) 0); { while (1) { TY530811 LOC32; Ropeobj177006* LOC33; if (!(res_535395_839829468 <= HEX3Atmp_535392_839829468)) goto LA31; i_535363_839829468 = res_535395_839829468; memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = accessor0; LOC32[1] = rope_177401_2381377266(((NI64) (i_535363_839829468))); LOC33 = (Ropeobj177006*)0; LOC33 = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_185), LOC32, 2); gentraverseproc_535022_839829468(c0, LOC33, (*typ0).sons->data[i_535363_839829468]); res_535395_839829468 += ((NI) 1); } LA31: ; } } } break; case ((Ttypekind290244) 22): case ((Ttypekind290244) 28): case ((Ttypekind290244) 24): { TY177507 LOC35; memset((void*)LOC35, 0, sizeof(LOC35)); LOC35[0] = accessor0; linecg_530707_839829468(p0, ((Tcprocsection527011) 2), (*c0).visitorfrmt, LOC35, 1); } break; case ((Ttypekind290244) 25): { { TY177507 LOC41; TY177507 LOC42; if (!((*typ_535302_839829468).callconv == ((Tcallingconvention290002) 8))) goto LA39; memset((void*)LOC41, 0, sizeof(LOC41)); memset((void*)LOC42, 0, sizeof(LOC42)); LOC42[0] = accessor0; LOC41[0] = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_186), LOC42, 1); linecg_530707_839829468(p0, ((Tcprocsection527011) 2), (*c0).visitorfrmt, LOC41, 1); } LA39: ; } break; default: { } break; } }BeforeRet: ; }
/* gentraverseprocseq: emits a loop over a seq's elements (length accessor text
 * depends on gcmd/symbol-flag 27) and traverses each element type. */
N_NIMCALL(void, gentraverseprocseq_535399_839829468)(Ttraversalclosure535019* c0, Ropeobj177006* accessor0, Ttype290840* typ0) { Tcproc527021* p0; Tloc290816 i0; Ttype290840* LOC1; TY533238 LOC2; NimStringDesc* LOC3; TY530811 LOC11; Ropeobj177006* LOC12; TY531289 LOC13; p0 = (*c0).p; memset((void*)(&i0), 0, sizeof(i0)); LOC1 = (Ttype290840*)0; LOC1 = getsystype_336150_3937434831(((Ttypekind290244) 31)); gettemp_535032_839829468(p0, LOC1, (&i0), NIM_FALSE); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = i0.r; LOC2[1] = accessor0; LOC3 = (NimStringDesc*)0; { NIM_BOOL LOC6; LOC6 = (NIM_BOOL)0; LOC6 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC6) goto LA7; LOC6 = (((*(*(*(*c0).p).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA7: ; if (!LOC6) goto LA8; LOC3 = copyString(((NimStringDesc*) &T839829468_157)); } goto LA4; LA8: ; { LOC3 = copyString(((NimStringDesc*) &T839829468_158)); } LA4: ; LOC2[2] = rope_177277_2381377266(LOC3); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_156), LOC2, 3); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = accessor0; LOC11[1] = i0.r; LOC12 = (Ropeobj177006*)0; LOC12 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_187), LOC11, 2); gentraverseproc_535022_839829468(c0, LOC12, (*typ0).sons->data[((NI) 0)]); memset((void*)LOC13, 0, sizeof(LOC13)); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_160), LOC13, 0); }
/* gentraverseproc (entry): generates a complete C traversal procedure for
 * `typ0` — builds header/prologue/epilogue ropes, fills the body via the seq
 * or generic traversal above, emits prototype into section 7 and the proc into
 * section 10, returning the generated proc name. */
N_NIMCALL(Ropeobj177006*, gentraverseproc_535632_839829468)(Tcgen527027* m0, Ttype290840* typ0, Ttypeinforeason535016 reason0) { Ropeobj177006* result0; Ttraversalclosure535019 c0; Tcproc527021* p0; Ropeobj177006* header0; TY177507 LOC3; Ropeobj177006* t0; TY177507 LOC4; TY177507 LOC5; Ropeobj177006* generatedproc0; TY533235 LOC20; Ropeobj177006** LOC21; Ropeobj177006** LOC22; Ropeobj177006** LOC23; TY177507 LOC24; result0 = (Ropeobj177006*)0; memset((void*)(&c0), 0, sizeof(c0)); p0 = newproc_527206_3723162438(NIM_NIL, m0); result0 = gettempname_531596_839829468(m0); switch (reason0) { case ((Ttypeinforeason535016) 0): { c0.visitorfrmt = copyString(((NimStringDesc*) &T839829468_145)); } break; default: { } break; } memset((void*)LOC3, 0, sizeof(LOC3)); LOC3[0] = result0; header0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_146), LOC3, 1); t0 = gettypedesc_533671_839829468(m0, typ0); memset((void*)LOC4, 0, sizeof(LOC4)); LOC4[0] = t0; linef_530700_839829468(p0, ((Tcprocsection527011) 0), ((NimStringDesc*) &T839829468_147), LOC4, 1); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = t0; linef_530700_839829468(p0, ((Tcprocsection527011) 1), ((NimStringDesc*) &T839829468_148), LOC5, 1); c0.p = p0; { Ropeobj177006* LOC10; if (!((*typ0).kind == ((Ttypekind290244) 24))) goto LA8; LOC10 = (Ropeobj177006*)0; LOC10 = rope_177277_2381377266(((NimStringDesc*) &T839829468_188)); gentraverseprocseq_535399_839829468((&c0), LOC10, typ0); } goto LA6; LA8: ; { { Ttype290840* LOC14; Ropeobj177006* LOC17; LOC14 = (Ttype290840*)0; LOC14 = skiptypes_294099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106232576256)); if (!((*LOC14).kind == ((Ttypekind290244) 4) || (*LOC14).kind == ((Ttypekind290244) 16))) goto LA15; LOC17 = (Ropeobj177006*)0; LOC17 = rope_177277_2381377266(((NimStringDesc*) &T839829468_188)); gentraverseproc_535022_839829468((&c0), LOC17, (*typ0).sons->data[((NI) 0)]); } goto LA12; LA15: ; { Ropeobj177006* LOC19; LOC19 = (Ropeobj177006*)0; LOC19 = rope_177277_2381377266(((NimStringDesc*) &T839829468_189)); gentraverseproc_535022_839829468((&c0), LOC19, (*typ0).sons->data[((NI) 0)]); } LA12: ; } LA6: ; memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = header0; LOC21 = (Ropeobj177006**)0; LOC21 = s_527179_3723162438(p0, ((Tcprocsection527011) 0)); LOC20[1] = (*LOC21); LOC22 = (Ropeobj177006**)0; LOC22 = s_527179_3723162438(p0, ((Tcprocsection527011) 1)); LOC20[2] = (*LOC22); LOC23 = (Ropeobj177006**)0; LOC23 = s_527179_3723162438(p0, ((Tcprocsection527011) 2)); LOC20[3] = (*LOC23); generatedproc0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_190), LOC20, 4); memset((void*)LOC24, 0, sizeof(LOC24)); LOC24[0] = header0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 7))- 0], ((NimStringDesc*) &T839829468_191), LOC24, 1); add_177482_2381377266(&(*m0).s[(((Tcfilesection527005) 10))- 0], generatedproc0); return result0; }
/* genarrayinfo: RTTI for an array type — base is the element type's typeinfo. */
N_NIMCALL(void, genarrayinfo_535005_839829468)(Tcgen527027* m0, Ttype290840* typ0, Ropeobj177006* name0) { Ropeobj177006* LOC1; LOC1 = (Ropeobj177006*)0; LOC1 = gentypeinfo_533941_839829468(m0, (*typ0).sons->data[((NI) 1)]); gentypeinfoauxbase_533960_839829468(m0, typ0, typ0, name0, LOC1); }
/* gensetinfo: RTTI for a set type — aux info plus a node recording firstord. */
N_NIMCALL(void, gensetinfo_534867_839829468)(Tcgen527027* m0, Ttype290840* typ0, Ropeobj177006* name0) { Ropeobj177006* tmp0; TY533238 LOC1; NI64 LOC2; gentypeinfoaux_534027_839829468(m0, typ0, typ0, name0); tmp0 = getnimnode_533945_839829468(m0); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = tmp0; LOC2 = (NI64)0; LOC2 = firstord_318001_3876443242(typ0); LOC1[1] = rope_177401_2381377266(LOC2); LOC1[2] = name0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_193), LOC1, 3); }
/* genenuminfo: RTTI for an enum — emits name-string array, node-pointer array,
 * per-value init code, special-case assignments for enums with holes (position
 * differing from index, or type flag bit 5), and a holes marker when needed. */
N_NIMCALL(void, genenuminfo_534597_839829468)(Tcgen527027* m0, Ttype290840* typ0, Ropeobj177006* name0) { Ropeobj177006* nodeptrs0; NI length0; TY530811 LOC1; Ropeobj177006* enumnames0; Ropeobj177006* specialcases0; NI firstnimnode0; NIM_BOOL hasholes0; Ropeobj177006* enumarray0; Ropeobj177006* counter0; TY177507 LOC24; TY533238 LOC25; TY534847 LOC26; TY533235 LOC27; gentypeinfoaux_534027_839829468(m0, typ0, typ0, name0); nodeptrs0 = gettempname_531596_839829468(m0); length0 = sonslen_293351_850551059((*typ0).n); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = nodeptrs0; LOC1[1] = rope_177401_2381377266(((NI64) (length0))); addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC1, 2); enumnames0 = (Ropeobj177006*)0; specialcases0 = (Ropeobj177006*)0; firstnimnode0 = (*m0).typenodes; hasholes0 = NIM_FALSE; { NI i_534622_839829468; NI HEX3Atmp_534860_839829468; NI res_534863_839829468; i_534622_839829468 = (NI)0; HEX3Atmp_534860_839829468 = (NI)0; HEX3Atmp_534860_839829468 = (NI)(length0 - ((NI) 1)); res_534863_839829468 = ((NI) 0); { while (1) { Tsym290834* field0; Ropeobj177006* elemnode0; if (!(res_534863_839829468 <= HEX3Atmp_534860_839829468)) goto LA4; i_534622_839829468 = res_534863_839829468; field0 = (*(*(*typ0).n).kindU.S6.sons->data[i_534622_839829468]).kindU.S4.sym; elemnode0 = getnimnode_533945_839829468(m0); { Ropeobj177006* LOC9; if (!((*field0).ast == NIM_NIL)) goto LA7; LOC9 = (Ropeobj177006*)0; LOC9 = makecstring_189638_155036129((*(*field0).name).s); add_177482_2381377266(&enumnames0, LOC9); } goto LA5; LA7: ; { Ropeobj177006* LOC11; LOC11 = (Ropeobj177006*)0; LOC11 = makecstring_189638_155036129((*(*field0).ast).kindU.S3.strval); add_177482_2381377266(&enumnames0, LOC11); } LA5: ; { NimStringDesc* LOC16; if (!(i_534622_839829468 < (NI)(length0 - ((NI) 1)))) goto LA14; LOC16 = (NimStringDesc*)0; LOC16 = rawNewString(tnl_175644_4151366050->Sup.len + 2); appendString(LOC16, ((NimStringDesc*) &T839829468_110)); appendString(LOC16, tnl_175644_4151366050); add_177487_2381377266(&enumnames0, LOC16); } LA14: ; { NIM_BOOL LOC19; TY530811 LOC23; LOC19 = (NIM_BOOL)0; LOC19 = !(((*field0).position == i_534622_839829468)); if (LOC19) goto LA20; LOC19 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag290431) 5))&31U)))!=0); LA20: ; if (!LOC19) goto LA21; memset((void*)LOC23, 0, sizeof(LOC23)); LOC23[0] = elemnode0; LOC23[1] = rope_177401_2381377266(((NI64) ((*field0).position))); addf_178205_2381377266(&specialcases0, ((NimStringDesc*) &T839829468_194), LOC23, 2); hasholes0 = NIM_TRUE; } LA21: ; res_534863_839829468 += ((NI) 1); } LA4: ; } } enumarray0 = gettempname_531596_839829468(m0); counter0 = gettempname_531596_839829468(m0); memset((void*)LOC24, 0, sizeof(LOC24)); LOC24[0] = counter0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 12))- 0], ((NimStringDesc*) &T839829468_195), LOC24, 1); memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = enumarray0; LOC25[1] = rope_177401_2381377266(((NI64) (length0))); LOC25[2] = enumnames0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 12))- 0], ((NimStringDesc*) &T839829468_196), LOC25, 3); memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = counter0; LOC26[1] = rope_177401_2381377266(((NI64) (length0))); LOC26[2] = (*m0).typenodesname; LOC26[3] = rope_177401_2381377266(((NI64) (firstnimnode0))); LOC26[4] = enumarray0; LOC26[5] = nodeptrs0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_197), LOC26, 6); add_177482_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], specialcases0); memset((void*)LOC27, 0, sizeof(LOC27)); LOC27[0] = getnimnode_533945_839829468(m0); LOC27[1] = rope_177401_2381377266(((NI64) (length0))); LOC27[2] = nodeptrs0; LOC27[3] = name0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_198), LOC27, 4); { TY177507 LOC32; if (!hasholes0) goto LA30; memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = name0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_199), LOC32, 1); } LA30: ; }
/* discriminatortablename: walks the object hierarchy to the type declaring
 * discriminator `d0` and builds its table name from the type's id.
 * NOTE(review): continues past the end of this chunk — left exactly as is. */
N_NIMCALL(Ropeobj177006*, discriminatortablename_534057_839829468)(Tcgen527027* m0, Ttype290840* objtype_534060_839829468, Tsym290834* d0) { Ropeobj177006* result0; Ttype290840* objtype0; TY530811 LOC8; NimStringDesc* LOC9; result0 = (Ropeobj177006*)0; objtype0 = objtype_534060_839829468; { while (1) { Tsym290834* LOC3; LOC3 = (Tsym290834*)0; LOC3 = lookupinrecord_297119_2984716966((*objtype0).n, (*d0).name); if (!(LOC3 == NIM_NIL)) goto LA2; objtype0 = (*objtype0).sons->data[((NI) 0)]; } LA2: ; } { if (!((*objtype0).sym == NIM_NIL)) goto LA6; internalerror_194100_155036129((*d0).info, ((NimStringDesc*) &T839829468_200)); } LA6: ; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rope_177401_2381377266(((NI64) ((*objtype0).Sup.id))); LOC9 =
(NimStringDesc*)0; LOC9 = mangle_526847_2036603609((*(*d0).name).s); LOC8[1] = rope_177277_2381377266(LOC9); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_201), LOC8, 2); return result0; } N_NIMCALL(void, genobjectfields_534104_839829468)(Tcgen527027* m0, Ttype290840* typ0, Tnode290802* n0, Ropeobj177006* expr0) { switch ((*n0).kind) { case ((Tnodekind290020) 138): { NI L0; L0 = sonslen_293351_850551059(n0); { if (!(L0 == ((NI) 1))) goto LA4; genobjectfields_534104_839829468(m0, typ0, (*n0).kindU.S6.sons->data[((NI) 0)], expr0); } goto LA2; LA4: ; { Ropeobj177006* tmp0; TY530811 LOC9; TY533238 LOC14; if (!(((NI) 0) < L0)) goto LA7; tmp0 = gettempname_531596_839829468(m0); memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = tmp0; LOC9[1] = rope_177401_2381377266(((NI64) (L0))); addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC9, 2); { NI i_534127_839829468; NI HEX3Atmp_534482_839829468; NI res_534485_839829468; i_534127_839829468 = (NI)0; HEX3Atmp_534482_839829468 = (NI)0; HEX3Atmp_534482_839829468 = (NI)(L0 - ((NI) 1)); res_534485_839829468 = ((NI) 0); { while (1) { Ropeobj177006* tmp20; TY533238 LOC13; if (!(res_534485_839829468 <= HEX3Atmp_534482_839829468)) goto LA12; i_534127_839829468 = res_534485_839829468; tmp20 = getnimnode_533945_839829468(m0); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = tmp0; LOC13[1] = rope_177401_2381377266(((NI64) (i_534127_839829468))); LOC13[2] = tmp20; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC13, 3); genobjectfields_534104_839829468(m0, typ0, (*n0).kindU.S6.sons->data[i_534127_839829468], tmp20); res_534485_839829468 += ((NI) 1); } LA12: ; } } memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = expr0; LOC14[1] = rope_177401_2381377266(((NI64) (L0))); LOC14[2] = tmp0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_142), LOC14, 3); } goto LA2; LA7: 
; { TY530811 LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = expr0; LOC16[1] = rope_177401_2381377266(((NI64) (L0))); addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_143), LOC16, 2); } LA2: ; } break; case ((Tnodekind290020) 139): { Tsym290834* field0; Ropeobj177006* tmp0; NI64 L0; TY534401 LOC18; TY530811 LOC19; field0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; tmp0 = discriminatortablename_534057_839829468(m0, typ0, field0); L0 = lengthord_318007_3876443242((*field0).typ); memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = expr0; LOC18[1] = gettypedesc_533671_839829468(m0, typ0); LOC18[2] = (*field0).loc.r; LOC18[3] = gentypeinfo_533941_839829468(m0, (*field0).typ); LOC18[4] = makecstring_189638_155036129((*(*field0).name).s); LOC18[5] = tmp0; LOC18[6] = rope_177401_2381377266(L0); addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_202), LOC18, 7); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = tmp0; LOC19[1] = rope_177401_2381377266((NI64)(L0 + IL64(1))); addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 8))- 0], ((NimStringDesc*) &T839829468_203), LOC19, 2); { NI i_534421_839829468; NI HEX3Atmp_534499_839829468; NI LOC21; NI res_534502_839829468; i_534421_839829468 = (NI)0; HEX3Atmp_534499_839829468 = (NI)0; LOC21 = (NI)0; LOC21 = sonslen_293351_850551059(n0); HEX3Atmp_534499_839829468 = (NI)(LOC21 - ((NI) 1)); res_534502_839829468 = ((NI) 1); { while (1) { Tnode290802* b0; Ropeobj177006* tmp20; Tnode290802* LOC24; if (!(res_534502_839829468 <= HEX3Atmp_534499_839829468)) goto LA23; i_534421_839829468 = res_534502_839829468; b0 = (*n0).kindU.S6.sons->data[i_534421_839829468]; tmp20 = getnimnode_533945_839829468(m0); LOC24 = (Tnode290802*)0; LOC24 = lastson_293364_850551059(b0); genobjectfields_534104_839829468(m0, typ0, LOC24, tmp20); switch ((*b0).kind) { case ((Tnodekind290020) 85): { { NI LOC28; LOC28 = (NI)0; LOC28 = 
sonslen_293351_850551059(b0); if (!(LOC28 < ((NI) 2))) goto LA29; internalerror_194100_155036129((*b0).info, ((NimStringDesc*) &T839829468_204)); } LA29: ; { NI j_534436_839829468; NI HEX3Atmp_534492_839829468; NI LOC32; NI res_534495_839829468; j_534436_839829468 = (NI)0; HEX3Atmp_534492_839829468 = (NI)0; LOC32 = (NI)0; LOC32 = sonslen_293351_850551059(b0); HEX3Atmp_534492_839829468 = (NI)(LOC32 - ((NI) 2)); res_534495_839829468 = ((NI) 0); { while (1) { if (!(res_534495_839829468 <= HEX3Atmp_534492_839829468)) goto LA34; j_534436_839829468 = res_534495_839829468; { NI x0; NI64 LOC39; NI y0; NI64 LOC40; if (!((*(*b0).kindU.S6.sons->data[j_534436_839829468]).kind == ((Tnodekind290020) 44))) goto LA37; LOC39 = (NI64)0; LOC39 = getordvalue_318129_3876443242((*(*b0).kindU.S6.sons->data[j_534436_839829468]).kindU.S6.sons->data[((NI) 0)]); x0 = ((NI) (LOC39)); LOC40 = (NI64)0; LOC40 = getordvalue_318129_3876443242((*(*b0).kindU.S6.sons->data[j_534436_839829468]).kindU.S6.sons->data[((NI) 1)]); y0 = ((NI) (LOC40)); { while (1) { TY533238 LOC43; if (!(x0 <= y0)) goto LA42; memset((void*)LOC43, 0, sizeof(LOC43)); LOC43[0] = tmp0; LOC43[1] = rope_177401_2381377266(((NI64) (x0))); LOC43[2] = tmp20; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC43, 3); x0 += ((NI) 1); } LA42: ; } } goto LA35; LA37: ; { TY533238 LOC45; NI64 LOC46; memset((void*)LOC45, 0, sizeof(LOC45)); LOC45[0] = tmp0; LOC46 = (NI64)0; LOC46 = getordvalue_318129_3876443242((*b0).kindU.S6.sons->data[j_534436_839829468]); LOC45[1] = rope_177401_2381377266(LOC46); LOC45[2] = tmp20; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC45, 3); } LA35: ; res_534495_839829468 += ((NI) 1); } LA34: ; } } } break; case ((Tnodekind290020) 88): { TY533238 LOC48; memset((void*)LOC48, 0, sizeof(LOC48)); LOC48[0] = tmp0; LOC48[1] = rope_177401_2381377266(L0); LOC48[2] = tmp20; 
addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC48, 3); } break; default: { internalerror_194100_155036129((*n0).info, ((NimStringDesc*) &T839829468_205)); } break; } res_534502_839829468 += ((NI) 1); } LA23: ; } } } break; case ((Tnodekind290020) 3): { Tsym290834* field0; field0 = (*n0).kindU.S4.sym; { TY534475 LOC55; if (!((*field0).kindU.S4.bitsize == ((NI) 0))) goto LA53; memset((void*)LOC55, 0, sizeof(LOC55)); LOC55[0] = expr0; LOC55[1] = gettypedesc_533671_839829468(m0, typ0); LOC55[2] = (*field0).loc.r; LOC55[3] = gentypeinfo_533941_839829468(m0, (*field0).typ); LOC55[4] = makecstring_189638_155036129((*(*field0).name).s); addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_206), LOC55, 5); } LA53: ; } break; default: { internalerror_194100_155036129((*n0).info, ((NimStringDesc*) &T839829468_207)); } break; } } N_NIMCALL(void, genobjectinfo_534506_839829468)(Tcgen527027* m0, Ttype290840* typ0, Ttype290840* origtype0, Ropeobj177006* name0) { Ropeobj177006* tmp0; TY530811 LOC12; Ttype290840* t0; { if (!((*typ0).kind == ((Ttypekind290244) 17))) goto LA3; gentypeinfoaux_534027_839829468(m0, typ0, origtype0, name0); } goto LA1; LA3: ; { Ropeobj177006* LOC6; LOC6 = (Ropeobj177006*)0; LOC6 = rope_177277_2381377266(((NimStringDesc*) &T839829468_18)); gentypeinfoauxbase_533960_839829468(m0, typ0, origtype0, name0, LOC6); } LA1: ; tmp0 = getnimnode_533945_839829468(m0); { NIM_BOOL LOC9; LOC9 = (NIM_BOOL)0; LOC9 = isimportedcpptype_531476_839829468(typ0); if (!!(LOC9)) goto LA10; genobjectfields_534104_839829468(m0, typ0, (*typ0).n, tmp0); } LA10: ; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = name0; LOC12[1] = tmp0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_144), LOC12, 2); t0 = (*typ0).sons->data[((NI) 0)]; { while (1) { if (!!((t0 == NIM_NIL))) goto LA14; t0 = skiptypes_294099_850551059(t0, 
IL64(211106247215360)); (*t0).flags |= ((NU32)1)<<((((Ttypeflag290431) 5))%(sizeof(NU32)*8)); t0 = (*t0).sons->data[((NI) 0)]; } LA14: ; } } N_NIMCALL(void, gendeepcopyproc_536066_839829468)(Tcgen527027* m0, Tsym290834* s0, Ropeobj177006* result0) { TY530811 LOC1; genproc_530951_839829468(m0, s0); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = result0; LOC1[1] = (*s0).loc.r; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_208), LOC1, 2); } N_NIMCALL(Ropeobj177006*, gentypeinfo_533941_839829468)(Tcgen527027* m0, Ttype290840* t_533944_839829468) { Ropeobj177006* result0; Ttype290840* origtype0; Ttype290840* t0; TY177507 LOC1; Tsym290834* owner0; Ttype290840* LOC12; Ropeobj177006* LOC66; Ropeobj177006* LOC67; Ropeobj177006* LOC68; { result0 = (Ropeobj177006*)0; origtype0 = t_533944_839829468; t0 = getuniquetype_526640_2036603609(t_533944_839829468); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rope_177401_2381377266(((NI64) ((*t0).Sup.id))); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_127), LOC1, 1); { NIM_BOOL LOC4; Ropeobj177006* LOC7; Ropeobj177006* LOC8; Ropeobj177006* LOC9; LOC4 = (NIM_BOOL)0; LOC4 = containsorincl_266862_2627731572((&(*m0).typeinfomarker), (*t0).Sup.id); if (!LOC4) goto LA5; LOC7 = (Ropeobj177006*)0; LOC7 = rope_177277_2381377266(((NimStringDesc*) &T839829468_128)); LOC8 = (Ropeobj177006*)0; LOC8 = HEX26_177418_2381377266(LOC7, result0); LOC9 = (Ropeobj177006*)0; LOC9 = rope_177277_2381377266(((NimStringDesc*) &T839829468_117)); result0 = HEX26_177418_2381377266(LOC8, LOC9); goto BeforeRet; } LA5: ; { while (1) { if (!((*t0).kind == ((Ttypekind290244) 13))) goto LA11; t0 = lastson_293377_850551059(t0); } LA11: ; } LOC12 = (Ttype290840*)0; LOC12 = skiptypes_294099_850551059(t0, IL64(211106247256320)); owner0 = getmodule_297123_2984716966((*LOC12).owner); { Tcgen527027* LOC17; Ropeobj177006* LOC18; Ropeobj177006* LOC19; Ropeobj177006* LOC20; TY530811 LOC21; NimStringDesc* 
LOC22; Ropeobj177006* LOC23; Ropeobj177006* LOC24; Ropeobj177006* LOC25; if (!!((owner0 == (*m0).module))) goto LA15; LOC17 = (Tcgen527027*)0; LOC17 = bmod_527201_3723162438(owner0); LOC18 = (Ropeobj177006*)0; LOC18 = gentypeinfo_533941_839829468(LOC17, t0); LOC19 = (Ropeobj177006*)0; LOC19 = cgsym_530403_839829468(m0, ((NimStringDesc*) &T839829468_129)); LOC20 = (Ropeobj177006*)0; LOC20 = cgsym_530403_839829468(m0, ((NimStringDesc*) &T839829468_130)); memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = result0; LOC22 = (NimStringDesc*)0; LOC22 = typetostring_318017_3876443242(t0, ((Tprefereddesc318011) 0)); LOC21[1] = rope_177277_2381377266(LOC22); addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 9))- 0], ((NimStringDesc*) &T839829468_131), LOC21, 2); LOC23 = (Ropeobj177006*)0; LOC23 = rope_177277_2381377266(((NimStringDesc*) &T839829468_128)); LOC24 = (Ropeobj177006*)0; LOC24 = HEX26_177418_2381377266(LOC23, result0); LOC25 = (Ropeobj177006*)0; LOC25 = rope_177277_2381377266(((NimStringDesc*) &T839829468_117)); result0 = HEX26_177418_2381377266(LOC24, LOC25); goto BeforeRet; } LA15: ; switch ((*t0).kind) { case ((Ttypekind290244) 3): case ((Ttypekind290244) 62): { result0 = rope_177277_2381377266(((NimStringDesc*) &T839829468_132)); } break; case ((Ttypekind290244) 26): case ((Ttypekind290244) 1): case ((Ttypekind290244) 2): case ((Ttypekind290244) 29): case ((Ttypekind290244) 28): case ((Ttypekind290244) 31) ... 
((Ttypekind290244) 44): case ((Ttypekind290244) 23): { Ropeobj177006* LOC28; LOC28 = (Ropeobj177006*)0; LOC28 = rope_177277_2381377266(((NimStringDesc*) &T839829468_132)); gentypeinfoauxbase_533960_839829468(m0, t0, t0, result0, LOC28); } break; case ((Ttypekind290244) 59): { { Ttype290840* LOC34; if (!!(((*t0).n == NIM_NIL))) goto LA32; LOC34 = (Ttype290840*)0; LOC34 = lastson_293377_850551059(t0); result0 = gentypeinfo_533941_839829468(m0, LOC34); } goto LA30; LA32: ; { NimStringDesc* LOC36; LOC36 = (NimStringDesc*)0; LOC36 = rawNewString(reprEnum((NI)(*t0).kind, (&NTI290244))->Sup.len + 13); appendString(LOC36, ((NimStringDesc*) &T839829468_137)); appendString(LOC36, reprEnum((NI)(*t0).kind, (&NTI290244))); appendChar(LOC36, 41); internalerror_194113_155036129(LOC36); } LA30: ; } break; case ((Ttypekind290244) 25): { { Ropeobj177006* LOC42; if (!!(((*t0).callconv == ((Tcallingconvention290002) 8)))) goto LA40; LOC42 = (Ropeobj177006*)0; LOC42 = rope_177277_2381377266(((NimStringDesc*) &T839829468_132)); gentypeinfoauxbase_533960_839829468(m0, t0, t0, result0, LOC42); } goto LA38; LA40: ; { Ttype290840* LOC44; LOC44 = (Ttype290840*)0; LOC44 = fakeclosuretype_535010_839829468((*t0).owner); gentupleinfo_534549_839829468(m0, LOC44, result0); } LA38: ; } break; case ((Ttypekind290244) 24): case ((Ttypekind290244) 22): { gentypeinfoaux_534027_839829468(m0, t0, t0, result0); { Ropeobj177006* markerproc0; TY530811 LOC50; if (!(((Tgcmode168080) 4) <= gselectedgc_168133_2607990831)) goto LA48; markerproc0 = gentraverseproc_535632_839829468(m0, t0, ((Ttypeinforeason535016) 0)); memset((void*)LOC50, 0, sizeof(LOC50)); LOC50[0] = result0; LOC50[1] = markerproc0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_192), LOC50, 2); } LA48: ; } break; case ((Ttypekind290244) 21): case ((Ttypekind290244) 20): { gentypeinfoaux_534027_839829468(m0, t0, t0, result0); } break; case ((Ttypekind290244) 4): case ((Ttypekind290244) 16): { 
genarrayinfo_535005_839829468(m0, t0, result0); } break; case ((Ttypekind290244) 19): { gensetinfo_534867_839829468(m0, t0, result0); } break; case ((Ttypekind290244) 14): { genenuminfo_534597_839829468(m0, t0, result0); } break; case ((Ttypekind290244) 17): { genobjectinfo_534506_839829468(m0, t0, origtype0, result0); } break; case ((Ttypekind290244) 18): { gentupleinfo_534549_839829468(m0, t0, result0); } break; default: { NimStringDesc* LOC58; LOC58 = (NimStringDesc*)0; LOC58 = rawNewString(reprEnum((NI)(*t0).kind, (&NTI290244))->Sup.len + 13); appendString(LOC58, ((NimStringDesc*) &T839829468_137)); appendString(LOC58, reprEnum((NI)(*t0).kind, (&NTI290244))); appendChar(LOC58, 41); internalerror_194113_155036129(LOC58); } break; } { if (!!(((*t0).deepcopy == NIM_NIL))) goto LA61; gendeepcopyproc_536066_839829468(m0, (*t0).deepcopy, result0); } goto LA59; LA61: ; { if (!!(((*origtype0).deepcopy == NIM_NIL))) goto LA64; gendeepcopyproc_536066_839829468(m0, (*origtype0).deepcopy, result0); } goto LA59; LA64: ; LA59: ; LOC66 = (Ropeobj177006*)0; LOC66 = rope_177277_2381377266(((NimStringDesc*) &T839829468_128)); LOC67 = (Ropeobj177006*)0; LOC67 = HEX26_177418_2381377266(LOC66, result0); LOC68 = (Ropeobj177006*)0; LOC68 = rope_177277_2381377266(((NimStringDesc*) &T839829468_117)); result0 = HEX26_177418_2381377266(LOC67, LOC68); }BeforeRet: ; return result0; } N_NIMCALL(void, localdebuginfo_536449_839829468)(Tcproc527021* p0, Tsym290834* s0) { Ropeobj177006* a0; TY533235 LOC16; NimStringDesc* LOC17; { { if (!!(((163840 & (*p0).options) == 163840))) goto LA3; goto BeforeRet; } LA3: ; { Ttype290840* LOC7; LOC7 = (Ttype290840*)0; LOC7 = skiptypes_294099_850551059((*s0).typ, IL64(211106240964864)); if (!((*LOC7).kind == ((Ttypekind290244) 27) || (*LOC7).kind == ((Ttypekind290244) 48))) goto LA8; goto BeforeRet; } LA8: ; a0 = HEX26_177452_2381377266(((NimStringDesc*) &T839829468_52), (*s0).loc.r); { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = ((*s0).kind == 
((Tsymkind290435) 3)); if (!(LOC12)) goto LA13; LOC12 = ccgintroducedptr_531609_839829468(s0); LA13: ; if (!LOC12) goto LA14; a0 = (*s0).loc.r; } LA14: ; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rope_177401_2381377266(((NI64) ((*p0).maxframelen))); LOC17 = (NimStringDesc*)0; LOC17 = nsuNormalize((*(*s0).name).s); LOC16[1] = makecstring_189638_155036129(LOC17); LOC16[2] = a0; LOC16[3] = gentypeinfo_533941_839829468((*p0).module, (*s0).loc.t); linef_530700_839829468(p0, ((Tcprocsection527011) 1), ((NimStringDesc*) &T839829468_126), LOC16, 4); (*p0).maxframelen += ((NI) 1); (*p0).blocks->data[(NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1))].framelen += ((NI) 1); }BeforeRet: ; } N_NIMCALL(void, assignlocalvar_536614_839829468)(Tcproc527021* p0, Tsym290834* s0) { Ropeobj177006* decl0; Ropeobj177006* LOC1; Ropeobj177006* LOC2; LOC1 = (Ropeobj177006*)0; LOC1 = localvardecl_536532_839829468(p0, s0); LOC2 = (Ropeobj177006*)0; LOC2 = HEX26_177447_2381377266(LOC1, ((NimStringDesc*) &T839829468_125)); decl0 = HEX26_177447_2381377266(LOC2, tnl_175644_4151366050); line_530690_839829468(p0, ((Tcprocsection527011) 0), decl0); localdebuginfo_536449_839829468(p0, s0); } N_NIMCALL(void, initlocalvar_536398_839829468)(Tcproc527021* p0, Tsym290834* v0, NIM_BOOL immediateasgn0) { { if (!!((((*v0).flags &(1U<<((NU)(((Tsymflag290184) 12))&31U)))!=0))) goto LA3; { if (!!(immediateasgn0)) goto LA7; constructloc_536388_839829468(p0, (*v0).loc, NIM_FALSE); } LA7: ; } LA3: ; } N_NIMCALL(void, fillresult_531865_839829468)(Tsym290834* param0) { TY531289 LOC1; Ropeobj177006* LOC2; memset((void*)LOC1, 0, sizeof(LOC1)); LOC2 = (Ropeobj177006*)0; LOC2 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_210), LOC1, 0); fillloc_530282_839829468((&(*param0).loc), ((Tlockind290808) 4), (*param0).typ, LOC2, ((Tstorageloc290812) 2)); { NIM_BOOL LOC5; Tctypekind527007 LOC6; LOC5 = (NIM_BOOL)0; LOC6 = (Tctypekind527007)0; LOC6 = mapreturntype_531445_839829468((*param0).typ); LOC5 = 
!((LOC6 == ((Tctypekind527007) 17))); if (!(LOC5)) goto LA7; LOC5 = isinvalidreturntype_531548_839829468((*param0).typ); LA7: ; if (!LOC5) goto LA8; (*param0).loc.flags |= ((NU16)1)<<((((Tlocflag290810) 0))%(sizeof(NU16)*8)); (*param0).loc.s = ((Tstorageloc290812) 0); } LA8: ; } N_NIMCALL(void, assignparam_536994_839829468)(Tcproc527021* p0, Tsym290834* s0) { localdebuginfo_536449_839829468(p0, s0); } N_NIMCALL(void, closuresetup_558158_839829468)(Tcproc527021* p0, Tsym290834* prc0) { Tnode290802* ls0; Tnode290802* LOC5; Tsym290834* env0; TY530811 LOC10; { { if (!!((((*(*prc0).typ).flags &(1U<<((NU)(((Ttypeflag290431) 11))&31U)))!=0))) goto LA3; goto BeforeRet; } LA3: ; LOC5 = (Tnode290802*)0; LOC5 = HEX5BHEX5D_291238_850551059((*prc0).ast, ((NI) 3)); ls0 = lastson_293364_850551059(LOC5); { if (!!(((*ls0).kind == ((Tnodekind290020) 3)))) goto LA8; internalerror_194100_155036129((*prc0).info, ((NimStringDesc*) &T839829468_211)); } LA8: ; env0 = (*ls0).kindU.S4.sym; assignlocalvar_536614_839829468(p0, env0); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = rdloc_536188_839829468((*env0).loc); LOC10[1] = gettypedesc_533671_839829468((*p0).module, (*env0).typ); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_212), LOC10, 2); }BeforeRet: ; } N_NIMCALL(Ropeobj177006*, initgcframe_536435_839829468)(Tcproc527021* p0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; { TY177507 LOC5; if (!(((NI) 0) < ((NI) ((*p0).gcframeid)))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = (*p0).gcframetype; result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_217), LOC5, 1); } LA3: ; return result0; } N_NIMCALL(Ropeobj177006*, initframe_558140_839829468)(Tcproc527021* p0, Ropeobj177006* procname0, Ropeobj177006* filename0) { Ropeobj177006* result0; Ropeobj177006* LOC1; result0 = (Ropeobj177006*)0; LOC1 = (Ropeobj177006*)0; LOC1 = cgsym_530403_839829468((*p0).module, ((NimStringDesc*) &T839829468_218)); { Ropeobj177006* 
LOC6; TY533235 LOC7; if (!(((NI) 0) < (*p0).maxframelen)) goto LA4; LOC6 = (Ropeobj177006*)0; LOC6 = cgsym_530403_839829468((*p0).module, ((NimStringDesc*) &T839829468_219)); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = procname0; LOC7[1] = filename0; LOC7[2] = rope_177401_2381377266(((NI64) ((*p0).maxframelen))); LOC7[3] = rope_177401_2381377266(((NI64) ((*p0).blocks->data[((NI) 0)].framelen))); result0 = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_220), LOC7, 4); } goto LA2; LA4: ; { TY530811 LOC9; memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = procname0; LOC9[1] = filename0; result0 = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_221), LOC9, 2); } LA2: ; return result0; } N_NIMCALL(void, appcg_530648_839829468)(Tcproc527021* p0, Tcprocsection527011 s0, NimStringDesc* frmt0, Ropeobj177006** args0, NI args0Len0) { Ropeobj177006** LOC1; Ropeobj177006* LOC2; LOC1 = (Ropeobj177006**)0; LOC1 = s_527179_3723162438(p0, s0); LOC2 = (Ropeobj177006*)0; LOC2 = ropecg_530407_839829468((*p0).module, frmt0, args0, args0Len0); add_177482_2381377266(LOC1, LOC2); } N_NIMCALL(Ropeobj177006*, deinitgcframe_536441_839829468)(Tcproc527021* p0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; { TY531289 LOC5; if (!(((NI) 0) < ((NI) ((*p0).gcframeid)))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); result0 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_225), LOC5, 0); } LA3: ; return result0; } N_NIMCALL(Ropeobj177006*, deinitframe_558150_839829468)(Tcproc527021* p0) { Ropeobj177006* result0; TY531289 LOC1; result0 = (Ropeobj177006*)0; memset((void*)LOC1, 0, sizeof(LOC1)); result0 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_226), LOC1, 0); return result0; } N_NIMCALL(void, genprocaux_558284_839829468)(Tcgen527027* m0, Tsym290834* prc0) { Tcproc527021* p0; Ropeobj177006* header0; Ropeobj177006* returnstmt0; Tnode290802* LOC51; Ropeobj177006* generatedproc0; p0 = 
newproc_527206_3723162438(prc0, m0); header0 = genprocheader_533867_839829468(m0, prc0); returnstmt0 = NIM_NIL; { NIM_BOOL LOC3; Tsym290834* res0; LOC3 = (NIM_BOOL)0; LOC3 = !((((*prc0).flags &(1U<<((NU)(((Tsymflag290184) 9))&31U)))!=0)); if (!(LOC3)) goto LA4; LOC3 = !(((*(*prc0).typ).sons->data[((NI) 0)] == NIM_NIL)); LA4: ; if (!LOC3) goto LA5; { NI LOC9; LOC9 = (NI)0; LOC9 = len_291081_850551059((*prc0).ast); if (!(LOC9 <= ((NI) 7))) goto LA10; internalerror_194100_155036129((*prc0).info, ((NimStringDesc*) &T839829468_120)); } LA10: ; res0 = (*(*(*prc0).ast).kindU.S6.sons->data[((NI) 7)]).kindU.S4.sym; { NIM_BOOL LOC14; TY177507 LOC34; LOC14 = (NIM_BOOL)0; LOC14 = isinvalidreturntype_531548_839829468((*(*prc0).typ).sons->data[((NI) 0)]); if (!!(LOC14)) goto LA15; { if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag290184) 12))&31U)))!=0)) goto LA19; (*res0).flags |= ((NU32)1)<<((((Tsymflag290184) 12))%(sizeof(NU32)*8)); } LA19: ; { NIM_BOOL LOC23; NIM_BOOL LOC24; NIM_BOOL LOC26; Tnode290802* val0; Tnode290802* LOC29; Ropeobj177006* decl0; Tloc290816 a0; TY530811 LOC32; LOC23 = (NIM_BOOL)0; LOC24 = (NIM_BOOL)0; LOC24 = (((*prc0).flags &(1U<<((NU)(((Tsymflag290184) 12))&31U)))!=0); if (!(LOC24)) goto LA25; LOC26 = (NIM_BOOL)0; LOC26 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC26) goto LA27; LOC26 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA27: ; LOC24 = LOC26; LA25: ; LOC23 = LOC24; if (!(LOC23)) goto LA28; LOC29 = (Tnode290802*)0; LOC29 = getbody_333227_1724185294(prc0); val0 = easyresultasgn_558191_839829468(LOC29); LOC23 = !((val0 == NIM_NIL)); LA28: ; if (!LOC23) goto LA30; decl0 = localvardecl_536532_839829468(p0, res0); memset((void*)(&a0), 0, sizeof(a0)); initlocexprsingleuse_537289_839829468(p0, val0, (&a0)); memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = decl0; LOC32[1] = rdloc_536188_839829468(a0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_123), LOC32, 2); 
} goto LA21; LA30: ; { assignlocalvar_536614_839829468(p0, res0); initlocalvar_536398_839829468(p0, res0, NIM_FALSE); } LA21: ; memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = rdloc_536188_839829468((*res0).loc); returnstmt0 = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_209), LOC34, 1); } goto LA12; LA15: ; { fillresult_531865_839829468(res0); assignparam_536994_839829468(p0, res0); { Ttype290840* LOC38; LOC38 = (Ttype290840*)0; LOC38 = skiptypes_294099_850551059((*res0).typ, IL64(211106232576256)); if (!((*LOC38).kind == ((Ttypekind290244) 16))) goto LA39; (*res0).loc.s = ((Tstorageloc290812) 0); } LA39: ; } LA12: ; } LA5: ; { NI i_558627_839829468; NI HEX3Atmp_558743_839829468; NI LOC42; NI res_558746_839829468; i_558627_839829468 = (NI)0; HEX3Atmp_558743_839829468 = (NI)0; LOC42 = (NI)0; LOC42 = sonslen_293351_850551059((*(*prc0).typ).n); HEX3Atmp_558743_839829468 = (NI)(LOC42 - ((NI) 1)); res_558746_839829468 = ((NI) 1); { while (1) { if (!(res_558746_839829468 <= HEX3Atmp_558743_839829468)) goto LA44; i_558627_839829468 = res_558746_839829468; { Tsym290834* param0; param0 = (*(*(*(*prc0).typ).n).kindU.S6.sons->data[i_558627_839829468]).kindU.S4.sym; { NIM_BOOL LOC48; LOC48 = (NIM_BOOL)0; LOC48 = iscompiletimeonly_326706_3876443242((*param0).typ); if (!LOC48) goto LA49; goto LA45; } LA49: ; assignparam_536994_839829468(p0, param0); } LA45: ; res_558746_839829468 += ((NI) 1); } LA44: ; } } closuresetup_558158_839829468(p0, prc0); LOC51 = (Tnode290802*)0; LOC51 = getbody_333227_1724185294(prc0); genstmts_537244_839829468(p0, LOC51); generatedproc0 = (Ropeobj177006*)0; { if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag290184) 14))&31U)))!=0)) goto LA54; { if (!((Cc_271413_2528170400[(ccompiler_271431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop271004) 6))&7U)))!=0)) goto LA58; header0 = HEX26_177452_2381377266(((NimStringDesc*) &T839829468_213), header0); } LA58: ; } LA54: ; { TY533235 LOC68; Ropeobj177006** LOC69; Ropeobj177006** LOC70; 
Ropeobj177006** LOC71; if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag290184) 9))&31U)))!=0)) goto LA62; { if (!((Cc_271413_2528170400[(ccompiler_271431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop271004) 6))&7U)))!=0)) goto LA66; header0 = HEX26_177452_2381377266(((NimStringDesc*) &T839829468_214), header0); } LA66: ; memset((void*)LOC68, 0, sizeof(LOC68)); LOC68[0] = header0; LOC69 = (Ropeobj177006**)0; LOC69 = s_527179_3723162438(p0, ((Tcprocsection527011) 0)); LOC68[1] = (*LOC69); LOC70 = (Ropeobj177006**)0; LOC70 = s_527179_3723162438(p0, ((Tcprocsection527011) 1)); LOC68[2] = (*LOC70); LOC71 = (Ropeobj177006**)0; LOC71 = s_527179_3723162438(p0, ((Tcprocsection527011) 2)); LOC68[3] = (*LOC71); generatedproc0 = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_215), LOC68, 4); } goto LA60; LA62: ; { TY177507 LOC73; Ropeobj177006* LOC74; Ropeobj177006** LOC93; Ropeobj177006** LOC94; Ropeobj177006* LOC101; TY531289 LOC107; Ropeobj177006* LOC108; memset((void*)LOC73, 0, sizeof(LOC73)); LOC73[0] = header0; generatedproc0 = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_216), LOC73, 1); LOC74 = (Ropeobj177006*)0; LOC74 = initgcframe_536435_839829468(p0); add_177482_2381377266(&generatedproc0, LOC74); { Ropeobj177006** LOC79; Ropeobj177006* procname0; Ropeobj177006* LOC80; Ropeobj177006* LOC81; if (!(((*prc0).options &(1U<<((NU)(((Toption168009) 15))&31U)))!=0)) goto LA77; LOC79 = (Ropeobj177006**)0; LOC79 = s_527179_3723162438(p0, ((Tcprocsection527011) 0)); add_177482_2381377266(&generatedproc0, (*LOC79)); procname0 = makecstring_189638_155036129((*(*prc0).name).s); LOC80 = (Ropeobj177006*)0; LOC80 = quotedfilename_194818_155036129((*prc0).info); LOC81 = (Ropeobj177006*)0; LOC81 = initframe_558140_839829468(p0, procname0, LOC80); add_177482_2381377266(&generatedproc0, LOC81); } goto LA75; LA77: ; { Ropeobj177006** LOC83; LOC83 = (Ropeobj177006**)0; LOC83 = s_527179_3723162438(p0, ((Tcprocsection527011) 0)); 
add_177482_2381377266(&generatedproc0, (*LOC83)); } LA75: ; { TY531289 LOC88; if (!(((*prc0).options &(1U<<((NU)(((Toption168009) 19))&31U)))!=0)) goto LA86; memset((void*)LOC88, 0, sizeof(LOC88)); appcg_530648_839829468(p0, ((Tcprocsection527011) 1), ((NimStringDesc*) &T839829468_222), LOC88, 0); } LA86: ; { if (!(*p0).beforeretneeded) goto LA91; add_177487_2381377266(&generatedproc0, ((NimStringDesc*) &T839829468_223)); } LA91: ; LOC93 = (Ropeobj177006**)0; LOC93 = s_527179_3723162438(p0, ((Tcprocsection527011) 1)); add_177482_2381377266(&generatedproc0, (*LOC93)); LOC94 = (Ropeobj177006**)0; LOC94 = s_527179_3723162438(p0, ((Tcprocsection527011) 2)); add_177482_2381377266(&generatedproc0, (*LOC94)); { TY531289 LOC99; Ropeobj177006* LOC100; if (!(*p0).beforeretneeded) goto LA97; memset((void*)LOC99, 0, sizeof(LOC99)); LOC100 = (Ropeobj177006*)0; LOC100 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_224), LOC99, 0); add_177482_2381377266(&generatedproc0, LOC100); } LA97: ; LOC101 = (Ropeobj177006*)0; LOC101 = deinitgcframe_536441_839829468(p0); add_177482_2381377266(&generatedproc0, LOC101); { Ropeobj177006* LOC106; if (!(((*prc0).options &(1U<<((NU)(((Toption168009) 15))&31U)))!=0)) goto LA104; LOC106 = (Ropeobj177006*)0; LOC106 = deinitframe_558150_839829468(p0); add_177482_2381377266(&generatedproc0, LOC106); } LA104: ; add_177482_2381377266(&generatedproc0, returnstmt0); memset((void*)LOC107, 0, sizeof(LOC107)); LOC108 = (Ropeobj177006*)0; LOC108 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_227), LOC107, 0); add_177482_2381377266(&generatedproc0, LOC108); } LA60: ; add_177482_2381377266(&(*m0).s[(((Tcfilesection527005) 10))- 0], generatedproc0); } N_NIMCALL(Tcgen527027*, findpendingmodule_530241_839829468)(Tcgen527027* m0, Tsym290834* s0) { Tcgen527027* result0; Tsym290834* ms0; result0 = (Tcgen527027*)0; ms0 = getmodule_297123_2984716966(s0); result0 = gmodules_527170_3723162438->data[(*ms0).position]; return result0; } N_NIMCALL(NIM_BOOL, 
isgetprocaddr_557442_839829468)(Tlib290820* lib0) { NIM_BOOL result0; Tnode290802* n0; NIM_BOOL LOC1; NIM_BOOL LOC2; result0 = (NIM_BOOL)0; n0 = (*lib0).path; LOC1 = (NIM_BOOL)0; LOC2 = (NIM_BOOL)0; LOC2 = ((*n0).kind == ((Tnodekind290020) 27) || (*n0).kind == ((Tnodekind290020) 29) || (*n0).kind == ((Tnodekind290020) 30) || (*n0).kind == ((Tnodekind290020) 31) || (*n0).kind == ((Tnodekind290020) 26) || (*n0).kind == ((Tnodekind290020) 28) || (*n0).kind == ((Tnodekind290020) 32)); if (!(LOC2)) goto LA3; LOC2 = !(((*n0).typ == NIM_NIL)); LA3: ; LOC1 = LOC2; if (!(LOC1)) goto LA4; LOC1 = ((*(*n0).typ).kind == ((Ttypekind290244) 26) || (*(*n0).typ).kind == ((Ttypekind290244) 25)); LA4: ; result0 = LOC1; return result0; } N_NIMCALL(void, initlocexpr_537283_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* result0) { initloc_530273_839829468(result0, ((Tlockind290808) 0), (*e0).typ, ((Tstorageloc290812) 0)); expr_537248_839829468(p0, e0, result0); } N_NIMCALL(void, loaddynamiclib_557480_839829468)(Tcgen527027* m0, Tlib290820* lib0) { { Ropeobj177006* tmp0; TY177507 LOC5; if (!!((*lib0).generated)) goto LA3; (*lib0).generated = NIM_TRUE; tmp0 = gettempname_531596_839829468(m0); asgnRefNoCycle((void**) (&(*lib0).name), tmp0); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = tmp0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 9))- 0], ((NimStringDesc*) &T839829468_228), LOC5, 1); { TY134602* s0; Ropeobj177006* loadlib0; TY530811 LOC18; if (!((*(*lib0).path).kind >= ((Tnodekind290020) 20) && (*(*lib0).path).kind <= ((Tnodekind290020) 22))) goto LA8; s0 = (TY134602*) newSeq((&NTI134602), 0); libcandidates_169605_2607990831((*(*lib0).path).kindU.S3.strval, (&s0)); rawmessage_192612_155036129(((Tmsgkind189002) 286), (*(*lib0).path).kindU.S3.strval); loadlib0 = NIM_NIL; { NI i_557847_839829468; NI HEX3Atmp_557902_839829468; NI res_557905_839829468; i_557847_839829468 = (NI)0; HEX3Atmp_557902_839829468 = (NI)0; HEX3Atmp_557902_839829468 = (s0 ? 
(s0->Sup.len-1) : -1); res_557905_839829468 = ((NI) 0); { while (1) { TY530811 LOC17; if (!(res_557905_839829468 <= HEX3Atmp_557902_839829468)) goto LA12; i_557847_839829468 = res_557905_839829468; (*m0).labels += ((NI) 1); { if (!(((NI) 0) < i_557847_839829468)) goto LA15; add_177487_2381377266(&loadlib0, ((NimStringDesc*) &T839829468_229)); } LA15: ; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = tmp0; LOC17[1] = getstrlit_547468_839829468(m0, s0->data[i_557847_839829468]); appcg_530632_839829468(m0, &loadlib0, ((NimStringDesc*) &T839829468_230), LOC17, 2); res_557905_839829468 += ((NI) 1); } LA12: ; } } memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = loadlib0; LOC18[1] = getstrlit_547468_839829468(m0, (*(*lib0).path).kindU.S3.strval); appcg_530632_839829468(m0, &(*m0).s[(((Tcfilesection527005) 16))- 0], ((NimStringDesc*) &T839829468_231), LOC18, 2); } goto LA6; LA8: ; { Tcproc527021* p0; Tloc290816 dest0; Ropeobj177006** LOC20; Ropeobj177006** LOC21; Ropeobj177006** LOC22; TY530811 LOC23; p0 = newproc_527206_3723162438(NIM_NIL, m0); (*p0).options = ((*p0).options & ~ 163840); memset((void*)(&dest0), 0, sizeof(dest0)); initlocexpr_537283_839829468(p0, (*lib0).path, (&dest0)); LOC20 = (Ropeobj177006**)0; LOC20 = s_527179_3723162438(p0, ((Tcprocsection527011) 0)); add_177482_2381377266(&(*m0).s[(((Tcfilesection527005) 9))- 0], (*LOC20)); LOC21 = (Ropeobj177006**)0; LOC21 = s_527179_3723162438(p0, ((Tcprocsection527011) 1)); add_177482_2381377266(&(*m0).s[(((Tcfilesection527005) 16))- 0], (*LOC21)); LOC22 = (Ropeobj177006**)0; LOC22 = s_527179_3723162438(p0, ((Tcprocsection527011) 2)); add_177482_2381377266(&(*m0).s[(((Tcfilesection527005) 16))- 0], (*LOC22)); memset((void*)LOC23, 0, sizeof(LOC23)); LOC23[0] = tmp0; LOC23[1] = rdloc_536188_839829468(dest0); appcg_530632_839829468(m0, &(*m0).s[(((Tcfilesection527005) 16))- 0], ((NimStringDesc*) &T839829468_232), LOC23, 2); } LA6: ; } LA3: ; { if (!((*lib0).name == NIM_NIL)) goto LA26; 
internalerror_194113_155036129(((NimStringDesc*) &T839829468_233)); } LA26: ; } N_NIMCALL(Ropeobj177006*, mangledynlibproc_536816_839829468)(Tsym290834* sym0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; { if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag290184) 16))&31U)))!=0)) goto LA3; result0 = rope_177277_2381377266((*(*sym0).name).s); } goto LA1; LA3: ; { TY177507 LOC6; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = rope_177401_2381377266(((NI64) ((*sym0).Sup.id))); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_234), LOC6, 1); } LA1: ; return result0; } N_NIMCALL(void, symindynamiclib_557929_839829468)(Tcgen527027* m0, Tsym290834* sym0) { Tlib290820* lib0; NIM_BOOL iscall0; Ropeobj177006* extname0; Ropeobj177006* tmp0; TY530811 LOC43; lib0 = (*sym0).annex; iscall0 = isgetprocaddr_557442_839829468(lib0); extname0 = (*sym0).loc.r; { if (!!(iscall0)) goto LA3; loaddynamiclib_557480_839829468(m0, lib0); } LA3: ; tmp0 = mangledynlibproc_536816_839829468(sym0); asgnRefNoCycle((void**) (&(*sym0).loc.r), tmp0); asgnRefNoCycle((void**) (&(*(*sym0).typ).sym), NIM_NIL); (*m0).labels += ((NI) 2); { Tnode290802* n0; Tloc290816 a0; Tnode290802* LOC9; Ropeobj177006* params0; Ropeobj177006* LOC10; Ropeobj177006* load0; TY533235 LOC17; NimStringDesc* LOC18; Tnode290802* last0; NimStringDesc* idx0; if (!iscall0) goto LA7; n0 = (*lib0).path; memset((void*)(&a0), 0, sizeof(a0)); LOC9 = (Tnode290802*)0; LOC9 = HEX5BHEX5D_291238_850551059(n0, ((NI) 0)); initlocexpr_537283_839829468((*m0).initproc, LOC9, (&a0)); LOC10 = (Ropeobj177006*)0; LOC10 = rdloc_536188_839829468(a0); params0 = HEX26_177447_2381377266(LOC10, ((NimStringDesc*) &T839829468_118)); { NI i_557964_839829468; NI HEX3Atmp_558025_839829468; NI LOC12; NI res_558028_839829468; i_557964_839829468 = (NI)0; HEX3Atmp_558025_839829468 = (NI)0; LOC12 = (NI)0; LOC12 = len_291081_850551059(n0); HEX3Atmp_558025_839829468 = (NI)(LOC12 - ((NI) 2)); res_558028_839829468 = ((NI) 1); { while (1) { Tnode290802* 
LOC15; Ropeobj177006* LOC16; if (!(res_558028_839829468 <= HEX3Atmp_558025_839829468)) goto LA14; i_557964_839829468 = res_558028_839829468; LOC15 = (Tnode290802*)0; LOC15 = HEX5BHEX5D_291238_850551059(n0, i_557964_839829468); initlocexpr_537283_839829468((*m0).initproc, LOC15, (&a0)); LOC16 = (Ropeobj177006*)0; LOC16 = rdloc_536188_839829468(a0); add_177482_2381377266(&params0, LOC16); add_177487_2381377266(&params0, ((NimStringDesc*) &T839829468_110)); res_558028_839829468 += ((NI) 1); } LA14: ; } } memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = tmp0; LOC17[1] = gettypedesc_533671_839829468(m0, (*sym0).typ); LOC17[2] = params0; LOC18 = (NimStringDesc*)0; LOC18 = HEX24_177856_2381377266(extname0); LOC17[3] = makecstring_189638_155036129(LOC18); load0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_235), LOC17, 4); last0 = lastson_293364_850551059(n0); { if (!((*last0).kind == ((Tnodekind290020) 58))) goto LA21; last0 = (*last0).kindU.S6.sons->data[((NI) 1)]; } LA21: ; { NimStringDesc* LOC27; if (!!(((*last0).kind == ((Tnodekind290020) 20)))) goto LA25; LOC27 = (NimStringDesc*)0; LOC27 = HEX24_194185_1689653243(T839829468_236); internalerror_194113_155036129(LOC27); } LA25: ; idx0 = (*last0).kindU.S3.strval; { Ropeobj177006** LOC32; if (!((idx0 ? idx0->Sup.len : 0) == ((NI) 0))) goto LA30; LOC32 = (Ropeobj177006**)0; LOC32 = s_527179_3723162438((*m0).initproc, ((Tcprocsection527011) 2)); add_177482_2381377266(LOC32, load0); } goto LA28; LA30: ; { NIM_BOOL LOC34; LOC34 = (NIM_BOOL)0; LOC34 = ((idx0 ? 
idx0->Sup.len : 0) == ((NI) 1)); if (!(LOC34)) goto LA35; LOC34 = (((NU8)(idx0->data[((NI) 0)])) >= ((NU8)(48)) && ((NU8)(idx0->data[((NI) 0)])) <= ((NU8)(57))); LA35: ; if (!LOC34) goto LA36; add_177482_2381377266(&(*m0).extensionloaders[(((NU8)(idx0->data[((NI) 0)])))- 48], load0); } goto LA28; LA36: ; { NimStringDesc* LOC39; LOC39 = (NimStringDesc*)0; LOC39 = rawNewString(idx0->Sup.len + 13); appendString(LOC39, ((NimStringDesc*) &T839829468_237)); appendString(LOC39, idx0); internalerror_194100_155036129((*sym0).info, LOC39); } LA28: ; } goto LA5; LA7: ; { TY533235 LOC41; NimStringDesc* LOC42; memset((void*)LOC41, 0, sizeof(LOC41)); LOC41[0] = tmp0; LOC41[1] = gettypedesc_533671_839829468(m0, (*sym0).typ); LOC41[2] = (*lib0).name; LOC42 = (NimStringDesc*)0; LOC42 = HEX24_177856_2381377266(extname0); LOC41[3] = makecstring_189638_155036129(LOC42); appcg_530632_839829468(m0, &(*m0).s[(((Tcfilesection527005) 16))- 0], ((NimStringDesc*) &T839829468_238), LOC41, 4); } LA5: ; memset((void*)LOC43, 0, sizeof(LOC43)); LOC43[0] = (*sym0).loc.r; LOC43[1] = gettypedesc_533671_839829468(m0, (*sym0).loc.t); addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 9))- 0], ((NimStringDesc*) &T839829468_239), LOC43, 2); } N_NIMCALL(void, symindynamiclibpartial_558071_839829468)(Tcgen527027* m0, Tsym290834* sym0) { asgnRefNoCycle((void**) (&(*sym0).loc.r), mangledynlibproc_536816_839829468(sym0)); asgnRefNoCycle((void**) (&(*(*sym0).typ).sym), NIM_NIL); } N_NIMCALL(void, genprocnoforward_558906_839829468)(Tcgen527027* m0, Tsym290834* prc0) { { fillprocloc_537201_839829468(prc0); useheader_530369_839829468(m0, prc0); { Ropeobj177006* LOC5; if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag290810) 7))&15U)))!=0)) goto LA3; LOC5 = (Ropeobj177006*)0; LOC5 = cgsym_530403_839829468(m0, (*(*prc0).name).s); goto BeforeRet; } LA3: ; genprocprototype_537254_839829468(m0, prc0); { if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag290810) 3))&15U)))!=0)) goto LA8; } goto LA6; LA8: ; { if 
(!((*(*prc0).typ).callconv == ((Tcallingconvention290002) 5))) goto LA11; { NIM_BOOL LOC15; LOC15 = (NIM_BOOL)0; LOC15 = containsorincl_266862_2627731572((&(*m0).declaredthings), (*prc0).Sup.id); if (!!(LOC15)) goto LA16; genprocaux_558284_839829468(m0, prc0); } LA16: ; } goto LA6; LA11: ; { Tcgen527027* q0; if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag290810) 4))&15U)))!=0)) goto LA19; q0 = findpendingmodule_530241_839829468(m0, prc0); { NIM_BOOL LOC23; NIM_BOOL LOC25; LOC23 = (NIM_BOOL)0; LOC23 = !((q0 == NIM_NIL)); if (!(LOC23)) goto LA24; LOC25 = (NIM_BOOL)0; LOC25 = containsorincl_266862_2627731572((&(*q0).declaredthings), (*prc0).Sup.id); LOC23 = !(LOC25); LA24: ; if (!LOC23) goto LA26; symindynamiclib_557929_839829468(q0, prc0); } goto LA21; LA26: ; { symindynamiclibpartial_558071_839829468(m0, prc0); } LA21: ; } goto LA6; LA19: ; { Tcgen527027* q0; if (!!((((*prc0).flags &(1U<<((NU)(((Tsymflag290184) 5))&31U)))!=0))) goto LA30; q0 = findpendingmodule_530241_839829468(m0, prc0); { NIM_BOOL LOC34; NIM_BOOL LOC36; LOC34 = (NIM_BOOL)0; LOC34 = !((q0 == NIM_NIL)); if (!(LOC34)) goto LA35; LOC36 = (NIM_BOOL)0; LOC36 = containsorincl_266862_2627731572((&(*q0).declaredthings), (*prc0).Sup.id); LOC34 = !(LOC36); LA35: ; if (!LOC34) goto LA37; genprocaux_558284_839829468(q0, prc0); } LA37: ; } goto LA6; LA30: ; LA6: ; }BeforeRet: ; } N_NIMCALL(void, genproc_530951_839829468)(Tcgen527027* m0, Tsym290834* prc0) { { { NIM_BOOL LOC3; NIM_BOOL LOC5; LOC3 = (NIM_BOOL)0; LOC3 = (((*prc0).flags &(1U<<((NU)(((Tsymflag290184) 26))&31U)))!=0); if (LOC3) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = isactivated_559431_839829468(prc0); LOC3 = !(LOC5); LA4: ; if (!LOC3) goto LA6; goto BeforeRet; } LA6: ; fillprocloc_537201_839829468(prc0); { if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag290184) 4))&31U)))!=0)) goto LA10; addforwardedproc_530203_839829468(m0, prc0); } goto LA8; LA10: ; { genprocnoforward_558906_839829468(m0, prc0); { NIM_BOOL LOC15; NIM_BOOL LOC16; LOC15 = (NIM_BOOL)0; LOC16 
= (NIM_BOOL)0; LOC16 = ((65600 & (*prc0).flags) == 64); if (!(LOC16)) goto LA17; LOC16 = !((generatedheader_530201_839829468 == NIM_NIL)); LA17: ; LOC15 = LOC16; if (!(LOC15)) goto LA18; LOC15 = !((((*prc0).loc.flags &(1U<<((NU)(((Tlocflag290810) 3))&15U)))!=0)); LA18: ; if (!LOC15) goto LA19; genprocprototype_537254_839829468(generatedheader_530201_839829468, prc0); { if (!((*(*prc0).typ).callconv == ((Tcallingconvention290002) 5))) goto LA23; { NIM_BOOL LOC27; LOC27 = (NIM_BOOL)0; LOC27 = containsorincl_266862_2627731572((&(*generatedheader_530201_839829468).declaredthings), (*prc0).Sup.id); if (!!(LOC27)) goto LA28; genprocaux_558284_839829468(generatedheader_530201_839829468, prc0); } LA28: ; } LA23: ; } LA19: ; } LA8: ; }BeforeRet: ; } static N_INLINE(NIM_BOOL, emulatedthreadvars_530949_839829468)(void) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = ((71303168 & ~ gglobaloptions_168130_2607990831)==0); return result0; } N_NIMCALL(void, declarethreadvar_536676_839829468)(Tcgen527027* m0, Tsym290834* s0, NIM_BOOL isextern0) { { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = emulatedthreadvars_530949_839829468(); if (!LOC3) goto LA4; { NIM_BOOL LOC8; TY530811 LOC11; LOC8 = (NIM_BOOL)0; LOC8 = containsorincl_266862_2627731572((&nimtvdeclared_536675_839829468), (*s0).Sup.id); if (!!(LOC8)) goto LA9; nimtvdeps_536674_839829468 = (Ttypeseq290836*) incrSeqV2(&(nimtvdeps_536674_839829468)->Sup, sizeof(Ttype290840*)); asgnRefNoCycle((void**) (&nimtvdeps_536674_839829468->data[nimtvdeps_536674_839829468->Sup.len]), (*s0).loc.t); ++nimtvdeps_536674_839829468->Sup.len; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = gettypedesc_533671_839829468(m0, (*s0).loc.t); LOC11[1] = (*s0).loc.r; addf_178205_2381377266(&nimtv_536656_839829468, ((NimStringDesc*) &T839829468_54), LOC11, 2); } LA9: ; } goto LA1; LA4: ; { Ropeobj177006* LOC21; TY177507 LOC22; { if (!isextern0) goto LA15; add_177487_2381377266(&(*m0).s[(((Tcfilesection527005) 9))- 0], ((NimStringDesc*) 
&T839829468_240)); } LA15: ; { if (!((gglobaloptions_168130_2607990831 &((NU64)1<<((NU)(((Tglobaloption168013) 22))&63U)))!=0)) goto LA19; add_177487_2381377266(&(*m0).s[(((Tcfilesection527005) 9))- 0], ((NimStringDesc*) &T839829468_241)); } LA19: ; LOC21 = (Ropeobj177006*)0; LOC21 = gettypedesc_533671_839829468(m0, (*s0).loc.t); add_177482_2381377266(&(*m0).s[(((Tcfilesection527005) 9))- 0], LOC21); memset((void*)LOC22, 0, sizeof(LOC22)); LOC22[0] = (*s0).loc.r; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 9))- 0], ((NimStringDesc*) &T839829468_242), LOC22, 1); } LA1: ; } N_NIMCALL(void, genvarprototypeaux_542254_839829468)(Tcgen527027* m0, Tsym290834* sym0) { Ropeobj177006* LOC1; { useheader_530369_839829468(m0, sym0); LOC1 = (Ropeobj177006*)0; LOC1 = manglename_531205_839829468(sym0); fillloc_530282_839829468((&(*sym0).loc), ((Tlockind290808) 3), (*sym0).typ, LOC1, ((Tstorageloc290812) 3)); { NIM_BOOL LOC4; LOC4 = (NIM_BOOL)0; LOC4 = (((*sym0).loc.flags &(1U<<((NU)(((Tlocflag290810) 3))&15U)))!=0); if (LOC4) goto LA5; LOC4 = containsorincl_266862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id); LA5: ; if (!LOC4) goto LA6; goto BeforeRet; } LA6: ; { if (!!(((*(*sym0).owner).Sup.id == (*(*m0).module).Sup.id))) goto LA10; { if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag290184) 22))&31U)))!=0)) goto LA14; declarethreadvar_536676_839829468(m0, sym0, NIM_TRUE); } goto LA12; LA14: ; { Ropeobj177006* LOC17; TY177507 LOC30; add_177487_2381377266(&(*m0).s[(((Tcfilesection527005) 9))- 0], ((NimStringDesc*) &T839829468_240)); LOC17 = (Ropeobj177006*)0; LOC17 = gettypedesc_533671_839829468(m0, (*sym0).loc.t); add_177482_2381377266(&(*m0).s[(((Tcfilesection527005) 9))- 0], LOC17); { if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag290810) 4))&15U)))!=0)) goto LA20; add_177487_2381377266(&(*m0).s[(((Tcfilesection527005) 9))- 0], ((NimStringDesc*) &T839829468_53)); } LA20: ; { if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag290184) 8))&31U)))!=0)) goto LA24; 
add_177487_2381377266(&(*m0).s[(((Tcfilesection527005) 9))- 0], ((NimStringDesc*) &T839829468_121)); } LA24: ; { if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag290184) 7))&31U)))!=0)) goto LA28; add_177487_2381377266(&(*m0).s[(((Tcfilesection527005) 9))- 0], ((NimStringDesc*) &T839829468_122)); } LA28: ; memset((void*)LOC30, 0, sizeof(LOC30)); LOC30[0] = (*sym0).loc.r; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 9))- 0], ((NimStringDesc*) &T839829468_242), LOC30, 1); } LA12: ; } LA10: ; }BeforeRet: ; } N_NIMCALL(void, genvarprototype_537236_839829468)(Tcgen527027* m0, Tsym290834* sym0) { genvarprototypeaux_542254_839829468(m0, sym0); } N_NIMCALL(Ropeobj177006*, cgsym_530403_839829468)(Tcgen527027* m0, NimStringDesc* name0) { Ropeobj177006* result0; Tsym290834* sym0; result0 = (Ropeobj177006*)0; sym0 = getcompilerproc_336746_3937434831(name0); { if (!!((sym0 == NIM_NIL))) goto LA3; switch ((*sym0).kind) { case ((Tsymkind290435) 12): case ((Tsymkind290435) 13): case ((Tsymkind290435) 15): case ((Tsymkind290435) 14): { genproc_530951_839829468(m0, sym0); } break; case ((Tsymkind290435) 8): case ((Tsymkind290435) 11): case ((Tsymkind290435) 9): { genvarprototype_537236_839829468(m0, sym0); } break; case ((Tsymkind290435) 7): { Ropeobj177006* LOC8; LOC8 = (Ropeobj177006*)0; LOC8 = gettypedesc_533671_839829468(m0, (*sym0).typ); } break; default: { NimStringDesc* LOC10; LOC10 = (NimStringDesc*)0; LOC10 = rawNewString(name0->Sup.len + reprEnum((NI)(*sym0).kind, (&NTI290435))->Sup.len + 9); appendString(LOC10, ((NimStringDesc*) &T839829468_243)); appendString(LOC10, name0); appendString(LOC10, ((NimStringDesc*) &T839829468_244)); appendString(LOC10, reprEnum((NI)(*sym0).kind, (&NTI290435))); internalerror_194113_155036129(LOC10); } break; } } goto LA1; LA3: ; { rawmessage_192612_155036129(((Tmsgkind189002) 68), name0); } LA1: ; result0 = (*sym0).loc.r; return result0; } N_NIMCALL(Ropeobj177006*, ropecg_530407_839829468)(Tcgen527027* m0, NimStringDesc* frmt0, 
Ropeobj177006** args0, NI args0Len0) { Ropeobj177006* result0; NI i0; NI length0; NI num0; result0 = (Ropeobj177006*)0; i0 = ((NI) 0); length0 = (frmt0 ? frmt0->Sup.len : 0); result0 = NIM_NIL; num0 = ((NI) 0); { while (1) { NI start0; if (!(i0 < length0)) goto LA2; { if (!((NU8)(frmt0->data[i0]) == (NU8)(36))) goto LA5; i0 += ((NI) 1); switch (((NU8)(frmt0->data[i0]))) { case 36: { add_177487_2381377266(&result0, ((NimStringDesc*) &T839829468_19)); i0 += ((NI) 1); } break; case 35: { i0 += ((NI) 1); add_177482_2381377266(&result0, args0[num0]); num0 += ((NI) 1); } break; case 48 ... 57: { NI j0; j0 = ((NI) 0); { while (1) { j0 = (NI)((NI)((NI)(j0 * ((NI) 10)) + ((NI) (((NU8)(frmt0->data[i0]))))) - ((NI) 48)); i0 += ((NI) 1); { NIM_BOOL LOC14; LOC14 = (NIM_BOOL)0; LOC14 = (length0 <= i0); if (LOC14) goto LA15; LOC14 = !((((NU8)(frmt0->data[i0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[i0])) <= ((NU8)(57)))); LA15: ; if (!LOC14) goto LA16; goto LA10; } LA16: ; } } LA10: ; num0 = j0; { NimStringDesc* LOC22; NimStringDesc* LOC23; if (!((NI)((args0Len0-1) + ((NI) 1)) < j0)) goto LA20; LOC22 = (NimStringDesc*)0; LOC23 = (NimStringDesc*)0; LOC23 = nimIntToStr(j0); LOC22 = rawNewString(LOC23->Sup.len + 30); appendString(LOC22, ((NimStringDesc*) &T839829468_20)); appendString(LOC22, LOC23); internalerror_194113_155036129(LOC22); } LA20: ; add_177482_2381377266(&result0, args0[(NI)(j0 - ((NI) 1))]); } break; case 110: { { if (!!(((goptions_168128_2607990831 &(1U<<((NU)(((Toption168009) 10))&31U)))!=0))) goto LA27; add_177482_2381377266(&result0, rnl_177903_2381377266); } LA27: ; i0 += ((NI) 1); } break; case 78: { add_177482_2381377266(&result0, rnl_177903_2381377266); i0 += ((NI) 1); } break; default: { NimStringDesc* LOC31; LOC31 = (NimStringDesc*)0; LOC31 = rawNewString(31); appendString(LOC31, ((NimStringDesc*) &T839829468_20)); appendChar(LOC31, frmt0->data[i0]); internalerror_194113_155036129(LOC31); } break; } } goto LA3; LA5: ; { NIM_BOOL LOC33; NI j0; NimStringDesc* 
ident0; Ropeobj177006* LOC39; LOC33 = (NIM_BOOL)0; LOC33 = ((NU8)(frmt0->data[i0]) == (NU8)(35)); if (!(LOC33)) goto LA34; LOC33 = (((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) >= ((NU8)(97)) && ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) <= ((NU8)(122)) || ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) >= ((NU8)(65)) && ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) <= ((NU8)(90)) || ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(95))); LA34: ; if (!LOC33) goto LA35; i0 += ((NI) 1); j0 = i0; { while (1) { if (!(((NU8)(frmt0->data[j0])) >= ((NU8)(97)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(122)) || ((NU8)(frmt0->data[j0])) >= ((NU8)(65)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(90)) || ((NU8)(frmt0->data[j0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(57)) || ((NU8)(frmt0->data[j0])) == ((NU8)(95)))) goto LA38; j0 += ((NI) 1); } LA38: ; } ident0 = copyStrLast(frmt0, i0, (NI)(j0 - ((NI) 1))); i0 = j0; LOC39 = (Ropeobj177006*)0; LOC39 = cgsym_530403_839829468(m0, ident0); add_177482_2381377266(&result0, LOC39); } goto LA3; LA35: ; { NIM_BOOL LOC41; NI j0; NimStringDesc* LOC47; Ropeobj177006* LOC48; LOC41 = (NIM_BOOL)0; LOC41 = ((NU8)(frmt0->data[i0]) == (NU8)(35)); if (!(LOC41)) goto LA42; LOC41 = ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(36)); LA42: ; if (!LOC41) goto LA43; i0 += ((NI) 2); j0 = ((NI) 0); { while (1) { if (!(((NU8)(frmt0->data[i0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[i0])) <= ((NU8)(57)))) goto LA46; j0 = (NI)((NI)((NI)(j0 * ((NI) 10)) + ((NI) (((NU8)(frmt0->data[i0]))))) - ((NI) 48)); i0 += ((NI) 1); } LA46: ; } LOC47 = (NimStringDesc*)0; LOC47 = HEX24_177856_2381377266(args0[(NI)(j0 - ((NI) 1))]); LOC48 = (Ropeobj177006*)0; LOC48 = cgsym_530403_839829468(m0, LOC47); add_177482_2381377266(&result0, LOC48); } goto LA3; LA43: ; LA3: ; start0 = i0; { while (1) { if (!(i0 < length0)) goto LA50; { NIM_BOOL LOC53; LOC53 = (NIM_BOOL)0; LOC53 = !(((NU8)(frmt0->data[i0]) == (NU8)(36))); if (!(LOC53)) goto LA54; LOC53 = !(((NU8)(frmt0->data[i0]) == 
(NU8)(35))); LA54: ; if (!LOC53) goto LA55; i0 += ((NI) 1); } goto LA51; LA55: ; { goto LA49; } LA51: ; } LA50: ; } LA49: ; { NimStringDesc* LOC62; if (!(start0 <= (NI)(i0 - ((NI) 1)))) goto LA60; LOC62 = (NimStringDesc*)0; LOC62 = copyStrLast(frmt0, start0, (NI)(i0 - ((NI) 1))); add_177487_2381377266(&result0, LOC62); } LA60: ; } LA2: ; } return result0; } static N_INLINE(NIM_BOOL, crossescppboundary_558754_839829468)(Tcgen527027* m0, Tsym290834* sym0) { NIM_BOOL result0; NIM_BOOL LOC1; NIM_BOOL LOC2; Tsym290834* LOC4; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC2 = (NIM_BOOL)0; LOC2 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); if (!(LOC2)) goto LA3; LOC4 = (Tsym290834*)0; LOC4 = getmodule_297123_2984716966(sym0); LOC2 = !((((*LOC4).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0)); LA3: ; LOC1 = LOC2; if (!(LOC1)) goto LA5; LOC1 = !((gcmd_168132_2607990831 == ((Tcommands168076) 2))); LA5: ; result0 = LOC1; return result0; } N_NIMCALL(void, genprocprototype_537254_839829468)(Tcgen527027* m0, Tsym290834* sym0) { { useheader_530369_839829468(m0, sym0); { if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag290810) 3))&15U)))!=0)) goto LA3; goto BeforeRet; } LA3: ; { if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag290810) 4))&15U)))!=0)) goto LA7; { NIM_BOOL LOC11; Tsym290834* LOC12; NIM_BOOL LOC14; TY530811 LOC17; Ropeobj177006* LOC18; LOC11 = (NIM_BOOL)0; LOC12 = (Tsym290834*)0; LOC12 = getmodule_297123_2984716966(sym0); LOC11 = !(((*LOC12).Sup.id == (*(*m0).module).Sup.id)); if (!(LOC11)) goto LA13; LOC14 = (NIM_BOOL)0; LOC14 = containsorincl_266862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id); LOC11 = !(LOC14); LA13: ; if (!LOC11) goto LA15; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = gettypedesc_533671_839829468(m0, (*sym0).loc.t); LOC17[1] = mangledynlibproc_536816_839829468(sym0); LOC18 = (Ropeobj177006*)0; LOC18 = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_245), LOC17, 2); 
add_177482_2381377266(&(*m0).s[(((Tcfilesection527005) 9))- 0], LOC18); } LA15: ; } goto LA5; LA7: ; { NIM_BOOL LOC20; Ropeobj177006* header0; TY177507 LOC47; Ropeobj177006* LOC48; LOC20 = (NIM_BOOL)0; LOC20 = containsorincl_266862_2627731572((&(*m0).declaredprotos), (*sym0).Sup.id); if (!!(LOC20)) goto LA21; header0 = genprocheader_533867_839829468(m0, sym0); { NIM_BOOL LOC25; LOC25 = (NIM_BOOL)0; LOC25 = (((*sym0).flags &(1U<<((NU)(((Tsymflag290184) 14))&31U)))!=0); if (!(LOC25)) goto LA26; LOC25 = ((Cc_271413_2528170400[(ccompiler_271431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop271004) 6))&7U)))!=0); LA26: ; if (!LOC25) goto LA27; header0 = HEX26_177452_2381377266(((NimStringDesc*) &T839829468_213), header0); } LA27: ; { NIM_BOOL LOC31; LOC31 = (NIM_BOOL)0; LOC31 = !(((*(*sym0).typ).callconv == ((Tcallingconvention290002) 5))); if (!(LOC31)) goto LA32; LOC31 = crossescppboundary_558754_839829468(m0, sym0); LA32: ; if (!LOC31) goto LA33; header0 = HEX26_177452_2381377266(((NimStringDesc*) &T839829468_246), header0); } LA33: ; { NIM_BOOL LOC37; LOC37 = (NIM_BOOL)0; LOC37 = (((*sym0).flags &(1U<<((NU)(((Tsymflag290184) 9))&31U)))!=0); if (!(LOC37)) goto LA38; LOC37 = ((Cc_271413_2528170400[(ccompiler_271431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop271004) 7))&7U)))!=0); LA38: ; if (!LOC37) goto LA39; add_177487_2381377266(&header0, ((NimStringDesc*) &T839829468_247)); } LA39: ; { NIM_BOOL LOC43; LOC43 = (NIM_BOOL)0; LOC43 = (((*sym0).flags &(1U<<((NU)(((Tsymflag290184) 14))&31U)))!=0); if (!(LOC43)) goto LA44; LOC43 = ((Cc_271413_2528170400[(ccompiler_271431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop271004) 7))&7U)))!=0); LA44: ; if (!LOC43) goto LA45; add_177487_2381377266(&header0, ((NimStringDesc*) &T839829468_248)); } LA45: ; memset((void*)LOC47, 0, sizeof(LOC47)); LOC47[0] = header0; LOC48 = (Ropeobj177006*)0; LOC48 = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_191), LOC47, 1); 
add_177482_2381377266(&(*m0).s[(((Tcfilesection527005) 7))- 0], LOC48); } goto LA5; LA21: ; LA5: ; }BeforeRet: ; } static N_INLINE(NIM_BOOL, usesnativegc_168177_2607990831)(void) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = (((Tgcmode168080) 5) <= gselectedgc_168133_2607990831); return result0; } N_NIMCALL(void, genrefassign_536311_839829468)(Tcproc527021* p0, Tloc290816 dest0, Tloc290816 src0, Tassignmentflag536302Set flags0) { { NIM_BOOL LOC3; NIM_BOOL LOC5; TY530811 LOC8; LOC3 = (NIM_BOOL)0; LOC3 = (dest0.s == ((Tstorageloc290812) 2)); if (LOC3) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = usesnativegc_168177_2607990831(); LOC3 = !(LOC5); LA4: ; if (!LOC3) goto LA6; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_536188_839829468(dest0); LOC8[1] = rdloc_536188_839829468(src0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_123), LOC8, 2); } goto LA1; LA6: ; { if (!(dest0.s == ((Tstorageloc290812) 3))) goto LA10; { NIM_BOOL LOC14; TY530811 LOC17; LOC14 = (NIM_BOOL)0; LOC14 = canformacycle_318123_3876443242(dest0.t); if (!LOC14) goto LA15; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = addrloc_536204_839829468(dest0); LOC17[1] = rdloc_536188_839829468(src0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_249), LOC17, 2); } goto LA12; LA15: ; { TY530811 LOC19; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = addrloc_536204_839829468(dest0); LOC19[1] = rdloc_536188_839829468(src0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_250), LOC19, 2); } LA12: ; } goto LA1; LA10: ; { TY530811 LOC21; memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = addrloc_536204_839829468(dest0); LOC21[1] = rdloc_536188_839829468(src0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_251), LOC21, 2); } LA1: ; } N_NIMCALL(void, optasgnloc_547788_839829468)(Tloc290816 a0, Ttype290840* t0, Ropeobj177006* field0, Tloc290816* 
Result) { Ropeobj177006* LOC1; Ropeobj177006* LOC2; (*Result).k = ((Tlockind290808) 5); (*Result).s = a0.s; unsureAsgnRef((void**) (&(*Result).t), t0); LOC1 = (Ropeobj177006*)0; LOC1 = rdloc_536188_839829468(a0); LOC2 = (Ropeobj177006*)0; LOC2 = HEX26_177447_2381377266(LOC1, ((NimStringDesc*) &T839829468_257)); unsureAsgnRef((void**) (&(*Result).r), HEX26_177418_2381377266(LOC2, field0)); } N_NIMCALL(void, genoptasgntuple_548001_839829468)(Tcproc527021* p0, Tloc290816 dest0, Tloc290816 src0, Tassignmentflag536302Set flags0) { Tassignmentflag536302Set newflags0; Ttype290840* t_548053_839829468; Ttype290840* LOC9; { if (!(src0.s == ((Tstorageloc290812) 1))) goto LA3; newflags0 = (flags0 | 1); } goto LA1; LA3: ; { if (!(((*dest0.t).flags &(1U<<((NU)(((Ttypeflag290431) 6))&31U)))!=0)) goto LA6; newflags0 = (flags0 & ~ 1); } goto LA1; LA6: ; { newflags0 = flags0; } LA1: ; LOC9 = (Ttype290840*)0; LOC9 = skiptypes_294099_850551059(dest0.t, IL64(211106232576256)); t_548053_839829468 = getuniquetype_526640_2036603609(LOC9); { NI i_548071_839829468; NI HEX3Atmp_548077_839829468; NI LOC11; NI res_548080_839829468; i_548071_839829468 = (NI)0; HEX3Atmp_548077_839829468 = (NI)0; LOC11 = (NI)0; LOC11 = len_293339_850551059(t_548053_839829468); HEX3Atmp_548077_839829468 = (LOC11 - 1); res_548080_839829468 = ((NI) 0); { while (1) { Ttype290840* t0; Ropeobj177006* field0; TY177507 LOC14; Tloc290816 LOC15; Tloc290816 LOC16; if (!(res_548080_839829468 <= HEX3Atmp_548077_839829468)) goto LA13; i_548071_839829468 = res_548080_839829468; t0 = (*t_548053_839829468).sons->data[i_548071_839829468]; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rope_177401_2381377266(((NI64) (i_548071_839829468))); field0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_260), LOC14, 1); memset((void*)(&LOC15), 0, sizeof(LOC15)); optasgnloc_547788_839829468(dest0, t0, field0, (&LOC15)); memset((void*)(&LOC16), 0, sizeof(LOC16)); optasgnloc_547788_839829468(src0, t0, field0, (&LOC16)); 
genassignment_537264_839829468(p0, LOC15, LOC16, newflags0); res_548080_839829468 += ((NI) 1); } LA13: ; } } } N_NIMCALL(void, gengenericasgn_548167_839829468)(Tcproc527021* p0, Tloc290816 dest0, Tloc290816 src0, Tassignmentflag536302Set flags0) { { NIM_BOOL LOC3; Ttype290840* LOC5; LOC3 = (NIM_BOOL)0; LOC3 = !(((flags0 &(1U<<((NU)(((Tassignmentflag536302) 0))&7U)))!=0)); if (LOC3) goto LA4; LOC5 = (Ttype290840*)0; LOC5 = skiptypes_294099_850551059(dest0.t, IL64(211106242013440)); LOC3 = (((*LOC5).flags &(1U<<((NU)(((Ttypeflag290431) 6))&31U)))!=0); LA4: ; if (!LOC3) goto LA6; { NIM_BOOL LOC10; NIM_BOOL LOC12; TY533238 LOC15; LOC10 = (NIM_BOOL)0; LOC10 = (dest0.s == ((Tstorageloc290812) 2)); if (LOC10) goto LA11; LOC12 = (NIM_BOOL)0; LOC12 = usesnativegc_168177_2607990831(); LOC10 = !(LOC12); LA11: ; if (!LOC10) goto LA13; usestringh_530345_839829468((*p0).module); memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = addrloc_536204_839829468(dest0); LOC15[1] = addrloc_536204_839829468(src0); LOC15[2] = rdloc_536188_839829468(dest0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_261), LOC15, 3); } goto LA8; LA13: ; { TY533238 LOC17; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = addrloc_536204_839829468(dest0); LOC17[1] = addrloc_536204_839829468(src0); LOC17[2] = gentypeinfo_533941_839829468((*p0).module, dest0.t); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_262), LOC17, 3); } LA8: ; } goto LA1; LA6: ; { TY533238 LOC19; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = addrloc_536204_839829468(dest0); LOC19[1] = addrloc_536204_839829468(src0); LOC19[2] = gentypeinfo_533941_839829468((*p0).module, dest0.t); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_263), LOC19, 3); } LA1: ; } N_NIMCALL(NI, asgncomplexity_547750_839829468)(Tnode290802* n0) { NI result0; result0 = (NI)0; { if (!!((n0 == NIM_NIL))) goto LA3; switch ((*n0).kind) { case 
((Tnodekind290020) 3): { result0 = ((NI) 1); } break; case ((Tnodekind290020) 139): { result0 = ((NI) 100); } break; case ((Tnodekind290020) 138): { { Tnode290802* t_547767_839829468; t_547767_839829468 = (Tnode290802*)0; { NI i_547781_839829468; NI HEX3Atmp_547783_839829468; NI LOC10; NI res_547785_839829468; i_547781_839829468 = (NI)0; HEX3Atmp_547783_839829468 = (NI)0; LOC10 = (NI)0; LOC10 = len_291081_850551059(n0); HEX3Atmp_547783_839829468 = (LOC10 - 1); res_547785_839829468 = ((NI) 0); { while (1) { NI LOC13; if (!(res_547785_839829468 <= HEX3Atmp_547783_839829468)) goto LA12; i_547781_839829468 = res_547785_839829468; t_547767_839829468 = (*n0).kindU.S6.sons->data[i_547781_839829468]; LOC13 = (NI)0; LOC13 = asgncomplexity_547750_839829468(t_547767_839829468); result0 += LOC13; res_547785_839829468 += ((NI) 1); } LA12: ; } } } } break; default: { } break; } } LA3: ; return result0; } N_NIMCALL(void, genoptasgnobject_548084_839829468)(Tcproc527021* p0, Tloc290816 dest0, Tloc290816 src0, Tassignmentflag536302Set flags0, Tnode290802* t0) { Tassignmentflag536302Set newflags0; { { if (!(t0 == NIM_NIL)) goto LA3; goto BeforeRet; } LA3: ; { if (!(src0.s == ((Tstorageloc290812) 1))) goto LA7; newflags0 = (flags0 | 1); } goto LA5; LA7: ; { if (!(((*dest0.t).flags &(1U<<((NU)(((Ttypeflag290431) 6))&31U)))!=0)) goto LA10; newflags0 = (flags0 & ~ 1); } goto LA5; LA10: ; { newflags0 = flags0; } LA5: ; switch ((*t0).kind) { case ((Tnodekind290020) 3): { Tsym290834* field0; Tloc290816 LOC14; Tloc290816 LOC15; field0 = (*t0).kindU.S4.sym; memset((void*)(&LOC14), 0, sizeof(LOC14)); optasgnloc_547788_839829468(dest0, (*field0).typ, (*field0).loc.r, (&LOC14)); memset((void*)(&LOC15), 0, sizeof(LOC15)); optasgnloc_547788_839829468(src0, (*field0).typ, (*field0).loc.r, (&LOC15)); genassignment_537264_839829468(p0, LOC14, LOC15, newflags0); } break; case ((Tnodekind290020) 138): { { Tnode290802* child_548155_839829468; child_548155_839829468 = (Tnode290802*)0; { NI 
i_548160_839829468; NI HEX3Atmp_548162_839829468; NI LOC19; NI res_548164_839829468; i_548160_839829468 = (NI)0; HEX3Atmp_548162_839829468 = (NI)0; LOC19 = (NI)0; LOC19 = len_291081_850551059(t0); HEX3Atmp_548162_839829468 = (LOC19 - 1); res_548164_839829468 = ((NI) 0); { while (1) { if (!(res_548164_839829468 <= HEX3Atmp_548162_839829468)) goto LA21; i_548160_839829468 = res_548164_839829468; child_548155_839829468 = (*t0).kindU.S6.sons->data[i_548160_839829468]; genoptasgnobject_548084_839829468(p0, dest0, src0, newflags0, child_548155_839829468); res_548164_839829468 += ((NI) 1); } LA21: ; } } } } break; default: { } break; } }BeforeRet: ; } N_NIMCALL(void, genassignment_537264_839829468)(Tcproc527021* p0, Tloc290816 dest0, Tloc290816 src0, Tassignmentflag536302Set flags0) { Ttype290840* ty0; { { NIM_BOOL LOC3; TY530811 LOC7; LOC3 = (NIM_BOOL)0; LOC3 = !((src0.t == NIM_NIL)); if (!(LOC3)) goto LA4; LOC3 = ((*src0.t).kind == ((Ttypekind290244) 21)); LA4: ; if (!LOC3) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = rdloc_536188_839829468(dest0); LOC7[1] = rdloc_536188_839829468(src0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_123), LOC7, 2); goto BeforeRet; } LA5: ; ty0 = skiptypes_294099_850551059(dest0.t, IL64(211106233624832)); switch ((*ty0).kind) { case ((Ttypekind290244) 22): { genrefassign_536311_839829468(p0, dest0, src0, flags0); } break; case ((Ttypekind290244) 24): { { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = !(((flags0 &(1U<<((NU)(((Tassignmentflag536302) 0))&7U)))!=0)); if (!(LOC12)) goto LA13; LOC12 = !((src0.s == ((Tstorageloc290812) 1))); LA13: ; if (!LOC12) goto LA14; genrefassign_536311_839829468(p0, dest0, src0, flags0); } goto LA10; LA14: ; { TY533238 LOC17; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = addrloc_536204_839829468(dest0); LOC17[1] = rdloc_536188_839829468(src0); LOC17[2] = gentypeinfo_533941_839829468((*p0).module, dest0.t); linefmt_530714_839829468(p0, 
((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_252), LOC17, 3); } LA10: ; } break; case ((Ttypekind290244) 28): { { NIM_BOOL LOC21; LOC21 = (NIM_BOOL)0; LOC21 = !(((flags0 &(1U<<((NU)(((Tassignmentflag536302) 0))&7U)))!=0)); if (!(LOC21)) goto LA22; LOC21 = !((src0.s == ((Tstorageloc290812) 1))); LA22: ; if (!LOC21) goto LA23; genrefassign_536311_839829468(p0, dest0, src0, flags0); } goto LA19; LA23: ; { { NIM_BOOL LOC28; NIM_BOOL LOC30; TY530811 LOC33; LOC28 = (NIM_BOOL)0; LOC28 = (dest0.s == ((Tstorageloc290812) 2)); if (LOC28) goto LA29; LOC30 = (NIM_BOOL)0; LOC30 = usesnativegc_168177_2607990831(); LOC28 = !(LOC30); LA29: ; if (!LOC28) goto LA31; memset((void*)LOC33, 0, sizeof(LOC33)); LOC33[0] = rdloc_536188_839829468(dest0); LOC33[1] = rdloc_536188_839829468(src0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_253), LOC33, 2); } goto LA26; LA31: ; { Tloc290816 tmp0; TY533238 LOC37; TY177507 LOC38; if (!(dest0.s == ((Tstorageloc290812) 3))) goto LA35; memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_535032_839829468(p0, ty0, (&tmp0), NIM_FALSE); memset((void*)LOC37, 0, sizeof(LOC37)); LOC37[0] = rdloc_536188_839829468(dest0); LOC37[1] = rdloc_536188_839829468(src0); LOC37[2] = rdloc_536188_839829468(tmp0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_254), LOC37, 3); memset((void*)LOC38, 0, sizeof(LOC38)); LOC38[0] = rdloc_536188_839829468(tmp0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_255), LOC38, 1); } goto LA26; LA35: ; { TY530811 LOC40; memset((void*)LOC40, 0, sizeof(LOC40)); LOC40[0] = addrloc_536204_839829468(dest0); LOC40[1] = rdloc_536188_839829468(src0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_256), LOC40, 2); } LA26: ; } LA19: ; } break; case ((Ttypekind290244) 25): { { NIM_BOOL LOC44; Tloc290816 a0; Ropeobj177006* LOC47; Tloc290816 LOC48; Tloc290816 b0; Ropeobj177006* 
LOC49; Tloc290816 LOC50; TY530811 LOC51; LOC44 = (NIM_BOOL)0; LOC44 = needscomplexassignment_531509_839829468(dest0.t); if (!LOC44) goto LA45; memset((void*)(&a0), 0, sizeof(a0)); LOC47 = (Ropeobj177006*)0; LOC47 = rope_177277_2381377266(((NimStringDesc*) &T839829468_258)); memset((void*)(&LOC48), 0, sizeof(LOC48)); optasgnloc_547788_839829468(dest0, dest0.t, LOC47, (&LOC48)); memcpy((void*)(&a0), (NIM_CONST void*)(&LOC48), sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); LOC49 = (Ropeobj177006*)0; LOC49 = rope_177277_2381377266(((NimStringDesc*) &T839829468_258)); memset((void*)(&LOC50), 0, sizeof(LOC50)); optasgnloc_547788_839829468(src0, dest0.t, LOC49, (&LOC50)); memcpy((void*)(&b0), (NIM_CONST void*)(&LOC50), sizeof(b0)); genrefassign_536311_839829468(p0, a0, b0, flags0); memset((void*)LOC51, 0, sizeof(LOC51)); LOC51[0] = rdloc_536188_839829468(dest0); LOC51[1] = rdloc_536188_839829468(src0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_259), LOC51, 2); } goto LA42; LA45: ; { TY530811 LOC53; memset((void*)LOC53, 0, sizeof(LOC53)); LOC53[0] = rdloc_536188_839829468(dest0); LOC53[1] = rdloc_536188_839829468(src0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_123), LOC53, 2); } LA42: ; } break; case ((Ttypekind290244) 18): { { NIM_BOOL LOC57; LOC57 = (NIM_BOOL)0; LOC57 = needscomplexassignment_531509_839829468(dest0.t); if (!LOC57) goto LA58; { NI LOC62; LOC62 = (NI)0; LOC62 = len_293339_850551059(dest0.t); if (!(LOC62 <= ((NI) 4))) goto LA63; genoptasgntuple_548001_839829468(p0, dest0, src0, flags0); } goto LA60; LA63: ; { gengenericasgn_548167_839829468(p0, dest0, src0, flags0); } LA60: ; } goto LA55; LA58: ; { TY530811 LOC67; memset((void*)LOC67, 0, sizeof(LOC67)); LOC67[0] = rdloc_536188_839829468(dest0); LOC67[1] = rdloc_536188_839829468(src0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_123), LOC67, 2); } LA55: ; } break; case 
((Ttypekind290244) 17): { { NIM_BOOL LOC71; TY530811 LOC74; LOC71 = (NIM_BOOL)0; LOC71 = isimportedcpptype_531476_839829468(ty0); if (!LOC71) goto LA72; memset((void*)LOC74, 0, sizeof(LOC74)); LOC74[0] = rdloc_536188_839829468(dest0); LOC74[1] = rdloc_536188_839829468(src0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_123), LOC74, 2); } goto LA69; LA72: ; { NIM_BOOL LOC76; LOC76 = (NIM_BOOL)0; LOC76 = isobjlackingtypefield_531513_839829468(ty0); if (!!(LOC76)) goto LA77; gengenericasgn_548167_839829468(p0, dest0, src0, flags0); } goto LA69; LA77: ; { NIM_BOOL LOC80; LOC80 = (NIM_BOOL)0; LOC80 = needscomplexassignment_531509_839829468(ty0); if (!LOC80) goto LA81; { NIM_BOOL LOC85; NI LOC87; Ropeobj177006* LOC90; LOC85 = (NIM_BOOL)0; LOC85 = (*ty0).sons->data[((NI) 0)] == 0; if (!(LOC85)) goto LA86; LOC87 = (NI)0; LOC87 = asgncomplexity_547750_839829468((*ty0).n); LOC85 = (LOC87 <= ((NI) 4)); LA86: ; if (!LOC85) goto LA88; LOC90 = (Ropeobj177006*)0; LOC90 = gettypedesc_533671_839829468((*p0).module, ty0); ty0 = getuniquetype_526640_2036603609(ty0); { NimStringDesc* LOC95; if (!!(!(((*ty0).n == NIM_NIL)))) goto LA93; LOC95 = (NimStringDesc*)0; LOC95 = HEX24_194185_1689653243(T839829468_264); internalerror_194113_155036129(LOC95); } LA93: ; genoptasgnobject_548084_839829468(p0, dest0, src0, flags0, (*ty0).n); } goto LA83; LA88: ; { gengenericasgn_548167_839829468(p0, dest0, src0, flags0); } LA83: ; } goto LA69; LA81: ; { TY530811 LOC98; memset((void*)LOC98, 0, sizeof(LOC98)); LOC98[0] = rdloc_536188_839829468(dest0); LOC98[1] = rdloc_536188_839829468(src0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_123), LOC98, 2); } LA69: ; } break; case ((Ttypekind290244) 16): case ((Ttypekind290244) 4): { { NIM_BOOL LOC102; LOC102 = (NIM_BOOL)0; LOC102 = needscomplexassignment_531509_839829468(dest0.t); if (!LOC102) goto LA103; gengenericasgn_548167_839829468(p0, dest0, src0, flags0); } goto LA100; 
LA103: ; { TY533238 LOC106; usestringh_530345_839829468((*p0).module); memset((void*)LOC106, 0, sizeof(LOC106)); LOC106[0] = rdloc_536188_839829468(dest0); LOC106[1] = rdloc_536188_839829468(src0); LOC106[2] = gettypedesc_533671_839829468((*p0).module, ty0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_261), LOC106, 3); } LA100: ; } break; case ((Ttypekind290244) 27): case ((Ttypekind290244) 48): { { NIM_BOOL LOC110; TY533238 LOC113; LOC110 = (NIM_BOOL)0; LOC110 = needscomplexassignment_531509_839829468(dest0.t); if (!LOC110) goto LA111; memset((void*)LOC113, 0, sizeof(LOC113)); LOC113[0] = addrloc_536204_839829468(dest0); LOC113[1] = addrloc_536204_839829468(src0); LOC113[2] = gentypeinfo_533941_839829468((*p0).module, dest0.t); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_266), LOC113, 3); } goto LA108; LA111: ; { TY530811 LOC115; usestringh_530345_839829468((*p0).module); memset((void*)LOC115, 0, sizeof(LOC115)); LOC115[0] = rdloc_536188_839829468(dest0); LOC115[1] = rdloc_536188_839829468(src0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_267), LOC115, 2); } LA108: ; } break; case ((Ttypekind290244) 19): { { Tctypekind527007 LOC119; TY533238 LOC122; NI64 LOC123; LOC119 = (Tctypekind527007)0; LOC119 = maptype_531393_839829468(ty0); if (!(LOC119 == ((Tctypekind527007) 17))) goto LA120; usestringh_530345_839829468((*p0).module); memset((void*)LOC122, 0, sizeof(LOC122)); LOC122[0] = rdloc_536188_839829468(dest0); LOC122[1] = rdloc_536188_839829468(src0); LOC123 = (NI64)0; LOC123 = getsize_318135_3876443242(dest0.t); LOC122[2] = rope_177401_2381377266(LOC123); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_268), LOC122, 3); } goto LA117; LA120: ; { TY530811 LOC125; memset((void*)LOC125, 0, sizeof(LOC125)); LOC125[0] = rdloc_536188_839829468(dest0); LOC125[1] = rdloc_536188_839829468(src0); 
linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_123), LOC125, 2); } LA117: ; } break; case ((Ttypekind290244) 21): case ((Ttypekind290244) 26): case ((Ttypekind290244) 2): case ((Ttypekind290244) 1): case ((Ttypekind290244) 14): case ((Ttypekind290244) 29): case ((Ttypekind290244) 31) ... ((Ttypekind290244) 44): case ((Ttypekind290244) 20): case ((Ttypekind290244) 23): { TY530811 LOC127; memset((void*)LOC127, 0, sizeof(LOC127)); LOC127[0] = rdloc_536188_839829468(dest0); LOC127[1] = rdloc_536188_839829468(src0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_123), LOC127, 2); } break; default: { NimStringDesc* LOC129; LOC129 = (NimStringDesc*)0; LOC129 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI290244))->Sup.len + 15); appendString(LOC129, ((NimStringDesc*) &T839829468_269)); appendString(LOC129, reprEnum((NI)(*ty0).kind, (&NTI290244))); internalerror_194113_155036129(LOC129); } break; } }BeforeRet: ; } N_NIMCALL(void, putlocintodest_537258_839829468)(Tcproc527021* p0, Tloc290816* d0, Tloc290816 s0) { { if (!!(((*d0).k == ((Tlockind290808) 0)))) goto LA3; { if (!(((*d0).flags &(1U<<((NU)(((Tlocflag290810) 2))&15U)))!=0)) goto LA7; genassignment_537264_839829468(p0, (*d0), s0, 0); } goto LA5; LA7: ; { genassignment_537264_839829468(p0, (*d0), s0, 1); } LA5: ; } goto LA1; LA3: ; { genericAssign((void*)(&(*d0)), (void*)(&s0), (&NTI290816)); } LA1: ; } N_NIMCALL(NIM_BOOL, issimpleconst_530311_839829468)(Ttype290840* typ0) { NIM_BOOL result0; Ttype290840* t0; NIM_BOOL LOC1; NIM_BOOL LOC3; result0 = (NIM_BOOL)0; t0 = skiptypes_294099_850551059(typ0, IL64(211106240964864)); LOC1 = (NIM_BOOL)0; LOC1 = !(((*t0).kind == ((Ttypekind290244) 18) || (*t0).kind == ((Ttypekind290244) 17) || (*t0).kind == ((Ttypekind290244) 16) || (*t0).kind == ((Ttypekind290244) 4) || (*t0).kind == ((Ttypekind290244) 19) || (*t0).kind == ((Ttypekind290244) 24))); if (!(LOC1)) goto LA2; LOC3 = (NIM_BOOL)0; LOC3 = 
((*t0).kind == ((Ttypekind290244) 25)); if (!(LOC3)) goto LA4; LOC3 = ((*t0).callconv == ((Tcallingconvention290002) 8)); LA4: ; LOC1 = !(LOC3); LA2: ; result0 = LOC1; return result0; } N_NIMCALL(void, putintodest_548468_839829468)(Tcproc527021* p0, Tloc290816* d0, Ttype290840* t0, Ropeobj177006* r0, Tstorageloc290812 s0) { Tloc290816 a0; memset((void*)(&a0), 0, sizeof(a0)); { if (!!(((*d0).k == ((Tlockind290808) 0)))) goto LA3; initloc_530273_839829468((&a0), ((Tlockind290808) 6), t0, s0); a0.r = r0; { if (!(((*d0).flags &(1U<<((NU)(((Tlocflag290810) 2))&15U)))!=0)) goto LA7; genassignment_537264_839829468(p0, (*d0), a0, 0); } goto LA5; LA7: ; { genassignment_537264_839829468(p0, (*d0), a0, 1); } LA5: ; } goto LA1; LA3: ; { (*d0).k = ((Tlockind290808) 6); unsureAsgnRef((void**) (&(*d0).t), t0); unsureAsgnRef((void**) (&(*d0).r), r0); } LA1: ; } N_NIMCALL(NI64, bitsettoword_547578_839829468)(Tbitset337004* s0, NI size0) { NI64 result0; result0 = (NI64)0; result0 = IL64(0); { NI j_547612_839829468; NI HEX3Atmp_547622_839829468; NI res_547625_839829468; j_547612_839829468 = (NI)0; HEX3Atmp_547622_839829468 = (NI)0; HEX3Atmp_547622_839829468 = (NI)(size0 - ((NI) 1)); res_547625_839829468 = ((NI) 0); { while (1) { if (!(res_547625_839829468 <= HEX3Atmp_547622_839829468)) goto LA3; j_547612_839829468 = res_547625_839829468; { if (!(j_547612_839829468 < (s0 ? 
s0->Sup.len : 0))) goto LA6; result0 = (NI64)(result0 | (NI64)((NU64)(((NI64)(NU64)(NU8)(s0->data[j_547612_839829468]))) << (NU64)(((NI64) ((NI)(j_547612_839829468 * ((NI) 8))))))); } LA6: ; res_547625_839829468 += ((NI) 1); } LA3: ; } } return result0; } N_NIMCALL(Ropeobj177006*, genrawsetdata_547629_839829468)(Tbitset337004* cs0, NI size0) { Ropeobj177006* result0; NimStringDesc* frmt0; result0 = (Ropeobj177006*)0; frmt0 = (NimStringDesc*)0; { TY531289 LOC5; if (!(((NI) 8) < size0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_273), LOC5, 0); { NI i_547649_839829468; NI HEX3Atmp_547657_839829468; NI res_547660_839829468; i_547649_839829468 = (NI)0; HEX3Atmp_547657_839829468 = (NI)0; HEX3Atmp_547657_839829468 = (NI)(size0 - ((NI) 1)); res_547660_839829468 = ((NI) 0); { while (1) { TY177507 LOC19; NimStringDesc* LOC20; if (!(res_547660_839829468 <= HEX3Atmp_547657_839829468)) goto LA8; i_547649_839829468 = res_547660_839829468; { if (!(i_547649_839829468 < (NI)(size0 - ((NI) 1)))) goto LA11; { if (!(((NI) ((NI)((NI)(i_547649_839829468 + ((NI) 1)) % ((NI) 8)))) == ((NI) 0))) goto LA15; frmt0 = copyString(((NimStringDesc*) &T839829468_274)); } goto LA13; LA15: ; { frmt0 = copyString(((NimStringDesc*) &T839829468_275)); } LA13: ; } goto LA9; LA11: ; { frmt0 = copyString(((NimStringDesc*) &T839829468_276)); } LA9: ; memset((void*)LOC19, 0, sizeof(LOC19)); LOC20 = (NimStringDesc*)0; LOC20 = nsuToHex(((NI64)(NU64)(NU8)(cs0->data[i_547649_839829468])), ((NI) 2)); LOC19[0] = rope_177277_2381377266(LOC20); addf_178205_2381377266(&result0, frmt0, LOC19, 1); res_547660_839829468 += ((NI) 1); } LA8: ; } } } goto LA1; LA3: ; { NI64 LOC22; LOC22 = (NI64)0; LOC22 = bitsettoword_547578_839829468(cs0, size0); result0 = intliteral_537270_839829468(LOC22); } LA1: ; return result0; } N_NIMCALL(void, appcg_530640_839829468)(Tcgen527027* m0, Tcfilesection527005 s0, NimStringDesc* frmt0, Ropeobj177006** args0, NI 
args0Len0) { Ropeobj177006* LOC1; LOC1 = (Ropeobj177006*)0; LOC1 = ropecg_530407_839829468(m0, frmt0, args0, args0Len0); add_177482_2381377266(&(*m0).s[(s0)- 0], LOC1); } N_NIMCALL(Ropeobj177006*, genconstseq_557371_839829468)(Tcproc527021* p0, Tnode290802* n0, Ttype290840* t0) { Ropeobj177006* result0; Ropeobj177006* data0; TY177507 LOC1; NI LOC2; TY533235 LOC18; NI LOC19; TY530811 LOC20; result0 = (Ropeobj177006*)0; memset((void*)LOC1, 0, sizeof(LOC1)); LOC2 = (NI)0; LOC2 = len_291081_850551059(n0); LOC1[0] = rope_177401_2381377266(((NI64) (LOC2))); data0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_277), LOC1, 1); { NI LOC5; LOC5 = (NI)0; LOC5 = len_291081_850551059(n0); if (!(((NI) 0) < LOC5)) goto LA6; add_177487_2381377266(&data0, ((NimStringDesc*) &T839829468_278)); { NI i_557395_839829468; NI HEX3Atmp_557411_839829468; NI LOC9; NI res_557414_839829468; i_557395_839829468 = (NI)0; HEX3Atmp_557411_839829468 = (NI)0; LOC9 = (NI)0; LOC9 = len_291081_850551059(n0); HEX3Atmp_557411_839829468 = (NI)(LOC9 - ((NI) 1)); res_557414_839829468 = ((NI) 0); { while (1) { Ropeobj177006* LOC17; if (!(res_557414_839829468 <= HEX3Atmp_557411_839829468)) goto LA11; i_557395_839829468 = res_557414_839829468; { TY531289 LOC16; if (!(((NI) 0) < i_557395_839829468)) goto LA14; memset((void*)LOC16, 0, sizeof(LOC16)); addf_178205_2381377266(&data0, ((NimStringDesc*) &T839829468_279), LOC16, 0); } LA14: ; LOC17 = (Ropeobj177006*)0; LOC17 = genconstexpr_552849_839829468(p0, (*n0).kindU.S6.sons->data[i_557395_839829468]); add_177482_2381377266(&data0, LOC17); res_557414_839829468 += ((NI) 1); } LA11: ; } } add_177487_2381377266(&data0, ((NimStringDesc*) &T839829468_280)); } LA6: ; add_177487_2381377266(&data0, ((NimStringDesc*) &T839829468_280)); result0 = gettempname_531596_839829468((*p0).module); memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = gettypedesc_533671_839829468((*p0).module, (*t0).sons->data[((NI) 0)]); LOC19 = (NI)0; LOC19 = len_291081_850551059(n0); 
LOC18[1] = rope_177401_2381377266(((NI64) (LOC19))); LOC18[2] = result0; LOC18[3] = data0; appcg_530640_839829468((*p0).module, ((Tcfilesection527005) 8), ((NimStringDesc*) &T839829468_281), LOC18, 4); memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = gettypedesc_533671_839829468((*p0).module, t0); LOC20[1] = result0; result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_282), LOC20, 2); return result0; } N_NIMCALL(Ropeobj177006*, gennamedconstexpr_557284_839829468)(Tcproc527021* p0, Tnode290802* n0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; { if (!((*n0).kind == ((Tnodekind290020) 34))) goto LA3; result0 = genconstexpr_552849_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)]); } goto LA1; LA3: ; { result0 = genconstexpr_552849_839829468(p0, n0); } LA1: ; return result0; } N_NIMCALL(Ropeobj177006*, genconstsimplelist_557299_839829468)(Tcproc527021* p0, Tnode290802* n0) { Ropeobj177006* result0; NI length0; TY531289 LOC10; result0 = (Ropeobj177006*)0; length0 = sonslen_293351_850551059(n0); result0 = rope_177277_2381377266(((NimStringDesc*) &T839829468_223)); { NI i_557333_839829468; NI HEX3Atmp_557362_839829468; NI HEX3Atmp_557363_839829468; NI res_557366_839829468; i_557333_839829468 = (NI)0; HEX3Atmp_557362_839829468 = (NI)0; HEX3Atmp_557363_839829468 = (NI)0; HEX3Atmp_557362_839829468 = ((*n0).kind == ((Tnodekind290020) 38)); HEX3Atmp_557363_839829468 = (NI)(length0 - ((NI) 2)); res_557366_839829468 = ((NI) (HEX3Atmp_557362_839829468)); { while (1) { TY177507 LOC4; if (!(res_557366_839829468 <= HEX3Atmp_557363_839829468)) goto LA3; i_557333_839829468 = res_557366_839829468; memset((void*)LOC4, 0, sizeof(LOC4)); LOC4[0] = gennamedconstexpr_557284_839829468(p0, (*n0).kindU.S6.sons->data[i_557333_839829468]); addf_178205_2381377266(&result0, ((NimStringDesc*) &T839829468_283), LOC4, 1); res_557366_839829468 += ((NI) 1); } LA3: ; } } { Ropeobj177006* LOC9; if (!(((NI) (((*n0).kind == ((Tnodekind290020) 38)))) < length0)) goto LA7; LOC9 = 
(Ropeobj177006*)0; LOC9 = gennamedconstexpr_557284_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))]); add_177482_2381377266(&result0, LOC9); } LA7: ; memset((void*)LOC10, 0, sizeof(LOC10)); addf_178205_2381377266(&result0, ((NimStringDesc*) &T839829468_160), LOC10, 0); return result0; } N_NIMCALL(Ropeobj177006*, genconstexpr_552849_839829468)(Tcproc527021* p0, Tnode290802* n0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; switch ((*n0).kind) { case ((Tnodekind290020) 58): case ((Tnodekind290020) 59): { result0 = genconstexpr_552849_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)]); } break; case ((Tnodekind290020) 39): { Tbitset337004* cs0; NI64 LOC3; cs0 = (Tbitset337004*)0; tobitset_338001_452470228(n0, (&cs0)); LOC3 = (NI64)0; LOC3 = getsize_318135_3876443242((*n0).typ); result0 = genrawsetdata_547629_839829468(cs0, ((NI) (LOC3))); } break; case ((Tnodekind290020) 41): case ((Tnodekind290020) 37): case ((Tnodekind290020) 155): case ((Tnodekind290020) 38): { Ttype290840* t0; t0 = skiptypes_294099_850551059((*n0).typ, IL64(211106232576256)); { if (!((*t0).kind == ((Ttypekind290244) 24))) goto LA7; result0 = genconstseq_557371_839829468(p0, n0, t0); } goto LA5; LA7: ; { result0 = genconstsimplelist_557299_839829468(p0, n0); } LA5: ; } break; default: { Tloc290816 d0; memset((void*)(&d0), 0, sizeof(d0)); initlocexpr_537283_839829468(p0, n0, (&d0)); result0 = rdloc_536188_839829468(d0); } break; } return result0; } N_NIMCALL(void, requestconstimpl_537240_839829468)(Tcproc527021* p0, Tsym290834* sym0) { Tcgen527027* m0; Tcgen527027* q0; { m0 = (*p0).module; useheader_530369_839829468(m0, sym0); { Ropeobj177006* LOC5; if (!((*sym0).loc.k == ((Tlockind290808) 0))) goto LA3; LOC5 = (Ropeobj177006*)0; LOC5 = manglename_531205_839829468(sym0); fillloc_530282_839829468((&(*sym0).loc), ((Tlockind290808) 8), (*sym0).typ, LOC5, ((Tstorageloc290812) 1)); } LA3: ; { if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag290810) 3))&15U)))!=0)) goto LA8; goto 
BeforeRet; } LA8: ; q0 = findpendingmodule_530241_839829468(m0, sym0); { NIM_BOOL LOC12; NIM_BOOL LOC14; TY533238 LOC17; LOC12 = (NIM_BOOL)0; LOC12 = !((q0 == NIM_NIL)); if (!(LOC12)) goto LA13; LOC14 = (NIM_BOOL)0; LOC14 = containsorincl_266862_2627731572((&(*q0).declaredthings), (*sym0).Sup.id); LOC12 = !(LOC14); LA13: ; if (!LOC12) goto LA15; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = gettypedesc_533671_839829468(q0, (*sym0).typ); LOC17[1] = (*sym0).loc.r; LOC17[2] = genconstexpr_552849_839829468((*q0).initproc, (*sym0).ast); addf_178205_2381377266(&(*q0).s[(((Tcfilesection527005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC17, 3); } LA15: ; { NIM_BOOL LOC20; NIM_BOOL LOC22; Ropeobj177006* headerdecl0; TY530811 LOC25; LOC20 = (NIM_BOOL)0; LOC20 = !((q0 == m0)); if (!(LOC20)) goto LA21; LOC22 = (NIM_BOOL)0; LOC22 = containsorincl_266862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id); LOC20 = !(LOC22); LA21: ; if (!LOC20) goto LA23; memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = gettypedesc_533671_839829468(m0, (*sym0).loc.t); LOC25[1] = (*sym0).loc.r; headerdecl0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_284), LOC25, 2); add_177482_2381377266(&(*m0).s[(((Tcfilesection527005) 8))- 0], headerdecl0); { NIM_BOOL LOC28; LOC28 = (NIM_BOOL)0; LOC28 = (((*sym0).flags &(1U<<((NU)(((Tsymflag290184) 6))&31U)))!=0); if (!(LOC28)) goto LA29; LOC28 = !((generatedheader_530201_839829468 == NIM_NIL)); LA29: ; if (!LOC28) goto LA30; add_177482_2381377266(&(*generatedheader_530201_839829468).s[(((Tcfilesection527005) 8))- 0], headerdecl0); } LA30: ; } LA23: ; }BeforeRet: ; } N_NIMCALL(void, gencomplexconst_556249_839829468)(Tcproc527021* p0, Tsym290834* sym0, Tloc290816* d0) { requestconstimpl_537240_839829468(p0, sym0); putlocintodest_537258_839829468(p0, d0, (*sym0).loc); } static N_INLINE(Ropeobj177006**, procsec_527194_3723162438)(Tcproc527021* p0, Tcprocsection527011 s0) { Ropeobj177006** result0; result0 = (Ropeobj177006**)0; result0 = 
&(*p0).blocks->data[((NI) 0)].sections[(s0)- 0]; return result0; } N_NIMCALL(void, accessthreadlocalvar_530945_839829468)(Tcproc527021* p0, Tsym290834* s0) { { NIM_BOOL LOC3; Ropeobj177006** LOC7; TY531289 LOC8; Ropeobj177006** LOC9; TY531289 LOC10; Ropeobj177006* LOC11; LOC3 = (NIM_BOOL)0; LOC3 = emulatedthreadvars_530949_839829468(); if (!(LOC3)) goto LA4; LOC3 = !((*p0).threadvaraccessed); LA4: ; if (!LOC3) goto LA5; (*p0).threadvaraccessed = NIM_TRUE; (*(*p0).module).flags |= ((NU8)1)<<((((Codegenflag527025) 1))%(sizeof(NU8)*8)); LOC7 = (Ropeobj177006**)0; LOC7 = procsec_527194_3723162438(p0, ((Tcprocsection527011) 0)); memset((void*)LOC8, 0, sizeof(LOC8)); addf_178205_2381377266(LOC7, ((NimStringDesc*) &T839829468_286), LOC8, 0); LOC9 = (Ropeobj177006**)0; LOC9 = procsec_527194_3723162438(p0, ((Tcprocsection527011) 1)); memset((void*)LOC10, 0, sizeof(LOC10)); LOC11 = (Ropeobj177006*)0; LOC11 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_287), LOC10, 0); add_177482_2381377266(LOC9, LOC11); } LA5: ; } static N_INLINE(NIM_BOOL, isemptytype_295440_850551059)(Ttype290840* t0) { NIM_BOOL result0; NIM_BOOL LOC1; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = (t0 == NIM_NIL); if (LOC1) goto LA2; LOC1 = ((*t0).kind == ((Ttypekind290244) 62) || (*t0).kind == ((Ttypekind290244) 7)); LA2: ; result0 = LOC1; return result0; } N_NIMCALL(void, putdataintodest_548436_839829468)(Tcproc527021* p0, Tloc290816* d0, Ttype290840* t0, Ropeobj177006* r0) { Tloc290816 a0; memset((void*)(&a0), 0, sizeof(a0)); { if (!!(((*d0).k == ((Tlockind290808) 0)))) goto LA3; initloc_530273_839829468((&a0), ((Tlockind290808) 8), t0, ((Tstorageloc290812) 1)); a0.r = r0; { if (!(((*d0).flags &(1U<<((NU)(((Tlocflag290810) 2))&15U)))!=0)) goto LA7; genassignment_537264_839829468(p0, (*d0), a0, 0); } goto LA5; LA7: ; { genassignment_537264_839829468(p0, (*d0), a0, 1); } LA5: ; } goto LA1; LA3: ; { (*d0).k = ((Tlockind290808) 8); unsureAsgnRef((void**) (&(*d0).t), t0); 
unsureAsgnRef((void**) (&(*d0).r), r0); } LA1: ; } N_NIMCALL(NIM_BOOL, freshlineinfo_530818_839829468)(Tcproc527021* p0, Tlineinfo189336 info0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = !(((*p0).lastlineinfo.line == info0.line)); if (LOC3) goto LA4; LOC3 = !(((*p0).lastlineinfo.fileindex == info0.fileindex)); LA4: ; if (!LOC3) goto LA5; (*p0).lastlineinfo.line = info0.line; (*p0).lastlineinfo.fileindex = info0.fileindex; result0 = NIM_TRUE; } LA5: ; return result0; } N_NIMCALL(void, genlinedir_530823_839829468)(Tcproc527021* p0, Tnode290802* t0) { NI line0; Ropeobj177006** LOC11; NimStringDesc* LOC12; line0 = safelinenm_530721_839829468((*t0).info); { Ropeobj177006** LOC5; TY531289 LOC6; Ropeobj177006* LOC7; Ropeobj177006* LOC8; Ropeobj177006* LOC9; Ropeobj177006* LOC10; if (!((gglobaloptions_168130_2607990831 &((NU64)1<<((NU)(((Tglobaloption168013) 28))&63U)))!=0)) goto LA3; LOC5 = (Ropeobj177006**)0; LOC5 = s_527179_3723162438(p0, ((Tcprocsection527011) 2)); memset((void*)LOC6, 0, sizeof(LOC6)); LOC7 = (Ropeobj177006*)0; LOC7 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_293), LOC6, 0); LOC8 = (Ropeobj177006*)0; LOC8 = sourceline_190068_155036129((*t0).info); LOC9 = (Ropeobj177006*)0; LOC9 = HEX26_177418_2381377266(LOC7, LOC8); LOC10 = (Ropeobj177006*)0; LOC10 = HEX26_177418_2381377266(LOC9, rnl_177903_2381377266); add_177482_2381377266(LOC5, LOC10); } LA3: ; LOC11 = (Ropeobj177006**)0; LOC11 = s_527179_3723162438(p0, ((Tcprocsection527011) 2)); LOC12 = (NimStringDesc*)0; LOC12 = tofullpath_190264_155036129((*t0).info.fileindex); genclinedir_530725_839829468(LOC11, LOC12, line0); { NIM_BOOL LOC15; NIM_BOOL LOC17; LOC15 = (NIM_BOOL)0; LOC15 = ((163840 & (*p0).options) == 163840); if (!(LOC15)) goto LA16; LOC17 = (NIM_BOOL)0; LOC17 = ((*p0).prc == NIM_NIL); if (LOC17) goto LA18; LOC17 = !((((*(*p0).prc).flags &(1U<<((NU)(((Tsymflag290184) 9))&31U)))!=0)); LA18: ; LOC15 = LOC17; LA16: ; if (!LOC15) goto LA19; { 
NIM_BOOL LOC23; TY530811 LOC26; NimStringDesc* LOC27; LOC23 = (NIM_BOOL)0; LOC23 = freshlineinfo_530818_839829468(p0, (*t0).info); if (!LOC23) goto LA24; memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = rope_177401_2381377266(((NI64) (line0))); LOC27 = (NimStringDesc*)0; LOC27 = tofilename_190260_155036129((*t0).info.fileindex); LOC26[1] = makecstring_189638_155036129(LOC27); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_294), LOC26, 2); } LA24: ; } goto LA13; LA19: ; { NIM_BOOL LOC29; NIM_BOOL LOC30; NIM_BOOL LOC32; LOC29 = (NIM_BOOL)0; LOC30 = (NIM_BOOL)0; LOC30 = ((98304 & (*p0).options) == 98304); if (!(LOC30)) goto LA31; LOC32 = (NIM_BOOL)0; LOC32 = ((*p0).prc == NIM_NIL); if (LOC32) goto LA33; LOC32 = !((((*(*p0).prc).flags &(1U<<((NU)(((Tsymflag290184) 9))&31U)))!=0)); LA33: ; LOC30 = LOC32; LA31: ; LOC29 = LOC30; if (!(LOC29)) goto LA34; LOC29 = (((NI32) 0) <= (*t0).info.fileindex); LA34: ; if (!LOC29) goto LA35; { NIM_BOOL LOC39; TY530811 LOC42; LOC39 = (NIM_BOOL)0; LOC39 = freshlineinfo_530818_839829468(p0, (*t0).info); if (!LOC39) goto LA40; memset((void*)LOC42, 0, sizeof(LOC42)); LOC42[0] = rope_177401_2381377266(((NI64) (line0))); LOC42[1] = quotedfilename_194818_155036129((*t0).info); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_295), LOC42, 2); } LA40: ; } goto LA13; LA35: ; LA13: ; } N_NIMCALL(Ropeobj177006*, getlabel_537217_839829468)(Tcproc527021* p0) { Ropeobj177006* result0; Ropeobj177006* LOC1; result0 = (Ropeobj177006*)0; (*p0).labels += ((NI) 1); LOC1 = (Ropeobj177006*)0; LOC1 = rope_177401_2381377266(((NI64) ((*p0).labels))); result0 = HEX26_177452_2381377266(((NimStringDesc*) &T839829468_296), LOC1); return result0; } N_NIMCALL(void, fixlabel_537230_839829468)(Tcproc527021* p0, Ropeobj177006* labl0) { TY177507 LOC1; memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = labl0; linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_299), 
LOC1, 1); } N_NIMCALL(void, genandor_552311_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, Tmagic290524 m0) { Ropeobj177006* L0; Tloc290816 tmp0; L0 = (Ropeobj177006*)0; memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_535032_839829468(p0, (*e0).typ, (&tmp0), NIM_FALSE); (*p0).splitdecls += ((NI) 1); expr_537248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0)); L0 = getlabel_537217_839829468(p0); { TY530811 LOC5; if (!(m0 == ((Tmagic290524) 127))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_536188_839829468(tmp0); LOC5[1] = L0; linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_297), LOC5, 2); } goto LA1; LA3: ; { TY530811 LOC7; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = rdloc_536188_839829468(tmp0); LOC7[1] = L0; linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_298), LOC7, 2); } LA1: ; expr_537248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&tmp0)); fixlabel_537230_839829468(p0, L0); { if (!((*d0).k == ((Tlockind290808) 0))) goto LA10; genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI290816)); } goto LA8; LA10: ; { genassignment_537264_839829468(p0, (*d0), tmp0, 0); } LA8: ; (*p0).splitdecls -= ((NI) 1); } N_NIMCALL(void, unaryarith_550646_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, Tmagic290524 op0) { Tloc290816 a0; Ttype290840* t0; TY533238 LOC1; NI64 LOC2; Ropeobj177006* LOC3; memset((void*)(&a0), 0, sizeof(a0)); t0 = (Ttype290840*)0; initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); t0 = skiptypes_294099_850551059((*e0).typ, IL64(211106233624832)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_536188_839829468(a0); LOC2 = (NI64)0; LOC2 = getsize_318135_3876443242(t0); LOC1[1] = rope_177401_2381377266((NI64)(LOC2 * IL64(8))); LOC1[2] = getsimpletypedesc_531936_839829468((*p0).module, (*e0).typ); LOC3 = (Ropeobj177006*)0; LOC3 = 
HEX25_177905_2381377266(unarithtab_550653_839829468[(op0)- 99], LOC1, 3); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC3, ((Tstorageloc290812) 0)); } N_NIMCALL(void, unaryarithoverflow_549633_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, Tmagic290524 m0) { Tloc290816 a0; Ttype290840* t0; TY530811 LOC7; NI64 LOC8; Ropeobj177006* LOC9; memset((void*)(&a0), 0, sizeof(a0)); t0 = (Ttype290840*)0; initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); t0 = skiptypes_294099_850551059((*e0).typ, IL64(211106233624832)); { TY530811 LOC5; NI64 LOC6; if (!(((*p0).options &(1U<<((NU)(((Toption168009) 5))&31U)))!=0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_536188_839829468(a0); LOC6 = (NI64)0; LOC6 = firstord_318001_3876443242(t0); LOC5[1] = intliteral_537270_839829468(LOC6); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_317), LOC5, 2); } LA3: ; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = rdloc_536188_839829468(a0); LOC8 = (NI64)0; LOC8 = getsize_318135_3876443242(t0); LOC7[1] = rope_177401_2381377266((NI64)(LOC8 * IL64(8))); LOC9 = (Ropeobj177006*)0; LOC9 = HEX25_177905_2381377266(opr_549640_839829468[(m0)- 96], LOC7, 2); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC9, ((Tstorageloc290812) 0)); } N_NIMCALL(void, binaryarith_549819_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, Tmagic290524 op0) { Tloc290816 a0; Tloc290816 b0; NI64 s0; NI64 LOC1; NI64 LOC2; TY533235 LOC3; Ropeobj177006* LOC4; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); s0 = (NI64)0; initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); LOC1 = (NI64)0; LOC1 = getsize_318135_3876443242(a0.t); LOC2 = (NI64)0; LOC2 = getsize_318135_3876443242(b0.t); s0 = (NI64)(((LOC1 >= LOC2) ? 
LOC1 : LOC2) * IL64(8)); memset((void*)LOC3, 0, sizeof(LOC3)); LOC3[0] = rdloc_536188_839829468(a0); LOC3[1] = rdloc_536188_839829468(b0); LOC3[2] = rope_177401_2381377266(s0); LOC3[3] = getsimpletypedesc_531936_839829468((*p0).module, (*e0).typ); LOC4 = (Ropeobj177006*)0; LOC4 = HEX25_177905_2381377266(binarithtab_549826_839829468[(op0)- 52], LOC3, 4); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC4, ((Tstorageloc290812) 0)); } N_NIMCALL(void, binaryfloatarith_554728_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, Tmagic290524 m0) { { Tloc290816 a0; Tloc290816 b0; TY533235 LOC5; Tnode290802* LOC6; Ropeobj177006* LOC7; if (!!(((384 & (*p0).options) == 0))) goto LA3; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rope_177277_2381377266(opr_554762_839829468[(m0)- 52]); LOC5[1] = rdloc_536188_839829468(a0); LOC5[2] = rdloc_536188_839829468(b0); LOC6 = (Tnode290802*)0; LOC6 = HEX5BHEX5D_291238_850551059(e0, ((NI) 1)); LOC5[3] = getsimpletypedesc_531936_839829468((*p0).module, (*LOC6).typ); LOC7 = (Ropeobj177006*)0; LOC7 = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_319), LOC5, 4); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC7, ((Tstorageloc290812) 0)); { TY177507 LOC12; if (!(((*p0).options &(1U<<((NU)(((Toption168009) 7))&31U)))!=0)) goto LA10; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rdloc_536188_839829468((*d0)); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_323), LOC12, 1); } LA10: ; { TY177507 LOC17; if (!(((*p0).options &(1U<<((NU)(((Toption168009) 8))&31U)))!=0)) goto LA15; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = rdloc_536188_839829468((*d0)); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) 
&T839829468_324), LOC17, 1); } LA15: ; } goto LA1; LA3: ; { binaryarith_549819_839829468(p0, e0, d0, m0); } LA1: ; } N_NIMCALL(void, geneqproc_550214_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { Tloc290816 a0; Tloc290816 b0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); { Ttype290840* LOC3; TY530811 LOC6; Ropeobj177006* LOC7; LOC3 = (Ttype290840*)0; LOC3 = skiptypes_294099_850551059(a0.t, IL64(211106232576256)); if (!((*LOC3).callconv == ((Tcallingconvention290002) 8))) goto LA4; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = rdloc_536188_839829468(a0); LOC6[1] = rdloc_536188_839829468(b0); LOC7 = (Ropeobj177006*)0; LOC7 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_352), LOC6, 2); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC7, ((Tstorageloc290812) 0)); } goto LA1; LA4: ; { TY530811 LOC9; Ropeobj177006* LOC10; memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = rdloc_536188_839829468(a0); LOC9[1] = rdloc_536188_839829468(b0); LOC10 = (Ropeobj177006*)0; LOC10 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_341), LOC9, 2); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC10, ((Tstorageloc290812) 0)); } LA1: ; } N_NIMCALL(Ropeobj177006*, rdcharloc_536227_839829468)(Tloc290816 a0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; result0 = rdloc_536188_839829468(a0); { Ttype290840* LOC3; TY177507 LOC6; LOC3 = (Ttype290840*)0; LOC3 = skiptypes_294099_850551059(a0.t, IL64(211106233624832)); if (!((*LOC3).kind == ((Ttypekind290244) 2))) goto LA4; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = result0; result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_358), LOC6, 1); } LA4: ; return result0; } N_NIMCALL(Ropeobj177006*, binaryarithoverflowraw_549235_839829468)(Tcproc527021* p0, Ttype290840* t0, Tloc290816 a0, Tloc290816 b0, 
NimStringDesc* frmt0) { Ropeobj177006* result0; NI64 size0; Ropeobj177006* storage0; TY530811 LOC6; TY533238 LOC7; result0 = (Ropeobj177006*)0; size0 = getsize_318135_3876443242(t0); { if (!(size0 < ((NI64) (intsize_175641_4151366050)))) goto LA3; storage0 = rope_177277_2381377266(((NimStringDesc*) &T839829468_36)); } goto LA1; LA3: ; { storage0 = gettypedesc_533671_839829468((*p0).module, t0); } LA1: ; result0 = gettempname_531596_839829468((*p0).module); memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = storage0; LOC6[1] = result0; linefmt_530714_839829468(p0, ((Tcprocsection527011) 0), ((NimStringDesc*) &T839829468_54), LOC6, 2); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = result0; LOC7[1] = rdcharloc_536227_839829468(a0); LOC7[2] = rdcharloc_536227_839829468(b0); linecg_530707_839829468(p0, ((Tcprocsection527011) 2), frmt0, LOC7, 3); { NIM_BOOL LOC10; TY533238 LOC14; NI64 LOC15; NI64 LOC16; LOC10 = (NIM_BOOL)0; LOC10 = (size0 < ((NI64) (intsize_175641_4151366050))); if (LOC10) goto LA11; LOC10 = ((*t0).kind == ((Ttypekind290244) 20) || (*t0).kind == ((Ttypekind290244) 14)); LA11: ; if (!LOC10) goto LA12; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = result0; LOC15 = (NI64)0; LOC15 = firstord_318001_3876443242(t0); LOC14[1] = intliteral_537270_839829468(LOC15); LOC16 = (NI64)0; LOC16 = lastord_318004_3876443242(t0); LOC14[2] = intliteral_537270_839829468(LOC16); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_359), LOC14, 3); } LA12: ; return result0; } N_NIMCALL(void, binaryarithoverflow_549262_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, Tmagic290524 m0) { Tloc290816 a0; Tloc290816 b0; Ttype290840* t0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); t0 = skiptypes_294099_850551059((*e0).typ, IL64(211106233624832)); { 
Ropeobj177006* res0; TY533238 LOC5; if (!!((((*p0).options &(1U<<((NU)(((Toption168009) 5))&31U)))!=0))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = gettypedesc_533671_839829468((*p0).module, t0); LOC5[1] = rdloc_536188_839829468(a0); LOC5[2] = rdloc_536188_839829468(b0); res0 = HEX25_177905_2381377266(opr_549279_839829468[(m0)- 45], LOC5, 3); putintodest_548468_839829468(p0, d0, (*e0).typ, res0, ((Tstorageloc290812) 0)); } goto LA1; LA3: ; { Ropeobj177006* res0; NimStringDesc* LOC7; TY530811 LOC13; Ropeobj177006* LOC14; LOC7 = (NimStringDesc*)0; { if (!((*t0).kind == ((Ttypekind290244) 35))) goto LA10; LOC7 = copyString(prc64_549274_839829468[(m0)- 45]); } goto LA8; LA10: ; { LOC7 = copyString(prc_549269_839829468[(m0)- 45]); } LA8: ; res0 = binaryarithoverflowraw_549235_839829468(p0, t0, a0, b0, LOC7); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = gettypedesc_533671_839829468((*p0).module, t0); LOC13[1] = res0; LOC14 = (Ropeobj177006*)0; LOC14 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_370), LOC13, 2); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC14, ((Tstorageloc290812) 0)); } LA1: ; } N_NIMCALL(Ropeobj177006*, lenfield_537305_839829468)(Tcproc527021* p0) { Ropeobj177006* result0; NimStringDesc* LOC1; result0 = (Ropeobj177006*)0; LOC1 = (NimStringDesc*)0; { NIM_BOOL LOC4; LOC4 = (NIM_BOOL)0; LOC4 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC4) goto LA5; LOC4 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA5: ; if (!LOC4) goto LA6; LOC1 = copyString(((NimStringDesc*) &T839829468_157)); } goto LA2; LA6: ; { LOC1 = copyString(((NimStringDesc*) &T839829468_158)); } LA2: ; result0 = rope_177277_2381377266(LOC1); return result0; } N_NIMCALL(void, gcusage_552439_839829468)(Tnode290802* n0) { { NimStringDesc* LOC5; if (!(gselectedgc_168133_2607990831 == ((Tgcmode168080) 0))) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = rendertree_309044_382274130(n0, 0); 
message_194095_155036129((*n0).info, ((Tmsgkind189002) 263), LOC5); } LA3: ; } N_NIMCALL(void, genrepr_553339_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { Tloc290816 a0; Ttype290840* t0; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); t0 = skiptypes_294099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440)); switch ((*t0).kind) { case ((Ttypekind290244) 31) ... ((Ttypekind290244) 35): case ((Ttypekind290244) 40) ... ((Ttypekind290244) 44): { TY177507 LOC2; Ropeobj177006* LOC3; memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = rdloc_536188_839829468(a0); LOC3 = (Ropeobj177006*)0; LOC3 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_371), LOC2, 1); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC3, a0.s); } break; case ((Ttypekind290244) 36) ... ((Ttypekind290244) 39): { TY177507 LOC5; Ropeobj177006* LOC6; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_536188_839829468(a0); LOC6 = (Ropeobj177006*)0; LOC6 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_372), LOC5, 1); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC6, a0.s); } break; case ((Ttypekind290244) 1): { TY177507 LOC8; Ropeobj177006* LOC9; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_536188_839829468(a0); LOC9 = (Ropeobj177006*)0; LOC9 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_373), LOC8, 1); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC9, a0.s); } break; case ((Ttypekind290244) 2): { TY177507 LOC11; Ropeobj177006* LOC12; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = rdloc_536188_839829468(a0); LOC12 = (Ropeobj177006*)0; LOC12 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_374), LOC11, 1); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC12, a0.s); } break; case ((Ttypekind290244) 14): case ((Ttypekind290244) 15): { TY530811 LOC14; Ropeobj177006* LOC15; memset((void*)LOC14, 
0, sizeof(LOC14)); LOC14[0] = rdloc_536188_839829468(a0); LOC14[1] = gentypeinfo_533941_839829468((*p0).module, t0); LOC15 = (Ropeobj177006*)0; LOC15 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_375), LOC14, 2); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC15, a0.s); } break; case ((Ttypekind290244) 28): { TY177507 LOC17; Ropeobj177006* LOC18; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = rdloc_536188_839829468(a0); LOC18 = (Ropeobj177006*)0; LOC18 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_376), LOC17, 1); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC18, a0.s); } break; case ((Ttypekind290244) 19): { TY530811 LOC20; Ropeobj177006* LOC21; memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = addrloc_536204_839829468(a0); LOC20[1] = gentypeinfo_533941_839829468((*p0).module, t0); LOC21 = (Ropeobj177006*)0; LOC21 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_377), LOC20, 2); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC21, a0.s); } break; case ((Ttypekind290244) 27): case ((Ttypekind290244) 48): { Tloc290816 b0; TY530811 LOC34; Ttype290840* LOC35; Ropeobj177006* LOC36; memset((void*)(&b0), 0, sizeof(b0)); switch ((*a0.t).kind) { case ((Ttypekind290244) 27): case ((Ttypekind290244) 48): { TY177507 LOC24; Ropeobj177006* LOC25; memset((void*)LOC24, 0, sizeof(LOC24)); LOC24[0] = rdloc_536188_839829468(a0); LOC25 = (Ropeobj177006*)0; LOC25 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_378), LOC24, 1); putintodest_548468_839829468(p0, (&b0), (*e0).typ, LOC25, a0.s); } break; case ((Ttypekind290244) 28): case ((Ttypekind290244) 24): { TY530811 LOC27; Ropeobj177006* LOC28; memset((void*)LOC27, 0, sizeof(LOC27)); LOC27[0] = rdloc_536188_839829468(a0); LOC27[1] = lenfield_537305_839829468(p0); LOC28 = (Ropeobj177006*)0; LOC28 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_379), LOC27, 2); putintodest_548468_839829468(p0, (&b0), (*e0).typ, LOC28, a0.s); } break; 
case ((Ttypekind290244) 16): case ((Ttypekind290244) 4): { TY530811 LOC30; NI64 LOC31; Ropeobj177006* LOC32; memset((void*)LOC30, 0, sizeof(LOC30)); LOC30[0] = rdloc_536188_839829468(a0); LOC31 = (NI64)0; LOC31 = lengthord_318007_3876443242(a0.t); LOC30[1] = rope_177401_2381377266(LOC31); LOC32 = (Ropeobj177006*)0; LOC32 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_380), LOC30, 2); putintodest_548468_839829468(p0, (&b0), (*e0).typ, LOC32, a0.s); } break; default: { internalerror_194100_155036129((*(*e0).kindU.S6.sons->data[((NI) 0)]).info, ((NimStringDesc*) &T839829468_381)); } break; } memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = rdloc_536188_839829468(b0); LOC35 = (Ttype290840*)0; LOC35 = elemtype_318394_3876443242(t0); LOC34[1] = gentypeinfo_533941_839829468((*p0).module, LOC35); LOC36 = (Ropeobj177006*)0; LOC36 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_382), LOC34, 2); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC36, a0.s); } break; case ((Ttypekind290244) 29): case ((Ttypekind290244) 16): case ((Ttypekind290244) 4): case ((Ttypekind290244) 22): case ((Ttypekind290244) 21): case ((Ttypekind290244) 26): case ((Ttypekind290244) 5): case ((Ttypekind290244) 24): { TY530811 LOC38; Ropeobj177006* LOC39; memset((void*)LOC38, 0, sizeof(LOC38)); LOC38[0] = rdloc_536188_839829468(a0); LOC38[1] = gentypeinfo_533941_839829468((*p0).module, t0); LOC39 = (Ropeobj177006*)0; LOC39 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_383), LOC38, 2); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC39, a0.s); } break; case ((Ttypekind290244) 3): case ((Ttypekind290244) 62): { localerror_194085_155036129((*e0).info, ((NimStringDesc*) &T839829468_384)); } break; default: { TY530811 LOC42; Ropeobj177006* LOC43; memset((void*)LOC42, 0, sizeof(LOC42)); LOC42[0] = addrloc_536204_839829468(a0); LOC42[1] = gentypeinfo_533941_839829468((*p0).module, t0); LOC43 = (Ropeobj177006*)0; LOC43 = 
ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_383), LOC42, 2); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC43, a0.s); } break; } gcusage_552439_839829468(e0); } N_NIMCALL(void, gengettypeinfo_553383_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { Ttype290840* t0; Ropeobj177006* LOC1; t0 = skiptypes_294099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440)); LOC1 = (Ropeobj177006*)0; LOC1 = gentypeinfo_533941_839829468((*p0).module, t0); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC1, ((Tstorageloc290812) 0)); } N_NIMCALL(void, genswap_553638_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { Tloc290816 a0; Tloc290816 b0; Tloc290816 tmp0; Ttype290840* LOC1; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&tmp0), 0, sizeof(tmp0)); LOC1 = (Ttype290840*)0; LOC1 = skiptypes_294099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864)); gettemp_535032_839829468(p0, LOC1, (&tmp0), NIM_FALSE); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); genassignment_537264_839829468(p0, tmp0, a0, 0); genassignment_537264_839829468(p0, a0, b0, 0); genassignment_537264_839829468(p0, b0, tmp0, 0); } N_NIMCALL(void, unaryexpr_549209_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, NimStringDesc* frmt0) { Tloc290816 a0; TY177507 LOC1; Ropeobj177006* LOC2; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_536188_839829468(a0); LOC2 = (Ropeobj177006*)0; LOC2 = ropecg_530407_839829468((*p0).module, frmt0, LOC1, 1); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc290812) 0)); } N_NIMCALL(void, binarystmt_548501_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, NimStringDesc* 
frmt0) { Tloc290816 a0; Tloc290816 b0; TY530811 LOC5; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); { if (!!(((*d0).k == ((Tlockind290808) 0)))) goto LA3; internalerror_194100_155036129((*e0).info, ((NimStringDesc*) &T839829468_387)); } LA3: ; initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_536188_839829468(a0); LOC5[1] = rdloc_536188_839829468(b0); linecg_530707_839829468(p0, ((Tcprocsection527011) 2), frmt0, LOC5, 2); } N_NIMCALL(void, genstrconcat_552452_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { Tloc290816 a0; Tloc290816 tmp0; NI L0; Ropeobj177006* appends0; Ropeobj177006* lens0; TY533238 LOC21; Ropeobj177006** LOC22; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_535032_839829468(p0, (*e0).typ, (&tmp0), NIM_FALSE); L0 = ((NI) 0); appends0 = NIM_NIL; lens0 = NIM_NIL; { NI i_552475_839829468; NI HEX3Atmp_552547_839829468; NI LOC2; NI res_552550_839829468; i_552475_839829468 = (NI)0; HEX3Atmp_552547_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_293351_850551059(e0); HEX3Atmp_552547_839829468 = (NI)(LOC2 - ((NI) 2)); res_552550_839829468 = ((NI) 0); { while (1) { if (!(res_552550_839829468 <= HEX3Atmp_552547_839829468)) goto LA4; i_552475_839829468 = res_552550_839829468; initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[(NI)(i_552475_839829468 + ((NI) 1))], (&a0)); { Ttype290840* LOC7; TY530811 LOC10; Ropeobj177006* LOC11; LOC7 = (Ttype290840*)0; LOC7 = skiptypes_294099_850551059((*(*e0).kindU.S6.sons->data[(NI)(i_552475_839829468 + ((NI) 1))]).typ, IL64(211106242013440)); if (!((*LOC7).kind == ((Ttypekind290244) 2))) goto LA8; L0 += ((NI) 1); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = tmp0.r; LOC10[1] = rdloc_536188_839829468(a0); LOC11 = (Ropeobj177006*)0; LOC11 = 
ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_390), LOC10, 2); add_177482_2381377266(&appends0, LOC11); } goto LA5; LA8: ; { TY530811 LOC19; Ropeobj177006* LOC20; { if (!((*(*e0).kindU.S6.sons->data[(NI)(i_552475_839829468 + ((NI) 1))]).kind >= ((Tnodekind290020) 20) && (*(*e0).kindU.S6.sons->data[(NI)(i_552475_839829468 + ((NI) 1))]).kind <= ((Tnodekind290020) 22))) goto LA15; L0 += ((*(*e0).kindU.S6.sons->data[(NI)(i_552475_839829468 + ((NI) 1))]).kindU.S3.strval ? (*(*e0).kindU.S6.sons->data[(NI)(i_552475_839829468 + ((NI) 1))]).kindU.S3.strval->Sup.len : 0); } goto LA13; LA15: ; { TY530811 LOC18; memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = rdloc_536188_839829468(a0); LOC18[1] = lenfield_537305_839829468(p0); addf_178205_2381377266(&lens0, ((NimStringDesc*) &T839829468_391), LOC18, 2); } LA13: ; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = tmp0.r; LOC19[1] = rdloc_536188_839829468(a0); LOC20 = (Ropeobj177006*)0; LOC20 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_392), LOC19, 2); add_177482_2381377266(&appends0, LOC20); } LA5: ; res_552550_839829468 += ((NI) 1); } LA4: ; } } memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = tmp0.r; LOC21[1] = lens0; LOC21[2] = rope_177401_2381377266(((NI64) (L0))); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_393), LOC21, 3); LOC22 = (Ropeobj177006**)0; LOC22 = s_527179_3723162438(p0, ((Tcprocsection527011) 2)); add_177482_2381377266(LOC22, appends0); { if (!((*d0).k == ((Tlockind290808) 0))) goto LA25; genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI290816)); } goto LA23; LA25: ; { genassignment_537264_839829468(p0, (*d0), tmp0, 0); } LA23: ; gcusage_552439_839829468(e0); } N_NIMCALL(void, genstrappend_552554_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { Tloc290816 a0; Tloc290816 dest0; Ropeobj177006* appends0; Ropeobj177006* lens0; NI L0; TY533238 LOC21; Ropeobj177006** LOC22; memset((void*)(&a0), 0, 
sizeof(a0)); memset((void*)(&dest0), 0, sizeof(dest0)); appends0 = (Ropeobj177006*)0; lens0 = (Ropeobj177006*)0; L0 = ((NI) 0); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&dest0)); { NI i_552615_839829468; NI HEX3Atmp_552676_839829468; NI LOC2; NI res_552679_839829468; i_552615_839829468 = (NI)0; HEX3Atmp_552676_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_293351_850551059(e0); HEX3Atmp_552676_839829468 = (NI)(LOC2 - ((NI) 3)); res_552679_839829468 = ((NI) 0); { while (1) { if (!(res_552679_839829468 <= HEX3Atmp_552676_839829468)) goto LA4; i_552615_839829468 = res_552679_839829468; initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[(NI)(i_552615_839829468 + ((NI) 2))], (&a0)); { Ttype290840* LOC7; TY530811 LOC10; Ropeobj177006* LOC11; LOC7 = (Ttype290840*)0; LOC7 = skiptypes_294099_850551059((*(*e0).kindU.S6.sons->data[(NI)(i_552615_839829468 + ((NI) 2))]).typ, IL64(211106242013440)); if (!((*LOC7).kind == ((Ttypekind290244) 2))) goto LA8; L0 += ((NI) 1); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = rdloc_536188_839829468(dest0); LOC10[1] = rdloc_536188_839829468(a0); LOC11 = (Ropeobj177006*)0; LOC11 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_390), LOC10, 2); add_177482_2381377266(&appends0, LOC11); } goto LA5; LA8: ; { TY530811 LOC19; Ropeobj177006* LOC20; { if (!((*(*e0).kindU.S6.sons->data[(NI)(i_552615_839829468 + ((NI) 2))]).kind >= ((Tnodekind290020) 20) && (*(*e0).kindU.S6.sons->data[(NI)(i_552615_839829468 + ((NI) 2))]).kind <= ((Tnodekind290020) 22))) goto LA15; L0 += ((*(*e0).kindU.S6.sons->data[(NI)(i_552615_839829468 + ((NI) 2))]).kindU.S3.strval ? 
(*(*e0).kindU.S6.sons->data[(NI)(i_552615_839829468 + ((NI) 2))]).kindU.S3.strval->Sup.len : 0); } goto LA13; LA15: ; { TY530811 LOC18; memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = rdloc_536188_839829468(a0); LOC18[1] = lenfield_537305_839829468(p0); addf_178205_2381377266(&lens0, ((NimStringDesc*) &T839829468_391), LOC18, 2); } LA13: ; memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = rdloc_536188_839829468(dest0); LOC19[1] = rdloc_536188_839829468(a0); LOC20 = (Ropeobj177006*)0; LOC20 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_392), LOC19, 2); add_177482_2381377266(&appends0, LOC20); } LA5: ; res_552679_839829468 += ((NI) 1); } LA4: ; } } memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = rdloc_536188_839829468(dest0); LOC21[1] = lens0; LOC21[2] = rope_177401_2381377266(((NI64) (L0))); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_395), LOC21, 3); LOC22 = (Ropeobj177006**)0; LOC22 = s_527179_3723162438(p0, ((Tcprocsection527011) 2)); add_177482_2381377266(LOC22, appends0); gcusage_552439_839829468(e0); } N_NIMCALL(void, genseqelemappend_552683_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { NimStringDesc* seqappendpattern0; Tloc290816 a0; Tloc290816 b0; Tloc290816 dest0; Ttype290840* bt0; TY533238 LOC8; Ttype290840* LOC9; TY530811 LOC10; TY530811 LOC11; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA4: ; if (!!(LOC3)) goto LA5; seqappendpattern0 = copyString(((NimStringDesc*) &T839829468_396)); } goto LA1; LA5: ; { seqappendpattern0 = copyString(((NimStringDesc*) &T839829468_397)); } LA1: ; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&dest0), 0, sizeof(dest0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_537283_839829468(p0, 
(*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); bt0 = skiptypes_294099_850551059((*(*e0).kindU.S6.sons->data[((NI) 2)]).typ, IL64(211106240964864)); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_536188_839829468(a0); LOC9 = (Ttype290840*)0; LOC9 = skiptypes_294099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864)); LOC8[1] = gettypedesc_533671_839829468((*p0).module, LOC9); LOC8[2] = gettypedesc_533671_839829468((*p0).module, bt0); linecg_530707_839829468(p0, ((Tcprocsection527011) 2), seqappendpattern0, LOC8, 3); initloc_530273_839829468((&dest0), ((Tlockind290808) 6), bt0, ((Tstorageloc290812) 3)); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = rdloc_536188_839829468(a0); LOC10[1] = lenfield_537305_839829468(p0); dest0.r = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_398), LOC10, 2); genassignment_537264_839829468(p0, dest0, b0, 3); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = rdloc_536188_839829468(a0); LOC11[1] = lenfield_537305_839829468(p0); linecg_530707_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_399), LOC11, 2); gcusage_552439_839829468(e0); } N_NIMCALL(void, binaryexpr_548549_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, NimStringDesc* frmt0) { Tloc290816 a0; Tloc290816 b0; TY530811 LOC1; Ropeobj177006* LOC2; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_536188_839829468(a0); LOC1[1] = rdloc_536188_839829468(b0); LOC2 = (Ropeobj177006*)0; LOC2 = ropecg_530407_839829468((*p0).module, frmt0, LOC1, 2); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc290812) 0)); } N_NIMCALL(void, genstrequals_554666_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { Tloc290816 x0; Tnode290802* a0; Tnode290802* 
b0; memset((void*)(&x0), 0, sizeof(x0)); a0 = (*e0).kindU.S6.sons->data[((NI) 1)]; b0 = (*e0).kindU.S6.sons->data[((NI) 2)]; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = ((*a0).kind == ((Tnodekind290020) 23)); if (LOC3) goto LA4; LOC3 = ((*b0).kind == ((Tnodekind290020) 23)); LA4: ; if (!LOC3) goto LA5; binaryexpr_548549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_341)); } goto LA1; LA5: ; { NIM_BOOL LOC8; TY530811 LOC12; Ropeobj177006* LOC13; LOC8 = (NIM_BOOL)0; LOC8 = ((*a0).kind >= ((Tnodekind290020) 20) && (*a0).kind <= ((Tnodekind290020) 22)); if (!(LOC8)) goto LA9; LOC8 = (((*a0).kindU.S3.strval) && ((*a0).kindU.S3.strval)->Sup.len == 0); LA9: ; if (!LOC8) goto LA10; initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&x0)); memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rdloc_536188_839829468(x0); LOC12[1] = lenfield_537305_839829468(p0); LOC13 = (Ropeobj177006*)0; LOC13 = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_400), LOC12, 2); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC13, ((Tstorageloc290812) 0)); } goto LA1; LA10: ; { NIM_BOOL LOC15; TY530811 LOC19; Ropeobj177006* LOC20; LOC15 = (NIM_BOOL)0; LOC15 = ((*b0).kind >= ((Tnodekind290020) 20) && (*b0).kind <= ((Tnodekind290020) 22)); if (!(LOC15)) goto LA16; LOC15 = (((*b0).kindU.S3.strval) && ((*b0).kindU.S3.strval)->Sup.len == 0); LA16: ; if (!LOC15) goto LA17; initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&x0)); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = rdloc_536188_839829468(x0); LOC19[1] = lenfield_537305_839829468(p0); LOC20 = (Ropeobj177006*)0; LOC20 = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_400), LOC19, 2); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC20, ((Tstorageloc290812) 0)); } goto LA1; LA17: ; { binaryexpr_548549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_401)); } LA1: ; } N_NIMCALL(void, genisnil_550620_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) 
{ Ttype290840* t0; t0 = skiptypes_294099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106233624832)); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = ((*t0).kind == ((Ttypekind290244) 25)); if (!(LOC3)) goto LA4; LOC3 = ((*t0).callconv == ((Tcallingconvention290002) 8)); LA4: ; if (!LOC3) goto LA5; unaryexpr_549209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_404)); } goto LA1; LA5: ; { unaryexpr_549209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_405)); } LA1: ; } N_NIMCALL(void, gendollar_553391_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0, NimStringDesc* frmt0) { Tloc290816 a0; TY177507 LOC1; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_536188_839829468(a0); a0.r = ropecg_530407_839829468((*p0).module, frmt0, LOC1, 1); { if (!((*d0).k == ((Tlockind290808) 0))) goto LA4; gettemp_535032_839829468(p0, (*n0).typ, d0, NIM_FALSE); } LA4: ; genassignment_537264_839829468(p0, (*d0), a0, 0); gcusage_552439_839829468(n0); } N_NIMCALL(Ropeobj177006*, genofhelper_553139_839829468)(Tcproc527021* p0, Ttype290840* dest0, Ropeobj177006* a0) { Ropeobj177006* result0; Ropeobj177006* ti0; result0 = (Ropeobj177006*)0; ti0 = gentypeinfo_533941_839829468((*p0).module, dest0); { NIM_BOOL LOC3; NIM_BOOL LOC5; TY530811 LOC9; LOC3 = (NIM_BOOL)0; LOC3 = (((*dest0).flags &(1U<<((NU)(((Ttypeflag290431) 2))&31U)))!=0); if (LOC3) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = (((*(*p0).module).flags &(1U<<((NU)(((Codegenflag527025) 5))&7U)))!=0); if (!(LOC5)) goto LA6; LOC5 = !((((*dest0).flags &(1U<<((NU)(((Ttypeflag290431) 5))&31U)))!=0)); LA6: ; LOC3 = LOC5; LA4: ; if (!LOC3) goto LA7; memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = a0; LOC9[1] = ti0; result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_414), LOC9, 2); } goto LA1; LA7: ; { Ropeobj177006* LOC11; Ropeobj177006* cache0; Ropeobj177006* LOC12; TY177507 LOC13; 
TY533238 LOC14; LOC11 = (Ropeobj177006*)0; LOC11 = cgsym_530403_839829468((*p0).module, ((NimStringDesc*) &T839829468_129)); (*(*p0).module).labels += ((NI) 1); LOC12 = (Ropeobj177006*)0; LOC12 = rope_177401_2381377266(((NI64) ((*(*p0).module).labels))); cache0 = HEX26_177452_2381377266(((NimStringDesc*) &T839829468_415), LOC12); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = cache0; addf_178205_2381377266(&(*(*p0).module).s[(((Tcfilesection527005) 9))- 0], ((NimStringDesc*) &T839829468_416), LOC13, 1); memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = a0; LOC14[1] = ti0; LOC14[2] = cache0; result0 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_417), LOC14, 3); } LA1: ; return result0; } N_NIMCALL(void, genof_553201_839829468)(Tcproc527021* p0, Tnode290802* x0, Ttype290840* typ0, Tloc290816* d0) { Tloc290816 a0; Ttype290840* dest0; Ropeobj177006* r0; Ropeobj177006* nilcheck0; Ttype290840* t0; Ttype290840* LOC41; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, x0, (&a0)); dest0 = skiptypes_294099_850551059(typ0, IL64(211106247256320)); r0 = rdloc_536188_839829468(a0); nilcheck0 = NIM_NIL; t0 = skiptypes_294099_850551059(a0.t, IL64(211106232576256)); { while (1) { Ttype290840* LOC16; if (!((*t0).kind == ((Ttypekind290244) 23) || (*t0).kind == ((Ttypekind290244) 21) || (*t0).kind == ((Ttypekind290244) 22))) goto LA2; { if (!!(((*t0).kind == ((Ttypekind290244) 23)))) goto LA5; nilcheck0 = r0; } LA5: ; { NIM_BOOL LOC9; NIM_BOOL LOC11; TY177507 LOC15; LOC9 = (NIM_BOOL)0; LOC9 = !(((*t0).kind == ((Ttypekind290244) 23))); if (LOC9) goto LA10; LOC11 = (NIM_BOOL)0; LOC11 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC11) goto LA12; LOC11 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA12: ; LOC9 = !(LOC11); LA10: ; if (!LOC9) goto LA13; memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = r0; r0 = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_124), LOC15, 1); } 
LA13: ; LOC16 = (Ttype290840*)0; LOC16 = lastson_293377_850551059(t0); t0 = skiptypes_294099_850551059(LOC16, IL64(211106232576256)); } LA2: ; } { NIM_BOOL LOC19; LOC19 = (NIM_BOOL)0; LOC19 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC19) goto LA20; LOC19 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA20: ; if (!!(LOC19)) goto LA21; { while (1) { NIM_BOOL LOC25; TY531289 LOC27; Ropeobj177006* LOC28; LOC25 = (NIM_BOOL)0; LOC25 = ((*t0).kind == ((Ttypekind290244) 17)); if (!(LOC25)) goto LA26; LOC25 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL)); LA26: ; if (!LOC25) goto LA24; memset((void*)LOC27, 0, sizeof(LOC27)); LOC28 = (Ropeobj177006*)0; LOC28 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_153), LOC27, 0); add_177482_2381377266(&r0, LOC28); t0 = skiptypes_294099_850551059((*t0).sons->data[((NI) 0)], IL64(211106247215360)); } LA24: ; } } LA21: ; { NIM_BOOL LOC31; LOC31 = (NIM_BOOL)0; LOC31 = isobjlackingtypefield_531513_839829468(t0); if (!LOC31) goto LA32; globalerror_194071_155036129((*x0).info, ((Tmsgkind189002) 4), ((NimStringDesc*) &T839829468_412)); } LA32: ; { TY530811 LOC38; if (!!((nilcheck0 == NIM_NIL))) goto LA36; memset((void*)LOC38, 0, sizeof(LOC38)); LOC38[0] = nilcheck0; LOC38[1] = genofhelper_553139_839829468(p0, dest0, r0); r0 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_413), LOC38, 2); } goto LA34; LA36: ; { TY177507 LOC40; memset((void*)LOC40, 0, sizeof(LOC40)); LOC40[0] = genofhelper_553139_839829468(p0, dest0, r0); r0 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_418), LOC40, 1); } LA34: ; LOC41 = (Ttype290840*)0; LOC41 = getsystype_336150_3937434831(((Ttypekind290244) 1)); putintodest_548468_839829468(p0, d0, LOC41, r0, a0.s); } N_NIMCALL(void, genof_553331_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0) { genof_553201_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (*(*n0).kindU.S6.sons->data[((NI) 2)]).typ, d0); } 
N_NIMCALL(void, rawgennew_552741_839829468)(Tcproc527021* p0, Tloc290816 a0, Ropeobj177006* sizeexpr_552745_839829468) { Ropeobj177006* sizeexpr0; Ttype290840* reftype0; Tloc290816 b0; TY533238 args0; Ttype290840* bt0; sizeexpr0 = sizeexpr_552745_839829468; reftype0 = skiptypes_294099_850551059(a0.t, IL64(211106242013440)); memset((void*)(&b0), 0, sizeof(b0)); initloc_530273_839829468((&b0), ((Tlockind290808) 6), a0.t, ((Tstorageloc290812) 3)); { TY177507 LOC5; Ttype290840* LOC6; if (!sizeexpr0 == 0) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC6 = (Ttype290840*)0; LOC6 = skiptypes_294099_850551059((*reftype0).sons->data[((NI) 0)], IL64(211106233624832)); LOC5[0] = gettypedesc_533671_839829468((*p0).module, LOC6); sizeexpr0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_419), LOC5, 1); } LA3: ; memset((void*)args0, 0, sizeof(args0)); args0[0] = gettypedesc_533671_839829468((*p0).module, reftype0); args0[1] = gentypeinfo_533941_839829468((*p0).module, reftype0); args0[2] = sizeexpr0; { NIM_BOOL LOC9; TY530811 LOC21; LOC9 = (NIM_BOOL)0; LOC9 = (a0.s == ((Tstorageloc290812) 3)); if (!(LOC9)) goto LA10; LOC9 = usesnativegc_168177_2607990831(); LA10: ; if (!LOC9) goto LA11; { NIM_BOOL LOC15; TY177507 LOC18; LOC15 = (NIM_BOOL)0; LOC15 = canformacycle_318123_3876443242(a0.t); if (!LOC15) goto LA16; memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = rdloc_536188_839829468(a0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_420), LOC18, 1); } goto LA13; LA16: ; { TY177507 LOC20; memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = rdloc_536188_839829468(a0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_255), LOC20, 1); } LA13: ; b0.r = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_421), args0, 3); memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = rdloc_536188_839829468(a0); LOC21[1] = rdloc_536188_839829468(b0); linefmt_530714_839829468(p0, 
((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_123), LOC21, 2); } goto LA7; LA11: ; { b0.r = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_422), args0, 3); genassignment_537264_839829468(p0, a0, b0, 0); } LA7: ; bt0 = skiptypes_294099_850551059((*reftype0).sons->data[((NI) 0)], IL64(211106233624832)); genobjectinit_536242_839829468(p0, ((Tcprocsection527011) 2), bt0, a0, NIM_FALSE); } N_NIMCALL(void, gennew_552782_839829468)(Tcproc527021* p0, Tnode290802* e0) { Tloc290816 a0; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); { NI LOC3; Tloc290816 se0; Ropeobj177006* LOC6; LOC3 = (NI)0; LOC3 = len_291081_850551059(e0); if (!(LOC3 == ((NI) 3))) goto LA4; memset((void*)(&se0), 0, sizeof(se0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&se0)); LOC6 = (Ropeobj177006*)0; LOC6 = rdloc_536188_839829468(se0); rawgennew_552741_839829468(p0, a0, LOC6); } goto LA1; LA4: ; { rawgennew_552741_839829468(p0, a0, NIM_NIL); } LA1: ; gcusage_552439_839829468(e0); } N_NIMCALL(void, gennewfinalize_553110_839829468)(Tcproc527021* p0, Tnode290802* e0) { Tloc290816 a0; Tloc290816 b0; Tloc290816 f0; Ttype290840* reftype0; Ttype290840* bt0; Ropeobj177006* ti0; TY530811 LOC1; TY533238 LOC2; Ttype290840* LOC3; Ttype290840* LOC4; Ttype290840* LOC5; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&f0), 0, sizeof(f0)); reftype0 = (Ttype290840*)0; bt0 = (Ttype290840*)0; ti0 = (Ropeobj177006*)0; reftype0 = skiptypes_294099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&f0)); initloc_530273_839829468((&b0), ((Tlockind290808) 6), a0.t, ((Tstorageloc290812) 3)); ti0 = gentypeinfo_533941_839829468((*p0).module, reftype0); memset((void*)LOC1, 0, sizeof(LOC1)); 
LOC1[0] = ti0; LOC1[1] = rdloc_536188_839829468(f0); addf_178205_2381377266(&(*(*p0).module).s[(((Tcfilesection527005) 14))- 0], ((NimStringDesc*) &T839829468_423), LOC1, 2); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = gettypedesc_533671_839829468((*p0).module, reftype0); LOC2[1] = ti0; LOC3 = (Ttype290840*)0; LOC3 = lastson_293377_850551059(reftype0); LOC4 = (Ttype290840*)0; LOC4 = skiptypes_294099_850551059(LOC3, IL64(211106233624832)); LOC2[2] = gettypedesc_533671_839829468((*p0).module, LOC4); b0.r = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_424), LOC2, 3); genassignment_537264_839829468(p0, a0, b0, 0); LOC5 = (Ttype290840*)0; LOC5 = lastson_293377_850551059(reftype0); bt0 = skiptypes_294099_850551059(LOC5, IL64(211106233624832)); genobjectinit_536242_839829468(p0, ((Tcprocsection527011) 2), bt0, a0, NIM_FALSE); gcusage_552439_839829468(e0); } N_NIMCALL(void, gennewseqaux_552795_839829468)(Tcproc527021* p0, Tloc290816 dest0, Ropeobj177006* length0) { Ttype290840* seqtype0; TY533238 args0; Tloc290816 call0; seqtype0 = skiptypes_294099_850551059(dest0.t, IL64(211106242013440)); memset((void*)args0, 0, sizeof(args0)); args0[0] = gettypedesc_533671_839829468((*p0).module, seqtype0); args0[1] = gentypeinfo_533941_839829468((*p0).module, seqtype0); args0[2] = length0; memset((void*)(&call0), 0, sizeof(call0)); initloc_530273_839829468((&call0), ((Tlockind290808) 6), dest0.t, ((Tstorageloc290812) 3)); { NIM_BOOL LOC3; TY530811 LOC15; LOC3 = (NIM_BOOL)0; LOC3 = (dest0.s == ((Tstorageloc290812) 3)); if (!(LOC3)) goto LA4; LOC3 = usesnativegc_168177_2607990831(); LA4: ; if (!LOC3) goto LA5; { NIM_BOOL LOC9; TY177507 LOC12; LOC9 = (NIM_BOOL)0; LOC9 = canformacycle_318123_3876443242(dest0.t); if (!LOC9) goto LA10; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rdloc_536188_839829468(dest0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_420), LOC12, 1); } goto LA7; LA10: ; { TY177507 LOC14; 
memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rdloc_536188_839829468(dest0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_255), LOC14, 1); } LA7: ; call0.r = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_425), args0, 3); memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = rdloc_536188_839829468(dest0); LOC15[1] = rdloc_536188_839829468(call0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_123), LOC15, 2); } goto LA1; LA5: ; { call0.r = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_426), args0, 3); genassignment_537264_839829468(p0, dest0, call0, 0); } LA1: ; } N_NIMCALL(void, gennewseq_552824_839829468)(Tcproc527021* p0, Tnode290802* e0) { Tloc290816 a0; Tloc290816 b0; Ropeobj177006* LOC1; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); LOC1 = (Ropeobj177006*)0; LOC1 = rdloc_536188_839829468(b0); gennewseqaux_552795_839829468(p0, a0, LOC1); gcusage_552439_839829468(e0); } N_NIMCALL(void, gennewseqofcap_552836_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { Ttype290840* seqtype0; Tloc290816 a0; TY533238 LOC1; Ropeobj177006* LOC2; seqtype0 = skiptypes_294099_850551059((*e0).typ, IL64(211106242013440)); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = gettypedesc_533671_839829468((*p0).module, seqtype0); LOC1[1] = gentypeinfo_533941_839829468((*p0).module, seqtype0); LOC1[2] = rdloc_536188_839829468(a0); LOC2 = (Ropeobj177006*)0; LOC2 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_427), LOC1, 3); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc290812) 0)); gcusage_552439_839829468(e0); } 
N_NIMCALL(Ropeobj177006*, getclosuretype_533683_839829468)(Tcgen527027* m0, Ttype290840* t0, Tclosuretypekind533679 kind0) { Ropeobj177006* result0; Intset266030 check0; Ropeobj177006* rettype0; Ropeobj177006* desc0; result0 = (Ropeobj177006*)0; memset((void*)(&check0), 0, sizeof(check0)); chckNil((void*)(&check0)); memset((void*)(&check0), 0, sizeof(check0)); initintset_266885_2627731572((&check0)); result0 = gettempname_531596_839829468(m0); rettype0 = (Ropeobj177006*)0; desc0 = (Ropeobj177006*)0; genprocparams_532115_839829468(m0, t0, &rettype0, &desc0, (&check0), !((kind0 == ((Tclosuretypekind533679) 0))), NIM_FALSE); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = isimportedtype_531449_839829468(t0); if (!!(LOC3)) goto LA4; { NIM_BOOL LOC8; TY533235 LOC12; LOC8 = (NIM_BOOL)0; LOC8 = !(((*t0).callconv == ((Tcallingconvention290002) 8))); if (LOC8) goto LA9; LOC8 = !((kind0 == ((Tclosuretypekind533679) 2))); LA9: ; if (!LOC8) goto LA10; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rope_177277_2381377266(Callingconvtostr_531585_839829468[((*t0).callconv)- 0]); LOC12[1] = rettype0; LOC12[2] = result0; LOC12[3] = desc0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 3))- 0], ((NimStringDesc*) &T839829468_64), LOC12, 4); } goto LA6; LA10: ; { TY533238 LOC14; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = result0; LOC14[1] = rettype0; LOC14[2] = desc0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 3))- 0], ((NimStringDesc*) &T839829468_75), LOC14, 3); } LA6: ; } LA4: ; return result0; } N_NIMCALL(void, gensomecast_554480_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { Tloc290816 a0; Ttype290840* etyp0; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); etyp0 = skiptypes_294099_850551059((*e0).typ, IL64(211106233624832)); { NIM_BOOL LOC3; TY530811 LOC7; Ropeobj177006* LOC8; LOC3 = (NIM_BOOL)0; LOC3 = ((*etyp0).kind == ((Ttypekind290244) 18) || (*etyp0).kind == 
((Ttypekind290244) 17) || (*etyp0).kind == ((Ttypekind290244) 16) || (*etyp0).kind == ((Ttypekind290244) 27) || (*etyp0).kind == ((Ttypekind290244) 48) || (*etyp0).kind == ((Ttypekind290244) 4)); if (!(LOC3)) goto LA4; LOC3 = !(((a0.flags &(1U<<((NU)(((Tlocflag290810) 0))&15U)))!=0)); LA4: ; if (!LOC3) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = gettypedesc_533671_839829468((*p0).module, (*e0).typ); LOC7[1] = addrloc_536204_839829468(a0); LOC8 = (Ropeobj177006*)0; LOC8 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_429), LOC7, 2); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC8, a0.s); } goto LA1; LA5: ; { NIM_BOOL LOC10; TY530811 LOC14; Ropeobj177006* LOC15; LOC10 = (NIM_BOOL)0; LOC10 = ((*etyp0).kind == ((Ttypekind290244) 25)); if (!(LOC10)) goto LA11; LOC10 = ((*etyp0).callconv == ((Tcallingconvention290002) 8)); LA11: ; if (!LOC10) goto LA12; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = getclosuretype_533683_839829468((*p0).module, etyp0, ((Tclosuretypekind533679) 1)); LOC14[1] = rdcharloc_536227_839829468(a0); LOC15 = (Ropeobj177006*)0; LOC15 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_430), LOC14, 2); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC15, a0.s); } goto LA1; LA12: ; { TY530811 LOC17; Ropeobj177006* LOC18; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = gettypedesc_533671_839829468((*p0).module, (*e0).typ); LOC17[1] = rdcharloc_536227_839829468(a0); LOC18 = (Ropeobj177006*)0; LOC18 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_430), LOC17, 2); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC18, a0.s); } LA1: ; } N_NIMCALL(void, unaryexprchar_549222_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, NimStringDesc* frmt0) { Tloc290816 a0; TY177507 LOC1; Ropeobj177006* LOC2; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdcharloc_536227_839829468(a0); LOC2 = 
(Ropeobj177006*)0; LOC2 = ropecg_530407_839829468((*p0).module, frmt0, LOC1, 1); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc290812) 0)); } N_NIMCALL(void, genord_554474_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { unaryexprchar_549222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_301)); } N_NIMCALL(void, genarraylen_553415_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, Tmagic290524 op0) { Tnode290802* a0; Ttype290840* typ0; a0 = (*e0).kindU.S6.sons->data[((NI) 1)]; { if (!((*a0).kind == ((Tnodekind290020) 64))) goto LA3; a0 = (*a0).kindU.S6.sons->data[((NI) 0)]; } LA3: ; typ0 = skiptypes_294099_850551059((*a0).typ, IL64(211106240964864)); switch ((*typ0).kind) { case ((Ttypekind290244) 27): case ((Ttypekind290244) 48): { { if (!(op0 == ((Tmagic290524) 8))) goto LA8; unaryexpr_549209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_431)); } goto LA6; LA8: ; { unaryexpr_549209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_432)); } LA6: ; } break; case ((Ttypekind290244) 29): { usestringh_530345_839829468((*p0).module); { if (!(op0 == ((Tmagic290524) 8))) goto LA14; unaryexpr_549209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_433)); } goto LA12; LA14: ; { unaryexpr_549209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_434)); } LA12: ; } break; case ((Ttypekind290244) 28): case ((Ttypekind290244) 24): { { NIM_BOOL LOC20; LOC20 = (NIM_BOOL)0; LOC20 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC20) goto LA21; LOC20 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA21: ; if (!!(LOC20)) goto LA22; { if (!(op0 == ((Tmagic290524) 8))) goto LA26; unaryexpr_549209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_435)); } goto LA24; LA26: ; { unaryexpr_549209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_436)); } LA24: ; } goto LA18; LA22: ; { { if (!(op0 == ((Tmagic290524) 8))) goto LA32; unaryexpr_549209_839829468(p0, e0, d0, 
((NimStringDesc*) &T839829468_437)); } goto LA30; LA32: ; { unaryexpr_549209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_438)); } LA30: ; } LA18: ; } break; case ((Ttypekind290244) 16): case ((Ttypekind290244) 4): { { NI64 LOC40; Ropeobj177006* LOC41; if (!(op0 == ((Tmagic290524) 8))) goto LA38; LOC40 = (NI64)0; LOC40 = lastord_318004_3876443242(typ0); LOC41 = (Ropeobj177006*)0; LOC41 = rope_177401_2381377266(LOC40); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC41, ((Tstorageloc290812) 0)); } goto LA36; LA38: ; { NI64 LOC43; Ropeobj177006* LOC44; LOC43 = (NI64)0; LOC43 = lengthord_318007_3876443242(typ0); LOC44 = (Ropeobj177006*)0; LOC44 = rope_177401_2381377266(LOC43); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC44, ((Tstorageloc290812) 0)); } LA36: ; } break; default: { internalerror_194100_155036129((*e0).info, ((NimStringDesc*) &T839829468_439)); } break; } } N_NIMCALL(void, unarystmt_548527_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, NimStringDesc* frmt0) { Tloc290816 a0; TY177507 LOC5; memset((void*)(&a0), 0, sizeof(a0)); { if (!!(((*d0).k == ((Tlockind290808) 0)))) goto LA3; internalerror_194100_155036129((*e0).info, ((NimStringDesc*) &T839829468_442)); } LA3: ; initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_536188_839829468(a0); linecg_530707_839829468(p0, ((Tcprocsection527011) 2), frmt0, LOC5, 1); } N_NIMCALL(void, gensetlengthstr_553632_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { binarystmt_548501_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_445)); gcusage_552439_839829468(e0); } N_NIMCALL(void, gensetlengthseq_553500_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { Tloc290816 a0; Tloc290816 b0; Ttype290840* t0; NimStringDesc* setlenpattern0; TY533235 LOC8; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_537283_839829468(p0, 
(*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); t0 = skiptypes_294099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864)); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA4: ; if (!!(LOC3)) goto LA5; setlenpattern0 = copyString(((NimStringDesc*) &T839829468_446)); } goto LA1; LA5: ; { setlenpattern0 = copyString(((NimStringDesc*) &T839829468_447)); } LA1: ; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_536188_839829468(a0); LOC8[1] = rdloc_536188_839829468(b0); LOC8[2] = gettypedesc_533671_839829468((*p0).module, t0); LOC8[3] = gettypedesc_533671_839829468((*p0).module, (*t0).sons->data[((NI) 0)]); linecg_530707_839829468(p0, ((Tcprocsection527011) 2), setlenpattern0, LOC8, 4); gcusage_552439_839829468(e0); } N_NIMCALL(Ropeobj177006*, rdsetelemloc_553662_839829468)(Tloc290816 a0, Ttype290840* settype0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; result0 = rdcharloc_536227_839829468(a0); { NI64 LOC3; TY530811 LOC6; NI64 LOC7; LOC3 = (NI64)0; LOC3 = firstord_318001_3876443242(settype0); if (!!((LOC3 == IL64(0)))) goto LA4; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = result0; LOC7 = (NI64)0; LOC7 = firstord_318001_3876443242(settype0); LOC6[1] = rope_177401_2381377266(LOC7); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_448), LOC6, 2); } LA4: ; return result0; } N_NIMCALL(void, binarystmtinexcl_553857_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, NimStringDesc* frmt0) { Tloc290816 a0; Tloc290816 b0; TY530811 LOC1; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); memset((void*)LOC1, 
0, sizeof(LOC1)); LOC1[0] = rdloc_536188_839829468(a0); LOC1[1] = rdsetelemloc_553662_839829468(b0, a0.t); linef_530700_839829468(p0, ((Tcprocsection527011) 2), frmt0, LOC1, 2); } N_NIMCALL(void, binaryexprchar_548809_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, NimStringDesc* frmt0) { Tloc290816 a0; Tloc290816 b0; TY530811 LOC1; Ropeobj177006* LOC2; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdcharloc_536227_839829468(a0); LOC1[1] = rdcharloc_536227_839829468(b0); LOC2 = (Ropeobj177006*)0; LOC2 = ropecg_530407_839829468((*p0).module, frmt0, LOC1, 2); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc290812) 0)); } N_NIMCALL(NIM_BOOL, fewcmps_553803_839829468)(Tnode290802* s0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; { if (!!(((*s0).kind == ((Tnodekind290020) 39)))) goto LA3; internalerror_194100_155036129((*s0).info, ((NimStringDesc*) &T839829468_463)); } LA3: ; { NIM_BOOL LOC7; NI64 LOC8; LOC7 = (NIM_BOOL)0; LOC8 = (NI64)0; LOC8 = getsize_318135_3876443242((*s0).typ); LOC7 = (LOC8 <= ((NI64) (intsize_175641_4151366050))); if (!(LOC7)) goto LA9; LOC7 = (((*s0).flags &(1U<<((NU)(((Tnodeflag290427) 4))&15U)))!=0); LA9: ; if (!LOC7) goto LA10; result0 = NIM_FALSE; } goto LA5; LA10: ; { Ttype290840* LOC13; LOC13 = (Ttype290840*)0; LOC13 = elemtype_318394_3876443242((*s0).typ); if (!((*LOC13).kind == ((Ttypekind290244) 31) || (*LOC13).kind >= ((Ttypekind290244) 33) && (*LOC13).kind <= ((Ttypekind290244) 35))) goto LA14; result0 = NIM_TRUE; } goto LA5; LA14: ; { NI LOC17; LOC17 = (NI)0; LOC17 = sonslen_293351_850551059(s0); result0 = (LOC17 <= ((NI) 8)); } LA5: ; return result0; } N_NIMCALL(void, binaryexprin_553837_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* a0, Tloc290816* b0, Tloc290816* 
d0, NimStringDesc* frmt0) { TY530811 LOC1; Ropeobj177006* LOC2; memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_536188_839829468((*a0)); LOC1[1] = rdsetelemloc_553662_839829468((*b0), (*a0).t); LOC2 = (Ropeobj177006*)0; LOC2 = HEX25_177905_2381377266(frmt0, LOC1, 2); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc290812) 0)); } N_NIMCALL(void, geninexpraux_551496_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* a0, Tloc290816* b0, Tloc290816* d0) { Ttype290840* LOC1; NI64 LOC2; LOC1 = (Ttype290840*)0; LOC1 = skiptypes_294099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864)); LOC2 = (NI64)0; LOC2 = getsize_318135_3876443242(LOC1); switch (((NI) (LOC2))) { case ((NI) 1): { binaryexprin_553837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_467)); } break; case ((NI) 2): { binaryexprin_553837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_468)); } break; case ((NI) 4): { binaryexprin_553837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_469)); } break; case ((NI) 8): { binaryexprin_553837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_470)); } break; default: { binaryexprin_553837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_471)); } break; } } N_NIMCALL(void, geninop_554009_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { Tloc290816 a0; Tloc290816 b0; Tloc290816 x0; Tloc290816 y0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&x0), 0, sizeof(x0)); memset((void*)(&y0), 0, sizeof(y0)); { NIM_BOOL LOC3; Tnode290802* ea0; NI length0; LOC3 = (NIM_BOOL)0; LOC3 = ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind290020) 39)); if (!(LOC3)) goto LA4; LOC3 = fewcmps_553803_839829468((*e0).kindU.S6.sons->data[((NI) 1)]); LA4: ; if (!LOC3) goto LA5; { if (!((*(*e0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind290020) 70) || (*(*e0).kindU.S6.sons->data[((NI) 2)]).kind == 
((Tnodekind290020) 69))) goto LA9; ea0 = (*(*e0).kindU.S6.sons->data[((NI) 2)]).kindU.S6.sons->data[((NI) 0)]; } goto LA7; LA9: ; { ea0 = (*e0).kindU.S6.sons->data[((NI) 2)]; } LA7: ; initlocexpr_537283_839829468(p0, ea0, (&a0)); initloc_530273_839829468((&b0), ((Tlockind290808) 6), (*e0).typ, ((Tstorageloc290812) 0)); b0.r = rope_177277_2381377266(((NimStringDesc*) &T839829468_118)); length0 = sonslen_293351_850551059((*e0).kindU.S6.sons->data[((NI) 1)]); { NI i_554061_839829468; NI HEX3Atmp_554412_839829468; NI res_554415_839829468; i_554061_839829468 = (NI)0; HEX3Atmp_554412_839829468 = (NI)0; HEX3Atmp_554412_839829468 = (NI)(length0 - ((NI) 1)); res_554415_839829468 = ((NI) 0); { while (1) { if (!(res_554415_839829468 <= HEX3Atmp_554412_839829468)) goto LA14; i_554061_839829468 = res_554415_839829468; { TY533238 LOC19; if (!((*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_554061_839829468]).kind == ((Tnodekind290020) 44))) goto LA17; initlocexpr_537283_839829468(p0, (*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_554061_839829468]).kindU.S6.sons->data[((NI) 0)], (&x0)); initlocexpr_537283_839829468(p0, (*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_554061_839829468]).kindU.S6.sons->data[((NI) 1)], (&y0)); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = rdcharloc_536227_839829468(a0); LOC19[1] = rdcharloc_536227_839829468(x0); LOC19[2] = rdcharloc_536227_839829468(y0); addf_178205_2381377266(&b0.r, ((NimStringDesc*) &T839829468_464), LOC19, 3); } goto LA15; LA17: ; { TY530811 LOC21; initlocexpr_537283_839829468(p0, (*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_554061_839829468], (&x0)); memset((void*)LOC21, 0, sizeof(LOC21)); LOC21[0] = rdcharloc_536227_839829468(a0); LOC21[1] = rdcharloc_536227_839829468(x0); addf_178205_2381377266(&b0.r, ((NimStringDesc*) &T839829468_465), LOC21, 2); } LA15: ; { if (!(i_554061_839829468 < (NI)(length0 - ((NI) 1)))) goto LA24; add_177487_2381377266(&b0.r, 
((NimStringDesc*) &T839829468_466)); } LA24: ; res_554415_839829468 += ((NI) 1); } LA14: ; } } add_177487_2381377266(&b0.r, ((NimStringDesc*) &T839829468_117)); putintodest_548468_839829468(p0, d0, (*e0).typ, b0.r, ((Tstorageloc290812) 0)); } goto LA1; LA5: ; { initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); geninexpraux_551496_839829468(p0, e0, (&a0), (&b0), d0); } LA1: ; } N_NIMCALL(void, gensetop_554419_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, Tmagic290524 op0) { Tloc290816 a0; Tloc290816 b0; Tloc290816 i0; Ttype290840* settype0; NI size0; NI64 LOC1; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&i0), 0, sizeof(i0)); settype0 = skiptypes_294099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864)); LOC1 = (NI64)0; LOC1 = getsize_318135_3876443242(settype0); size0 = ((NI) (LOC1)); switch (size0) { case ((NI) 1): case ((NI) 2): case ((NI) 4): case ((NI) 8): { switch (op0) { case ((Tmagic290524) 39): { NimStringDesc* ts0; NimStringDesc* LOC4; NimStringDesc* LOC5; NimStringDesc* LOC6; LOC4 = (NimStringDesc*)0; LOC5 = (NimStringDesc*)0; LOC5 = nimIntToStr((NI)(size0 * ((NI) 8))); LOC4 = rawNewString(LOC5->Sup.len + 2); appendString(LOC4, ((NimStringDesc*) &T839829468_45)); appendString(LOC4, LOC5); ts0 = LOC4; LOC6 = (NimStringDesc*)0; LOC6 = rawNewString(ts0->Sup.len + ts0->Sup.len + 35); appendString(LOC6, ((NimStringDesc*) &T839829468_449)); appendString(LOC6, ts0); appendString(LOC6, ((NimStringDesc*) &T839829468_450)); appendString(LOC6, ts0); appendString(LOC6, ((NimStringDesc*) &T839829468_451)); binarystmtinexcl_553857_839829468(p0, e0, d0, LOC6); } break; case ((Tmagic290524) 40): { NimStringDesc* ts0; NimStringDesc* LOC8; NimStringDesc* LOC9; NimStringDesc* LOC10; LOC8 = (NimStringDesc*)0; LOC9 = (NimStringDesc*)0; LOC9 = nimIntToStr((NI)(size0 * ((NI) 8))); LOC8 = 
rawNewString(LOC9->Sup.len + 2); appendString(LOC8, ((NimStringDesc*) &T839829468_45)); appendString(LOC8, LOC9); ts0 = LOC8; LOC10 = (NimStringDesc*)0; LOC10 = rawNewString(ts0->Sup.len + ts0->Sup.len + 42); appendString(LOC10, ((NimStringDesc*) &T839829468_452)); appendString(LOC10, ts0); appendString(LOC10, ((NimStringDesc*) &T839829468_453)); appendString(LOC10, ts0); appendString(LOC10, ((NimStringDesc*) &T839829468_454)); binarystmtinexcl_553857_839829468(p0, e0, d0, LOC10); } break; case ((Tmagic290524) 41): { { if (!(size0 <= ((NI) 4))) goto LA14; unaryexprchar_549222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_455)); } goto LA12; LA14: ; { unaryexprchar_549222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_456)); } LA12: ; } break; case ((Tmagic290524) 133): { binaryexprchar_548809_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_457)); } break; case ((Tmagic290524) 132): { binaryexprchar_548809_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_458)); } break; case ((Tmagic290524) 131): { binaryexpr_548549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_341)); } break; case ((Tmagic290524) 134): { binaryexpr_548549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_459)); } break; case ((Tmagic290524) 135): { binaryexpr_548549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_460)); } break; case ((Tmagic290524) 136): { binaryexpr_548549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_461)); } break; case ((Tmagic290524) 137): { binaryexpr_548549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_462)); } break; case ((Tmagic290524) 148): { geninop_554009_839829468(p0, e0, d0); } break; default: { internalerror_194100_155036129((*e0).info, ((NimStringDesc*) &T839829468_472)); } break; } } break; default: { switch (op0) { case ((Tmagic290524) 39): { binarystmtinexcl_553857_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_473)); } break; case ((Tmagic290524) 40): { binarystmtinexcl_553857_839829468(p0, e0, d0, 
((NimStringDesc*) &T839829468_474)); } break; case ((Tmagic290524) 41): { NimStringDesc* LOC30; NimStringDesc* LOC31; LOC30 = (NimStringDesc*)0; LOC31 = (NimStringDesc*)0; LOC31 = nimIntToStr(size0); LOC30 = rawNewString(LOC31->Sup.len + 14); appendString(LOC30, ((NimStringDesc*) &T839829468_475)); appendString(LOC30, LOC31); appendChar(LOC30, 41); unaryexprchar_549222_839829468(p0, e0, d0, LOC30); } break; case ((Tmagic290524) 133): case ((Tmagic290524) 132): { Ttype290840* LOC33; TY534475 LOC39; LOC33 = (Ttype290840*)0; LOC33 = getsystype_336150_3937434831(((Ttypekind290244) 31)); gettemp_535032_839829468(p0, LOC33, (&i0), NIM_FALSE); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); { Ttype290840* LOC38; if (!((*d0).k == ((Tlockind290808) 0))) goto LA36; LOC38 = (Ttype290840*)0; LOC38 = getsystype_336150_3937434831(((Ttypekind290244) 1)); gettemp_535032_839829468(p0, LOC38, d0, NIM_FALSE); } LA36: ; memset((void*)LOC39, 0, sizeof(LOC39)); LOC39[0] = rdloc_536188_839829468(i0); LOC39[1] = rope_177401_2381377266(((NI64) (size0))); LOC39[2] = rdloc_536188_839829468((*d0)); LOC39[3] = rdloc_536188_839829468(a0); LOC39[4] = rdloc_536188_839829468(b0); linef_530700_839829468(p0, ((Tcprocsection527011) 2), lookupopr_554426_839829468[(op0)- 132], LOC39, 5); } break; case ((Tmagic290524) 131): { NimStringDesc* LOC41; NimStringDesc* LOC42; usestringh_530345_839829468((*p0).module); LOC41 = (NimStringDesc*)0; LOC42 = (NimStringDesc*)0; LOC42 = nimIntToStr(size0); LOC41 = rawNewString(LOC42->Sup.len + 21); appendString(LOC41, ((NimStringDesc*) &T839829468_481)); appendString(LOC41, LOC42); appendString(LOC41, ((NimStringDesc*) &T839829468_482)); binaryexprchar_548809_839829468(p0, e0, d0, LOC41); } break; case ((Tmagic290524) 134): case ((Tmagic290524) 135): case ((Tmagic290524) 136): case ((Tmagic290524) 137): { Ttype290840* LOC44; TY534847 LOC49; LOC44 = 
(Ttype290840*)0; LOC44 = getsystype_336150_3937434831(((Ttypekind290244) 31)); gettemp_535032_839829468(p0, LOC44, (&i0), NIM_FALSE); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); { if (!((*d0).k == ((Tlockind290808) 0))) goto LA47; gettemp_535032_839829468(p0, a0.t, d0, NIM_FALSE); } LA47: ; memset((void*)LOC49, 0, sizeof(LOC49)); LOC49[0] = rdloc_536188_839829468(i0); LOC49[1] = rope_177401_2381377266(((NI64) (size0))); LOC49[2] = rdloc_536188_839829468((*d0)); LOC49[3] = rdloc_536188_839829468(a0); LOC49[4] = rdloc_536188_839829468(b0); LOC49[5] = rope_177277_2381377266(lookupopr_554426_839829468[(op0)- 132]); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_483), LOC49, 6); } break; case ((Tmagic290524) 148): { geninop_554009_839829468(p0, e0, d0); } break; default: { internalerror_194100_155036129((*e0).info, ((NimStringDesc*) &T839829468_484)); } break; } } break; } } static N_INLINE(Ropeobj177006*, genargstringtocstring_537776_839829468)(Tcproc527021* p0, Tnode290802* n0) { Ropeobj177006* result0; Tloc290816 a0; TY177507 LOC1; result0 = (Ropeobj177006*)0; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_536188_839829468(a0); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_485), LOC1, 1); return result0; } N_NIMCALL(Ropeobj177006*, openarrayloc_537665_839829468)(Tcproc527021* p0, Tnode290802* n0) { Ropeobj177006* result0; Tloc290816 a0; Tnode290802* q0; result0 = (Ropeobj177006*)0; memset((void*)(&a0), 0, sizeof(a0)); q0 = skipconv_326882_3876443242(n0); { Tmagic290524 LOC3; Tloc290816 b0; Tloc290816 c0; Tnode290802* LOC6; Tnode290802* LOC7; Tnode290802* LOC8; NimStringDesc* fmt0; Ttype290840* LOC9; TY533238 LOC25; LOC3 = (Tmagic290524)0; LOC3 = getmagic_316502_2616423590(q0); if 
(!(LOC3 == ((Tmagic290524) 139))) goto LA4; memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&c0), 0, sizeof(c0)); LOC6 = (Tnode290802*)0; LOC6 = HEX5BHEX5D_291238_850551059(q0, ((NI) 1)); initlocexpr_537283_839829468(p0, LOC6, (&a0)); LOC7 = (Tnode290802*)0; LOC7 = HEX5BHEX5D_291238_850551059(q0, ((NI) 2)); initlocexpr_537283_839829468(p0, LOC7, (&b0)); LOC8 = (Tnode290802*)0; LOC8 = HEX5BHEX5D_291238_850551059(q0, ((NI) 3)); initlocexpr_537283_839829468(p0, LOC8, (&c0)); LOC9 = (Ttype290840*)0; LOC9 = skiptypes_294099_850551059(a0.t, IL64(211106243062016)); switch ((*LOC9).kind) { case ((Ttypekind290244) 27): case ((Ttypekind290244) 48): case ((Ttypekind290244) 16): case ((Ttypekind290244) 4): { fmt0 = copyString(((NimStringDesc*) &T839829468_486)); } break; case ((Ttypekind290244) 28): case ((Ttypekind290244) 24): { { NIM_BOOL LOC14; Ttype290840* LOC15; NIM_BOOL LOC17; LOC14 = (NIM_BOOL)0; LOC15 = (Ttype290840*)0; LOC15 = skiptypes_294099_850551059((*n0).typ, IL64(211106232576256)); LOC14 = ((*LOC15).kind == ((Ttypekind290244) 23)); if (!(LOC14)) goto LA16; LOC17 = (NIM_BOOL)0; LOC17 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC17) goto LA18; LOC17 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA18: ; LOC14 = !(LOC17); LA16: ; if (!LOC14) goto LA19; fmt0 = copyString(((NimStringDesc*) &T839829468_487)); } goto LA12; LA19: ; { fmt0 = copyString(((NimStringDesc*) &T839829468_488)); } LA12: ; } break; default: { NimStringDesc* LOC23; NimStringDesc* LOC24; LOC23 = (NimStringDesc*)0; LOC24 = (NimStringDesc*)0; LOC24 = typetostring_318017_3876443242(a0.t, ((Tprefereddesc318011) 0)); LOC23 = rawNewString(LOC24->Sup.len + 14); appendString(LOC23, ((NimStringDesc*) &T839829468_489)); appendString(LOC23, LOC24); internalerror_194113_155036129(LOC23); fmt0 = copyString(((NimStringDesc*) &T839829468_490)); } break; } memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = rdloc_536188_839829468(a0); LOC25[1] = 
rdloc_536188_839829468(b0); LOC25[2] = rdloc_536188_839829468(c0); result0 = HEX25_177905_2381377266(fmt0, LOC25, 3); } goto LA1; LA4: ; { Ttype290840* LOC27; initlocexpr_537283_839829468(p0, n0, (&a0)); LOC27 = (Ttype290840*)0; LOC27 = skiptypes_294099_850551059(a0.t, IL64(211106240964864)); switch ((*LOC27).kind) { case ((Ttypekind290244) 27): case ((Ttypekind290244) 48): { TY177507 LOC29; memset((void*)LOC29, 0, sizeof(LOC29)); LOC29[0] = rdloc_536188_839829468(a0); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_378), LOC29, 1); } break; case ((Ttypekind290244) 28): case ((Ttypekind290244) 24): { { NIM_BOOL LOC33; Ttype290840* LOC34; NIM_BOOL LOC36; TY530811 LOC40; LOC33 = (NIM_BOOL)0; LOC34 = (Ttype290840*)0; LOC34 = skiptypes_294099_850551059((*n0).typ, IL64(211106232576256)); LOC33 = ((*LOC34).kind == ((Ttypekind290244) 23)); if (!(LOC33)) goto LA35; LOC36 = (NIM_BOOL)0; LOC36 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC36) goto LA37; LOC36 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA37: ; LOC33 = !(LOC36); LA35: ; if (!LOC33) goto LA38; memset((void*)LOC40, 0, sizeof(LOC40)); LOC40[0] = rdloc_536188_839829468(a0); LOC40[1] = lenfield_537305_839829468(p0); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_491), LOC40, 2); } goto LA31; LA38: ; { TY530811 LOC42; memset((void*)LOC42, 0, sizeof(LOC42)); LOC42[0] = rdloc_536188_839829468(a0); LOC42[1] = lenfield_537305_839829468(p0); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_379), LOC42, 2); } LA31: ; } break; case ((Ttypekind290244) 16): case ((Ttypekind290244) 4): { TY530811 LOC44; NI64 LOC45; memset((void*)LOC44, 0, sizeof(LOC44)); LOC44[0] = rdloc_536188_839829468(a0); LOC45 = (NI64)0; LOC45 = lengthord_318007_3876443242(a0.t); LOC44[1] = rope_177401_2381377266(LOC45); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_380), LOC44, 2); } break; case ((Ttypekind290244) 21): case 
((Ttypekind290244) 22): { Ttype290840* LOC47; LOC47 = (Ttype290840*)0; LOC47 = lastson_293377_850551059(a0.t); switch ((*LOC47).kind) { case ((Ttypekind290244) 28): case ((Ttypekind290244) 24): { TY530811 LOC49; memset((void*)LOC49, 0, sizeof(LOC49)); LOC49[0] = rdloc_536188_839829468(a0); LOC49[1] = lenfield_537305_839829468(p0); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_491), LOC49, 2); } break; case ((Ttypekind290244) 16): case ((Ttypekind290244) 4): { TY530811 LOC51; Ttype290840* LOC52; NI64 LOC53; memset((void*)LOC51, 0, sizeof(LOC51)); LOC51[0] = rdloc_536188_839829468(a0); LOC52 = (Ttype290840*)0; LOC52 = lastson_293377_850551059(a0.t); LOC53 = (NI64)0; LOC53 = lengthord_318007_3876443242(LOC52); LOC51[1] = rope_177401_2381377266(LOC53); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_380), LOC51, 2); } break; default: { NimStringDesc* LOC55; NimStringDesc* LOC56; LOC55 = (NimStringDesc*)0; LOC56 = (NimStringDesc*)0; LOC56 = typetostring_318017_3876443242(a0.t, ((Tprefereddesc318011) 0)); LOC55 = rawNewString(LOC56->Sup.len + 14); appendString(LOC55, ((NimStringDesc*) &T839829468_489)); appendString(LOC55, LOC56); internalerror_194113_155036129(LOC55); } break; } } break; default: { NimStringDesc* LOC58; NimStringDesc* LOC59; LOC58 = (NimStringDesc*)0; LOC59 = (NimStringDesc*)0; LOC59 = typetostring_318017_3876443242(a0.t, ((Tprefereddesc318011) 0)); LOC58 = rawNewString(LOC59->Sup.len + 14); appendString(LOC58, ((NimStringDesc*) &T839829468_489)); appendString(LOC58, LOC59); internalerror_194113_155036129(LOC58); } break; } } LA1: ; return result0; } N_NIMCALL(Ropeobj177006*, genarg_537787_839829468)(Tcproc527021* p0, Tnode290802* n_537790_839829468, Tsym290834* param0, Tnode290802* call0) { Ropeobj177006* result0; Tloc290816 a0; result0 = (Ropeobj177006*)0; memset((void*)(&a0), 0, sizeof(a0)); { if (!((*n_537790_839829468).kind == ((Tnodekind290020) 71))) goto LA3; result0 = genargstringtocstring_537776_839829468(p0, 
n_537790_839829468); } goto LA1; LA3: ; { Ttype290840* LOC6; Tnode290802* n0; LOC6 = (Ttype290840*)0; LOC6 = skiptypes_294099_850551059((*param0).typ, IL64(211106240964864)); if (!((*LOC6).kind == ((Ttypekind290244) 27) || (*LOC6).kind == ((Ttypekind290244) 48))) goto LA7; { if (!!(((*n_537790_839829468).kind == ((Tnodekind290020) 64)))) goto LA11; n0 = n_537790_839829468; } goto LA9; LA11: ; { n0 = (*n_537790_839829468).kindU.S6.sons->data[((NI) 0)]; } LA9: ; result0 = openarrayloc_537665_839829468(p0, n0); } goto LA1; LA7: ; { NIM_BOOL LOC15; LOC15 = (NIM_BOOL)0; LOC15 = ccgintroducedptr_531609_839829468(param0); if (!LOC15) goto LA16; initlocexpr_537283_839829468(p0, n_537790_839829468, (&a0)); result0 = addrloc_536204_839829468(a0); } goto LA1; LA16: ; { NIM_BOOL LOC19; NIM_BOOL LOC20; NIM_BOOL LOC21; Tnode290802* callee0; LOC19 = (NIM_BOOL)0; LOC20 = (NIM_BOOL)0; LOC21 = (NIM_BOOL)0; LOC21 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC21) goto LA22; LOC21 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA22: ; LOC20 = LOC21; if (!(LOC20)) goto LA23; LOC20 = ((*(*param0).typ).kind == ((Ttypekind290244) 23)); LA23: ; LOC19 = LOC20; if (!(LOC19)) goto LA24; LOC19 = ((*n_537790_839829468).kind == ((Tnodekind290020) 64)); LA24: ; if (!LOC19) goto LA25; initlocexprsingleuse_537289_839829468(p0, (*n_537790_839829468).kindU.S6.sons->data[((NI) 0)], (&a0)); callee0 = (*call0).kindU.S6.sons->data[((NI) 0)]; { NIM_BOOL LOC29; NIM_BOOL LOC30; LOC29 = (NIM_BOOL)0; LOC30 = (NIM_BOOL)0; LOC30 = ((*callee0).kind == ((Tnodekind290020) 3)); if (!(LOC30)) goto LA31; LOC30 = ((134283296 & (*(*callee0).kindU.S4.sym).flags) == 32); LA31: ; LOC29 = LOC30; if (!(LOC29)) goto LA32; LOC29 = !(((72 & (*(*callee0).kindU.S4.sym).loc.flags) == 0)); LA32: ; if (!LOC29) goto LA33; result0 = addrloc_536204_839829468(a0); } goto LA27; LA33: ; { result0 = rdloc_536188_839829468(a0); } LA27: ; } goto LA1; LA25: ; { 
initlocexprsingleuse_537289_839829468(p0, n_537790_839829468, (&a0)); result0 = rdloc_536188_839829468(a0); } LA1: ; return result0; } N_NIMCALL(Ropeobj177006*, genargnoparam_537938_839829468)(Tcproc527021* p0, Tnode290802* n0) { Ropeobj177006* result0; Tloc290816 a0; result0 = (Ropeobj177006*)0; memset((void*)(&a0), 0, sizeof(a0)); { if (!((*n0).kind == ((Tnodekind290020) 71))) goto LA3; result0 = genargstringtocstring_537776_839829468(p0, n0); } goto LA1; LA3: ; { initlocexprsingleuse_537289_839829468(p0, n0, (&a0)); result0 = rdloc_536188_839829468(a0); } LA1: ; return result0; } N_NIMCALL(Ropeobj177006*, getrawproctype_538459_839829468)(Tcproc527021* p0, Ttype290840* t0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; result0 = getclosuretype_533683_839829468((*p0).module, t0, ((Tclosuretypekind533679) 0)); return result0; } N_NIMCALL(NIM_BOOL, leftappearsonrightside_537329_839829468)(Tnode290802* le0, Tnode290802* ri0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; { if (!!((le0 == NIM_NIL))) goto LA3; { NI i_537364_839829468; NI HEX3Atmp_537376_839829468; NI LOC6; NI res_537379_839829468; i_537364_839829468 = (NI)0; HEX3Atmp_537376_839829468 = (NI)0; LOC6 = (NI)0; LOC6 = len_291081_850551059(ri0); HEX3Atmp_537376_839829468 = (LOC6 - 1); res_537379_839829468 = ((NI) 1); { while (1) { Tnode290802* r0; if (!(res_537379_839829468 <= HEX3Atmp_537376_839829468)) goto LA8; i_537364_839829468 = res_537379_839829468; r0 = HEX5BHEX5D_291238_850551059(ri0, i_537364_839829468); { Tanalysisresult471003 LOC11; LOC11 = (Tanalysisresult471003)0; LOC11 = ispartof_471340_788060399(le0, r0); if (!!((LOC11 == ((Tanalysisresult471003) 0)))) goto LA12; result0 = NIM_TRUE; goto BeforeRet; } LA12: ; res_537379_839829468 += ((NI) 1); } LA8: ; } } } LA3: ; }BeforeRet: ; return result0; } static N_INLINE(NIM_BOOL, hasnoinit_537383_839829468)(Tnode290802* call0) { NIM_BOOL result0; NIM_BOOL LOC1; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = 
((*(*call0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 3)); if (!(LOC1)) goto LA2; LOC1 = (((*(*(*call0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag290184) 12))&31U)))!=0); LA2: ; result0 = LOC1; return result0; } N_NIMCALL(void, resetloc_536350_839829468)(Tcproc527021* p0, Tloc290816* loc0) { NIM_BOOL containsgcref0; Ttype290840* typ0; { containsgcref0 = containsgarbagecollectedref_318117_3876443242((*loc0).t); typ0 = skiptypes_294099_850551059((*loc0).t, IL64(211106242013440)); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = isimportedcpptype_531476_839829468(typ0); if (!LOC3) goto LA4; goto BeforeRet; } LA4: ; { NIM_BOOL LOC8; LOC8 = (NIM_BOOL)0; LOC8 = iscomplexvaluetype_536317_839829468(typ0); if (!!(LOC8)) goto LA9; { Tloc290816 nilloc0; if (!containsgcref0) goto LA13; memset((void*)(&nilloc0), 0, sizeof(nilloc0)); initloc_530273_839829468((&nilloc0), ((Tlockind290808) 1), (*loc0).t, ((Tstorageloc290812) 2)); nilloc0.r = rope_177277_2381377266(((NimStringDesc*) &T839829468_174)); genrefassign_536311_839829468(p0, (*loc0), nilloc0, 8); } goto LA11; LA13: ; { TY177507 LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_536188_839829468((*loc0)); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_494), LOC16, 1); } LA11: ; } goto LA6; LA9: ; { { TY177507 LOC22; if (!(((*p0).options &(1U<<((NU)(((Toption168009) 6))&31U)))!=0)) goto LA20; memset((void*)LOC22, 0, sizeof(LOC22)); LOC22[0] = addrloc_536204_839829468((*loc0)); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_495), LOC22, 1); } LA20: ; { TY530811 LOC27; if (!!(((*loc0).s == ((Tstorageloc290812) 2)))) goto LA25; memset((void*)LOC27, 0, sizeof(LOC27)); LOC27[0] = addrloc_536204_839829468((*loc0)); LOC27[1] = gentypeinfo_533941_839829468((*p0).module, (*loc0).t); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_496), LOC27, 2); 
genobjectinit_536242_839829468(p0, ((Tcprocsection527011) 2), (*loc0).t, (*loc0), NIM_TRUE); } goto LA23; LA25: ; { TY530811 LOC29; usestringh_530345_839829468((*p0).module); memset((void*)LOC29, 0, sizeof(LOC29)); LOC29[0] = addrloc_536204_839829468((*loc0)); LOC29[1] = rdloc_536188_839829468((*loc0)); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_152), LOC29, 2); genobjectinit_536242_839829468(p0, ((Tcprocsection527011) 2), (*loc0).t, (*loc0), NIM_TRUE); } LA23: ; } LA6: ; }BeforeRet: ; } N_NIMCALL(Ropeobj177006*, addcomma_538464_839829468)(Ropeobj177006* r0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; { if (!(r0 == NIM_NIL)) goto LA3; result0 = r0; } goto LA1; LA3: ; { TY531289 LOC6; Ropeobj177006* LOC7; memset((void*)LOC6, 0, sizeof(LOC6)); LOC7 = (Ropeobj177006*)0; LOC7 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_110), LOC6, 0); result0 = HEX26_177418_2381377266(r0, LOC7); } LA1: ; return result0; } N_NIMCALL(void, genclosurecall_538452_839829468)(Tcproc527021* p0, Tnode290802* le0, Tnode290802* ri0, Tloc290816* d0) { Tloc290816 op0; Ropeobj177006* pl0; Ttype290840* typ0; NI length0; Ropeobj177006* rawproc0; NimStringDesc* callpattern0; memset((void*)(&op0), 0, sizeof(op0)); initlocexpr_537283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0)); pl0 = (Ropeobj177006*)0; typ0 = skiptypes_294099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); length0 = sonslen_293351_850551059(ri0); { NI i_538613_839829468; NI HEX3Atmp_539214_839829468; NI res_539217_839829468; i_538613_839829468 = (NI)0; HEX3Atmp_539214_839829468 = (NI)0; HEX3Atmp_539214_839829468 = (NI)(length0 - ((NI) 1)); res_539217_839829468 = ((NI) 1); { while (1) { if (!(res_539217_839829468 <= HEX3Atmp_539214_839829468)) goto LA3; i_538613_839829468 = res_539217_839829468; { NI LOC6; Tnode290802* paramtype0; LOC6 = (NI)0; LOC6 = sonslen_293327_850551059(typ0); if (!(i_538613_839829468 < LOC6)) goto LA7; 
paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i_538613_839829468]; { NIM_BOOL LOC11; Ropeobj177006* LOC20; LOC11 = (NIM_BOOL)0; LOC11 = iscompiletimeonly_326706_3876443242((*paramtype0).typ); if (!!(LOC11)) goto LA12; { TY531289 LOC18; Ropeobj177006* LOC19; if (!!((pl0 == NIM_NIL))) goto LA16; memset((void*)LOC18, 0, sizeof(LOC18)); LOC19 = (Ropeobj177006*)0; LOC19 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_110), LOC18, 0); add_177482_2381377266(&pl0, LOC19); } LA16: ; LOC20 = (Ropeobj177006*)0; LOC20 = genarg_537787_839829468(p0, (*ri0).kindU.S6.sons->data[i_538613_839829468], (*paramtype0).kindU.S4.sym, ri0); add_177482_2381377266(&pl0, LOC20); } LA12: ; } goto LA4; LA7: ; { Ropeobj177006* LOC28; { TY531289 LOC26; Ropeobj177006* LOC27; if (!!((pl0 == NIM_NIL))) goto LA24; memset((void*)LOC26, 0, sizeof(LOC26)); LOC27 = (Ropeobj177006*)0; LOC27 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_110), LOC26, 0); add_177482_2381377266(&pl0, LOC27); } LA24: ; LOC28 = (Ropeobj177006*)0; LOC28 = genargnoparam_537938_839829468(p0, (*ri0).kindU.S6.sons->data[i_538613_839829468]); add_177482_2381377266(&pl0, LOC28); } LA4: ; res_539217_839829468 += ((NI) 1); } LA3: ; } } rawproc0 = getrawproctype_538459_839829468(p0, typ0); { if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag290431) 14))&31U)))!=0)) goto LA31; callpattern0 = copyString(((NimStringDesc*) &T839829468_492)); } goto LA29; LA31: ; { callpattern0 = copyString(((NimStringDesc*) &T839829468_493)); } LA29: ; { if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA36; { NIM_BOOL LOC40; LOC40 = (NIM_BOOL)0; LOC40 = isinvalidreturntype_531548_839829468((*typ0).sons->data[((NI) 0)]); if (!LOC40) goto LA41; { NI LOC45; TY531289 LOC48; Ropeobj177006* LOC49; LOC45 = (NI)0; LOC45 = sonslen_293351_850551059(ri0); if (!(((NI) 1) < LOC45)) goto LA46; memset((void*)LOC48, 0, sizeof(LOC48)); LOC49 = (Ropeobj177006*)0; LOC49 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_110), LOC48, 0); 
add_177482_2381377266(&pl0, LOC49); } LA46: ; { NIM_BOOL LOC52; NIM_BOOL LOC54; Ropeobj177006* LOC67; NimStringDesc* LOC68; TY533235 LOC69; LOC52 = (NIM_BOOL)0; LOC52 = ((3 &(1U<<((NU)((*d0).k)&15U)))!=0); if (LOC52) goto LA53; LOC54 = (NIM_BOOL)0; LOC54 = leftappearsonrightside_537329_839829468(le0, ri0); LOC52 = !(LOC54); LA53: ; if (!LOC52) goto LA55; { if (!((*d0).k == ((Tlockind290808) 0))) goto LA59; gettemp_535032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE); } goto LA57; LA59: ; { NIM_BOOL LOC62; NIM_BOOL LOC64; LOC62 = (NIM_BOOL)0; LOC62 = !(((66 &(1U<<((NU)((*d0).k)&15U)))!=0)); if (!(LOC62)) goto LA63; LOC64 = (NIM_BOOL)0; LOC64 = hasnoinit_537383_839829468(ri0); LOC62 = !(LOC64); LA63: ; if (!LOC62) goto LA65; resetloc_536350_839829468(p0, d0); } goto LA57; LA65: ; LA57: ; LOC67 = (Ropeobj177006*)0; LOC67 = addrloc_536204_839829468((*d0)); add_177482_2381377266(&pl0, LOC67); LOC68 = (NimStringDesc*)0; LOC68 = rawNewString(callpattern0->Sup.len + 3); appendString(LOC68, callpattern0); appendString(LOC68, ((NimStringDesc*) &T839829468_497)); memset((void*)LOC69, 0, sizeof(LOC69)); LOC69[0] = op0.r; LOC69[1] = pl0; LOC69[2] = addcomma_538464_839829468(pl0); LOC69[3] = rawproc0; linef_530700_839829468(p0, ((Tcprocsection527011) 2), LOC68, LOC69, 4); } goto LA50; LA55: ; { Tloc290816 tmp0; Ropeobj177006* LOC71; NimStringDesc* LOC72; TY533235 LOC73; memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_535032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE); LOC71 = (Ropeobj177006*)0; LOC71 = addrloc_536204_839829468(tmp0); add_177482_2381377266(&pl0, LOC71); LOC72 = (NimStringDesc*)0; LOC72 = rawNewString(callpattern0->Sup.len + 3); appendString(LOC72, callpattern0); appendString(LOC72, ((NimStringDesc*) &T839829468_497)); memset((void*)LOC73, 0, sizeof(LOC73)); LOC73[0] = op0.r; LOC73[1] = pl0; LOC73[2] = addcomma_538464_839829468(pl0); LOC73[3] = rawproc0; linef_530700_839829468(p0, ((Tcprocsection527011) 2), LOC72, LOC73, 4); 
genassignment_537264_839829468(p0, (*d0), tmp0, 0); } LA50: ; } goto LA38; LA41: ; { Tloc290816 list0; TY533235 LOC79; { if (!((*d0).k == ((Tlockind290808) 0))) goto LA77; gettemp_535032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE); } LA77: ; memset((void*)(&list0), 0, sizeof(list0)); initloc_530273_839829468((&list0), ((Tlockind290808) 9), (*d0).t, ((Tstorageloc290812) 0)); memset((void*)LOC79, 0, sizeof(LOC79)); LOC79[0] = op0.r; LOC79[1] = pl0; LOC79[2] = addcomma_538464_839829468(pl0); LOC79[3] = rawproc0; list0.r = HEX25_177905_2381377266(callpattern0, LOC79, 4); genassignment_537264_839829468(p0, (*d0), list0, 0); } LA38: ; } goto LA34; LA36: ; { NimStringDesc* LOC81; TY533235 LOC82; LOC81 = (NimStringDesc*)0; LOC81 = rawNewString(callpattern0->Sup.len + 3); appendString(LOC81, callpattern0); appendString(LOC81, ((NimStringDesc*) &T839829468_497)); memset((void*)LOC82, 0, sizeof(LOC82)); LOC82[0] = op0.r; LOC82[1] = pl0; LOC82[2] = addcomma_538464_839829468(pl0); LOC82[3] = rawproc0; linef_530700_839829468(p0, ((Tcprocsection527011) 2), LOC81, LOC82, 4); } LA34: ; } N_NIMCALL(Ropeobj177006*, genotherarg_537277_839829468)(Tcproc527021* p0, Tnode290802* ri0, NI i0, Ttype290840* typ0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; { NI LOC3; Tnode290802* paramtype0; LOC3 = (NI)0; LOC3 = sonslen_293327_850551059(typ0); if (!(i0 < LOC3)) goto LA4; paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i0]; { NIM_BOOL LOC8; LOC8 = (NIM_BOOL)0; LOC8 = iscompiletimeonly_326706_3876443242((*paramtype0).typ); if (!LOC8) goto LA9; result0 = NIM_NIL; } goto LA6; LA9: ; { NIM_BOOL LOC12; Tnode290802* LOC16; LOC12 = (NIM_BOOL)0; LOC12 = ((*(*typ0).sons->data[i0]).kind == ((Ttypekind290244) 23)); if (!(LOC12)) goto LA13; LOC12 = ((*(*ri0).kindU.S6.sons->data[i0]).kind == ((Tnodekind290020) 64)); LA13: ; if (!LOC12) goto LA14; LOC16 = (Tnode290802*)0; LOC16 = HEX5BHEX5D_291238_850551059((*ri0).kindU.S6.sons->data[i0], ((NI) 0)); result0 = 
genargnoparam_537938_839829468(p0, LOC16); } goto LA6; LA14: ; { result0 = genargnoparam_537938_839829468(p0, (*ri0).kindU.S6.sons->data[i0]); } LA6: ; } goto LA1; LA4: ; { { if (!!((((*typ0).flags &(1U<<((NU)(((Ttypeflag290431) 0))&31U)))!=0))) goto LA21; localerror_194085_155036129((*ri0).info, ((NimStringDesc*) &T839829468_501)); result0 = NIM_NIL; } goto LA19; LA21: ; { result0 = genargnoparam_537938_839829468(p0, (*ri0).kindU.S6.sons->data[i0]); } LA19: ; } LA1: ; return result0; } N_NIMCALL(Tnode290802*, skipaddrderef_539433_839829468)(Tnode290802* node0) { Tnode290802* result0; Tnode290802* n0; NIM_BOOL isaddr0; { result0 = (Tnode290802*)0; n0 = node0; isaddr0 = NIM_FALSE; switch ((*n0).kind) { case ((Tnodekind290020) 63): case ((Tnodekind290020) 64): { n0 = (*n0).kindU.S6.sons->data[((NI) 0)]; isaddr0 = NIM_TRUE; } break; case ((Tnodekind290020) 47): case ((Tnodekind290020) 65): { n0 = (*n0).kindU.S6.sons->data[((NI) 0)]; } break; default: { result0 = n0; goto BeforeRet; } break; } { if (!((*n0).kind == ((Tnodekind290020) 66))) goto LA6; n0 = (*n0).kindU.S6.sons->data[((NI) 0)]; } LA6: ; { NIM_BOOL LOC10; LOC10 = (NIM_BOOL)0; LOC10 = isaddr0; if (!(LOC10)) goto LA11; LOC10 = ((*n0).kind == ((Tnodekind290020) 47) || (*n0).kind == ((Tnodekind290020) 65)); LA11: ; if (!LOC10) goto LA12; result0 = (*n0).kindU.S6.sons->data[((NI) 0)]; } goto LA8; LA12: ; { if (!((*n0).kind == ((Tnodekind290020) 63) || (*n0).kind == ((Tnodekind290020) 64))) goto LA15; result0 = (*n0).kindU.S6.sons->data[((NI) 0)]; } goto LA8; LA15: ; { result0 = node0; } LA8: ; }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj177006*, genthisarg_539475_839829468)(Tcproc527021* p0, Tnode290802* ri_539478_839829468, NI i0, Ttype290840* typ0) { Ropeobj177006* result0; Tnode290802* ri0; Ttype290840* t0; result0 = (Ropeobj177006*)0; { NI LOC3; NimStringDesc* LOC6; LOC3 = (NI)0; LOC3 = sonslen_293327_850551059(typ0); if (!!((i0 < LOC3))) goto LA4; LOC6 = (NimStringDesc*)0; LOC6 = 
HEX24_194185_1689653243(T839829468_503); internalerror_194113_155036129(LOC6); } LA4: ; ri0 = HEX5BHEX5D_291238_850551059(ri_539478_839829468, i0); { while (1) { if (!((*ri0).kind == ((Tnodekind290020) 66))) goto LA8; ri0 = HEX5BHEX5D_291238_850551059(ri0, ((NI) 0)); } LA8: ; } t0 = skiptypes_294099_850551059((*typ0).sons->data[i0], 2048); { Tnode290802* x0; if (!((*t0).kind == ((Ttypekind290244) 23))) goto LA11; { if (!((*ri0).kind == ((Tnodekind290020) 64))) goto LA15; x0 = HEX5BHEX5D_291238_850551059(ri0, ((NI) 0)); } goto LA13; LA15: ; { x0 = ri0; } LA13: ; { if (!((*(*x0).typ).kind == ((Ttypekind290244) 21))) goto LA20; result0 = genargnoparam_537938_839829468(p0, x0); add_177487_2381377266(&result0, ((NimStringDesc*) &T839829468_504)); } goto LA18; LA20: ; { NIM_BOOL LOC23; Tnode290802* LOC25; Tnode290802* LOC28; LOC23 = (NIM_BOOL)0; LOC23 = ((*x0).kind == ((Tnodekind290020) 65) || (*x0).kind == ((Tnodekind290020) 47)); if (!(LOC23)) goto LA24; LOC25 = (Tnode290802*)0; LOC25 = HEX5BHEX5D_291238_850551059(x0, ((NI) 0)); LOC23 = ((*(*LOC25).typ).kind == ((Ttypekind290244) 21)); LA24: ; if (!LOC23) goto LA26; LOC28 = (Tnode290802*)0; LOC28 = HEX5BHEX5D_291238_850551059(x0, ((NI) 0)); result0 = genargnoparam_537938_839829468(p0, LOC28); add_177487_2381377266(&result0, ((NimStringDesc*) &T839829468_504)); } goto LA18; LA26: ; { result0 = genargnoparam_537938_839829468(p0, x0); add_177487_2381377266(&result0, ((NimStringDesc*) &T839829468_257)); } LA18: ; } goto LA9; LA11: ; { if (!((*t0).kind == ((Ttypekind290244) 21))) goto LA31; { Tnode290802* LOC37; if (!((*ri0).kind == ((Tnodekind290020) 63) || (*ri0).kind == ((Tnodekind290020) 64))) goto LA35; LOC37 = (Tnode290802*)0; LOC37 = HEX5BHEX5D_291238_850551059(ri0, ((NI) 0)); result0 = genargnoparam_537938_839829468(p0, LOC37); add_177487_2381377266(&result0, ((NimStringDesc*) &T839829468_257)); } goto LA33; LA35: ; { result0 = genargnoparam_537938_839829468(p0, ri0); add_177487_2381377266(&result0, 
((NimStringDesc*) &T839829468_504)); } LA33: ; } goto LA9; LA31: ; { ri0 = skipaddrderef_539433_839829468(ri0); { if (!((*ri0).kind == ((Tnodekind290020) 63) || (*ri0).kind == ((Tnodekind290020) 64))) goto LA42; ri0 = HEX5BHEX5D_291238_850551059(ri0, ((NI) 0)); } LA42: ; result0 = genargnoparam_537938_839829468(p0, ri0); add_177487_2381377266(&result0, ((NimStringDesc*) &T839829468_257)); } LA9: ; return result0; } N_NIMCALL(Ropeobj177006*, genpatterncall_539699_839829468)(Tcproc527021* p0, Tnode290802* ri_539702_839829468, NimStringDesc* pat0, Ttype290840* typ_539704_839829468) { Ropeobj177006* result0; NI i0; NI j0; result0 = (Ropeobj177006*)0; i0 = ((NI) 0); j0 = ((NI) 1); { while (1) { if (!(i0 < (pat0 ? pat0->Sup.len : 0))) goto LA2; switch (((NU8)(pat0->data[i0]))) { case 64: { { NI LOC6; Ropeobj177006* LOC9; LOC6 = (NI)0; LOC6 = len_291081_850551059(ri_539702_839829468); if (!(j0 < LOC6)) goto LA7; LOC9 = (Ropeobj177006*)0; LOC9 = genotherarg_537277_839829468(p0, ri_539702_839829468, j0, typ_539704_839829468); add_177482_2381377266(&result0, LOC9); { NI k_539728_839829468; NI HEX3Atmp_539904_839829468; NI HEX3Atmp_539905_839829468; NI LOC11; NI res_539908_839829468; k_539728_839829468 = (NI)0; HEX3Atmp_539904_839829468 = (NI)0; HEX3Atmp_539905_839829468 = (NI)0; HEX3Atmp_539904_839829468 = (NI)(j0 + ((NI) 1)); LOC11 = (NI)0; LOC11 = len_291081_850551059(ri_539702_839829468); HEX3Atmp_539905_839829468 = (LOC11 - 1); res_539908_839829468 = HEX3Atmp_539904_839829468; { while (1) { TY531289 LOC14; Ropeobj177006* LOC15; Ropeobj177006* LOC16; if (!(res_539908_839829468 <= HEX3Atmp_539905_839829468)) goto LA13; k_539728_839829468 = res_539908_839829468; memset((void*)LOC14, 0, sizeof(LOC14)); LOC15 = (Ropeobj177006*)0; LOC15 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_110), LOC14, 0); add_177482_2381377266(&result0, LOC15); LOC16 = (Ropeobj177006*)0; LOC16 = genotherarg_537277_839829468(p0, ri_539702_839829468, k_539728_839829468, typ_539704_839829468); 
add_177482_2381377266(&result0, LOC16); res_539908_839829468 += ((NI) 1); } LA13: ; } } } LA7: ; i0 += ((NI) 1); } break; case 35: { { Tnode290802* ri0; if (!(((NU8)(pat0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(43)) || ((NU8)(pat0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(64)))) goto LA20; ri0 = HEX5BHEX5D_291238_850551059(ri_539702_839829468, j0); { Ttype290840* typ0; TY531289 LOC31; Ropeobj177006* LOC32; TY531289 LOC46; Ropeobj177006* LOC47; if (!((*ri0).kind == ((Tnodekind290020) 27) || (*ri0).kind == ((Tnodekind290020) 29) || (*ri0).kind == ((Tnodekind290020) 30) || (*ri0).kind == ((Tnodekind290020) 31) || (*ri0).kind == ((Tnodekind290020) 26) || (*ri0).kind == ((Tnodekind290020) 28) || (*ri0).kind == ((Tnodekind290020) 32))) goto LA24; typ0 = skiptypes_294099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); { Ropeobj177006* LOC30; if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(43))) goto LA28; LOC30 = (Ropeobj177006*)0; LOC30 = genargnoparam_537938_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)]); add_177482_2381377266(&result0, LOC30); } LA28: ; memset((void*)LOC31, 0, sizeof(LOC31)); LOC32 = (Ropeobj177006*)0; LOC32 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_118), LOC31, 0); add_177482_2381377266(&result0, LOC32); { NI LOC35; Ropeobj177006* LOC38; LOC35 = (NI)0; LOC35 = len_291081_850551059(ri0); if (!(((NI) 1) < LOC35)) goto LA36; LOC38 = (Ropeobj177006*)0; LOC38 = genotherarg_537277_839829468(p0, ri0, ((NI) 1), typ0); add_177482_2381377266(&result0, LOC38); } LA36: ; { NI k_539793_839829468; NI HEX3Atmp_539915_839829468; NI HEX3Atmp_539916_839829468; NI LOC40; NI res_539919_839829468; k_539793_839829468 = (NI)0; HEX3Atmp_539915_839829468 = (NI)0; HEX3Atmp_539916_839829468 = (NI)0; HEX3Atmp_539915_839829468 = (NI)(j0 + ((NI) 1)); LOC40 = (NI)0; LOC40 = len_291081_850551059(ri0); HEX3Atmp_539916_839829468 = (LOC40 - 1); res_539919_839829468 = HEX3Atmp_539915_839829468; { while (1) { TY531289 LOC43; 
Ropeobj177006* LOC44; Ropeobj177006* LOC45; if (!(res_539919_839829468 <= HEX3Atmp_539916_839829468)) goto LA42; k_539793_839829468 = res_539919_839829468; memset((void*)LOC43, 0, sizeof(LOC43)); LOC44 = (Ropeobj177006*)0; LOC44 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_110), LOC43, 0); add_177482_2381377266(&result0, LOC44); LOC45 = (Ropeobj177006*)0; LOC45 = genotherarg_537277_839829468(p0, ri0, k_539793_839829468, typ0); add_177482_2381377266(&result0, LOC45); res_539919_839829468 += ((NI) 1); } LA42: ; } } memset((void*)LOC46, 0, sizeof(LOC46)); LOC47 = (Ropeobj177006*)0; LOC47 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_117), LOC46, 0); add_177482_2381377266(&result0, LOC47); } goto LA22; LA24: ; { localerror_194085_155036129((*ri0).info, ((NimStringDesc*) &T839829468_502)); } LA22: ; i0 += ((NI) 1); } goto LA18; LA20: ; { Ropeobj177006* LOC52; if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(46))) goto LA50; LOC52 = (Ropeobj177006*)0; LOC52 = genthisarg_539475_839829468(p0, ri_539702_839829468, j0, typ_539704_839829468); add_177482_2381377266(&result0, LOC52); i0 += ((NI) 1); } goto LA18; LA50: ; { Tnode290802* arg0; Ropeobj177006* LOC58; if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(91))) goto LA54; arg0 = skipaddrderef_539433_839829468((*ri_539702_839829468).kindU.S6.sons->data[j0]); { while (1) { if (!((*arg0).kind == ((Tnodekind290020) 63) || (*arg0).kind == ((Tnodekind290020) 64) || (*arg0).kind == ((Tnodekind290020) 66))) goto LA57; arg0 = HEX5BHEX5D_291238_850551059(arg0, ((NI) 0)); } LA57: ; } LOC58 = (Ropeobj177006*)0; LOC58 = genargnoparam_537938_839829468(p0, arg0); add_177482_2381377266(&result0, LOC58); } goto LA18; LA54: ; { Ropeobj177006* LOC60; LOC60 = (Ropeobj177006*)0; LOC60 = genotherarg_537277_839829468(p0, ri_539702_839829468, j0, typ_539704_839829468); add_177482_2381377266(&result0, LOC60); } LA18: ; j0 += ((NI) 1); i0 += ((NI) 1); } break; case 39: { NI idx0; NI stars0; idx0 = (NI)0; stars0 = 
(NI)0; { NIM_BOOL LOC64; Ttype290840* t0; LOC64 = (NIM_BOOL)0; LOC64 = scancppgenericslot_532827_839829468(pat0, (&i0), (&idx0), (&stars0)); if (!LOC64) goto LA65; t0 = resolvestarsincpptype_532891_839829468(typ_539704_839829468, idx0, stars0); { TY531289 LOC71; Ropeobj177006* LOC72; if (!(t0 == NIM_NIL)) goto LA69; memset((void*)LOC71, 0, sizeof(LOC71)); LOC72 = (Ropeobj177006*)0; LOC72 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_26), LOC71, 0); add_177482_2381377266(&result0, LOC72); } goto LA67; LA69: ; { Ropeobj177006* LOC74; LOC74 = (Ropeobj177006*)0; LOC74 = gettypedesc_533671_839829468((*p0).module, t0); add_177482_2381377266(&result0, LOC74); } LA67: ; } LA65: ; } break; default: { NI start0; start0 = i0; { while (1) { if (!(i0 < (pat0 ? pat0->Sup.len : 0))) goto LA77; { if (!!((((NU8)(pat0->data[i0])) == ((NU8)(64)) || ((NU8)(pat0->data[i0])) == ((NU8)(35)) || ((NU8)(pat0->data[i0])) == ((NU8)(39))))) goto LA80; i0 += ((NI) 1); } goto LA78; LA80: ; { goto LA76; } LA78: ; } LA77: ; } LA76: ; { NimStringDesc* LOC87; if (!(start0 <= (NI)(i0 - ((NI) 1)))) goto LA85; LOC87 = (NimStringDesc*)0; LOC87 = copyStrLast(pat0, start0, (NI)(i0 - ((NI) 1))); add_177487_2381377266(&result0, LOC87); } LA85: ; } break; } } LA2: ; } return result0; } N_NIMCALL(void, fixupcall_537410_839829468)(Tcproc527021* p0, Tnode290802* le0, Tnode290802* ri0, Tloc290816* d0, Ropeobj177006* callee0, Ropeobj177006* params0) { Ropeobj177006* pl0; TY531289 LOC1; Ropeobj177006* LOC2; Ropeobj177006* LOC3; Ttype290840* typ0; memset((void*)LOC1, 0, sizeof(LOC1)); LOC2 = (Ropeobj177006*)0; LOC2 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_118), LOC1, 0); LOC3 = (Ropeobj177006*)0; LOC3 = HEX26_177418_2381377266(callee0, LOC2); pl0 = HEX26_177418_2381377266(LOC3, params0); typ0 = skiptypes_294099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); { if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA6; { NIM_BOOL LOC10; LOC10 = (NIM_BOOL)0; 
LOC10 = isinvalidreturntype_531548_839829468((*typ0).sons->data[((NI) 0)]); if (!LOC10) goto LA11; { TY531289 LOC17; Ropeobj177006* LOC18; if (!!((params0 == NIM_NIL))) goto LA15; memset((void*)LOC17, 0, sizeof(LOC17)); LOC18 = (Ropeobj177006*)0; LOC18 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_110), LOC17, 0); add_177482_2381377266(&pl0, LOC18); } LA15: ; { NIM_BOOL LOC21; NIM_BOOL LOC23; Ropeobj177006* LOC36; TY531289 LOC37; Ropeobj177006* LOC38; LOC21 = (NIM_BOOL)0; LOC21 = ((3 &(1U<<((NU)((*d0).k)&15U)))!=0); if (LOC21) goto LA22; LOC23 = (NIM_BOOL)0; LOC23 = leftappearsonrightside_537329_839829468(le0, ri0); LOC21 = !(LOC23); LA22: ; if (!LOC21) goto LA24; { if (!((*d0).k == ((Tlockind290808) 0))) goto LA28; gettemp_535032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE); } goto LA26; LA28: ; { NIM_BOOL LOC31; NIM_BOOL LOC33; LOC31 = (NIM_BOOL)0; LOC31 = !(((66 &(1U<<((NU)((*d0).k)&15U)))!=0)); if (!(LOC31)) goto LA32; LOC33 = (NIM_BOOL)0; LOC33 = hasnoinit_537383_839829468(ri0); LOC31 = !(LOC33); LA32: ; if (!LOC31) goto LA34; resetloc_536350_839829468(p0, d0); } goto LA26; LA34: ; LA26: ; LOC36 = (Ropeobj177006*)0; LOC36 = addrloc_536204_839829468((*d0)); add_177482_2381377266(&pl0, LOC36); memset((void*)LOC37, 0, sizeof(LOC37)); LOC38 = (Ropeobj177006*)0; LOC38 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_505), LOC37, 0); add_177482_2381377266(&pl0, LOC38); line_530690_839829468(p0, ((Tcprocsection527011) 2), pl0); } goto LA19; LA24: ; { Tloc290816 tmp0; Ropeobj177006* LOC40; TY531289 LOC41; Ropeobj177006* LOC42; memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_535032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE); LOC40 = (Ropeobj177006*)0; LOC40 = addrloc_536204_839829468(tmp0); add_177482_2381377266(&pl0, LOC40); memset((void*)LOC41, 0, sizeof(LOC41)); LOC42 = (Ropeobj177006*)0; LOC42 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_505), LOC41, 0); add_177482_2381377266(&pl0, LOC42); 
line_530690_839829468(p0, ((Tcprocsection527011) 2), pl0); genassignment_537264_839829468(p0, (*d0), tmp0, 0); } LA19: ; } goto LA8; LA11: ; { TY531289 LOC44; Ropeobj177006* LOC45; memset((void*)LOC44, 0, sizeof(LOC44)); LOC45 = (Ropeobj177006*)0; LOC45 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_117), LOC44, 0); add_177482_2381377266(&pl0, LOC45); { NIM_BOOL LOC48; NIM_BOOL LOC49; LOC48 = (NIM_BOOL)0; LOC49 = (NIM_BOOL)0; LOC49 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC49) goto LA50; LOC49 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA50: ; LOC48 = LOC49; if (!(LOC48)) goto LA51; LOC48 = (((*d0).flags &(1U<<((NU)(((Tlocflag290810) 8))&15U)))!=0); LA51: ; if (!LOC48) goto LA52; (*d0).k = ((Tlockind290808) 9); unsureAsgnRef((void**) (&(*d0).r), pl0); (*d0).flags &= ~(((NU16)1) << ((((Tlocflag290810) 8)) % (sizeof(NU16)*8))); } goto LA46; LA52: ; { Tloc290816 list0; { if (!((*d0).k == ((Tlockind290808) 0))) goto LA57; gettemp_535032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE); } LA57: ; memset((void*)(&list0), 0, sizeof(list0)); initloc_530273_839829468((&list0), ((Tlockind290808) 9), (*d0).t, ((Tstorageloc290812) 0)); list0.r = pl0; genassignment_537264_839829468(p0, (*d0), list0, 0); } LA46: ; } LA8: ; } goto LA4; LA6: ; { TY531289 LOC60; Ropeobj177006* LOC61; memset((void*)LOC60, 0, sizeof(LOC60)); LOC61 = (Ropeobj177006*)0; LOC61 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_505), LOC60, 0); add_177482_2381377266(&pl0, LOC61); line_530690_839829468(p0, ((Tcprocsection527011) 2), pl0); } LA4: ; } N_NIMCALL(void, geninfixcall_539929_839829468)(Tcproc527021* p0, Tnode290802* le0, Tnode290802* ri0, Tloc290816* d0) { Tloc290816 op0; Ttype290840* typ_539940_839829468; NI length0; NimStringDesc* pat0; memset((void*)(&op0), 0, sizeof(op0)); initlocexpr_537283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0)); typ_539940_839829468 = 
skiptypes_294099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); length0 = sonslen_293351_850551059(ri0); pat0 = (*(*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).loc.r).data; { NimStringDesc* LOC5; if (!!(!((pat0 == NIM_NIL)))) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = HEX24_194185_1689653243(T839829468_498); internalerror_194113_155036129(LOC5); } LA3: ; { NIM_BOOL LOC8; Ropeobj177006* pl0; Ttype290840* typ0; LOC8 = (NIM_BOOL)0; LOC8 = contains_109056_4286263276(pat0, T839829468_500); if (!LOC8) goto LA9; pl0 = genpatterncall_539699_839829468(p0, ri0, pat0, typ_539940_839829468); typ0 = skiptypes_294099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); { if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA13; { NIM_BOOL LOC17; NIM_BOOL LOC18; LOC17 = (NIM_BOOL)0; LOC18 = (NIM_BOOL)0; LOC18 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC18) goto LA19; LOC18 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA19: ; LOC17 = LOC18; if (!(LOC17)) goto LA20; LOC17 = (((*d0).flags &(1U<<((NU)(((Tlocflag290810) 8))&15U)))!=0); LA20: ; if (!LOC17) goto LA21; (*d0).k = ((Tlockind290808) 9); unsureAsgnRef((void**) (&(*d0).r), pl0); (*d0).flags &= ~(((NU16)1) << ((((Tlocflag290810) 8)) % (sizeof(NU16)*8))); } goto LA15; LA21: ; { Tloc290816 list0; { if (!((*d0).k == ((Tlockind290808) 0))) goto LA26; gettemp_535032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE); } LA26: ; memset((void*)(&list0), 0, sizeof(list0)); initloc_530273_839829468((&list0), ((Tlockind290808) 9), (*d0).t, ((Tstorageloc290812) 0)); list0.r = pl0; genassignment_537264_839829468(p0, (*d0), list0, 0); } LA15: ; } goto LA11; LA13: ; { TY531289 LOC29; Ropeobj177006* LOC30; memset((void*)LOC29, 0, sizeof(LOC29)); LOC30 = (Ropeobj177006*)0; LOC30 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_497), LOC29, 0); add_177482_2381377266(&pl0, LOC30); line_530690_839829468(p0, 
((Tcprocsection527011) 2), pl0); } LA11: ; } goto LA6; LA9: ; { Ropeobj177006* pl0; Ropeobj177006* params0; pl0 = NIM_NIL; { NI LOC34; Ropeobj177006* LOC37; LOC34 = (NI)0; LOC34 = len_291081_850551059(ri0); if (!(((NI) 1) < LOC34)) goto LA35; LOC37 = (Ropeobj177006*)0; LOC37 = genthisarg_539475_839829468(p0, ri0, ((NI) 1), typ_539940_839829468); add_177482_2381377266(&pl0, LOC37); } LA35: ; add_177482_2381377266(&pl0, op0.r); params0 = (Ropeobj177006*)0; { NI i_540425_839829468; NI HEX3Atmp_540609_839829468; NI res_540612_839829468; i_540425_839829468 = (NI)0; HEX3Atmp_540609_839829468 = (NI)0; HEX3Atmp_540609_839829468 = (NI)(length0 - ((NI) 1)); res_540612_839829468 = ((NI) 2); { while (1) { Ropeobj177006* LOC47; if (!(res_540612_839829468 <= HEX3Atmp_540609_839829468)) goto LA40; i_540425_839829468 = res_540612_839829468; { TY531289 LOC45; Ropeobj177006* LOC46; if (!!((params0 == NIM_NIL))) goto LA43; memset((void*)LOC45, 0, sizeof(LOC45)); LOC46 = (Ropeobj177006*)0; LOC46 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_110), LOC45, 0); add_177482_2381377266(&params0, LOC46); } LA43: ; LOC47 = (Ropeobj177006*)0; LOC47 = genotherarg_537277_839829468(p0, ri0, i_540425_839829468, typ_539940_839829468); add_177482_2381377266(&params0, LOC47); res_540612_839829468 += ((NI) 1); } LA40: ; } } fixupcall_537410_839829468(p0, le0, ri0, d0, pl0, params0); } LA6: ; } N_NIMCALL(void, gennamedparamcall_540616_839829468)(Tcproc527021* p0, Tnode290802* ri0, Tloc290816* d0) { Tloc290816 op0; Ropeobj177006* pl0; TY531289 LOC1; Ttype290840* typ0; NI length0; NimStringDesc* pat0; NI start0; memset((void*)(&op0), 0, sizeof(op0)); initlocexpr_537283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0)); memset((void*)LOC1, 0, sizeof(LOC1)); pl0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_506), LOC1, 0); typ0 = skiptypes_294099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); length0 = sonslen_293351_850551059(ri0); pat0 = 
(*(*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).loc.r).data; { NimStringDesc* LOC6; if (!!(!((pat0 == NIM_NIL)))) goto LA4; LOC6 = (NimStringDesc*)0; LOC6 = HEX24_194185_1689653243(T839829468_507); internalerror_194113_155036129(LOC6); } LA4: ; start0 = ((NI) 3); { NIM_BOOL LOC9; LOC9 = (NIM_BOOL)0; LOC9 = contains_109046_4286263276(pat0, 32); if (!LOC9) goto LA10; start0 = ((NI) 1); add_177482_2381377266(&pl0, op0.r); { TY531289 LOC16; Ropeobj177006* LOC17; Ropeobj177006* LOC18; if (!(((NI) 1) < length0)) goto LA14; memset((void*)LOC16, 0, sizeof(LOC16)); LOC17 = (Ropeobj177006*)0; LOC17 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_244), LOC16, 0); add_177482_2381377266(&pl0, LOC17); LOC18 = (Ropeobj177006*)0; LOC18 = genarg_537787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 1)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym, ri0); add_177482_2381377266(&pl0, LOC18); start0 = ((NI) 2); } LA14: ; } goto LA7; LA10: ; { { Ropeobj177006* LOC24; TY531289 LOC25; Ropeobj177006* LOC26; if (!(((NI) 1) < length0)) goto LA22; LOC24 = (Ropeobj177006*)0; LOC24 = genarg_537787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 1)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym, ri0); add_177482_2381377266(&pl0, LOC24); memset((void*)LOC25, 0, sizeof(LOC25)); LOC26 = (Ropeobj177006*)0; LOC26 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_111), LOC25, 0); add_177482_2381377266(&pl0, LOC26); } LA22: ; add_177482_2381377266(&pl0, op0.r); { TY531289 LOC31; Ropeobj177006* LOC32; Ropeobj177006* LOC33; if (!(((NI) 2) < length0)) goto LA29; memset((void*)LOC31, 0, sizeof(LOC31)); LOC32 = (Ropeobj177006*)0; LOC32 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_244), LOC31, 0); add_177482_2381377266(&pl0, LOC32); LOC33 = (Ropeobj177006*)0; LOC33 = genarg_537787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 2)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 2)]).kindU.S4.sym, ri0); add_177482_2381377266(&pl0, LOC33); } LA29: ; } 
LA7: ; { NI i_541051_839829468; NI HEX3Atmp_541617_839829468; NI res_541620_839829468; i_541051_839829468 = (NI)0; HEX3Atmp_541617_839829468 = (NI)0; HEX3Atmp_541617_839829468 = (NI)(length0 - ((NI) 1)); res_541620_839829468 = start0; { while (1) { Tsym290834* param0; TY531289 LOC42; Ropeobj177006* LOC43; TY531289 LOC44; Ropeobj177006* LOC45; Ropeobj177006* LOC46; if (!(res_541620_839829468 <= HEX3Atmp_541617_839829468)) goto LA36; i_541051_839829468 = res_541620_839829468; { NI LOC39; LOC39 = (NI)0; LOC39 = sonslen_293327_850551059(typ0); if (!(LOC39 <= i_541051_839829468)) goto LA40; internalerror_194100_155036129((*ri0).info, ((NimStringDesc*) &T839829468_508)); } LA40: ; param0 = (*(*(*typ0).n).kindU.S6.sons->data[i_541051_839829468]).kindU.S4.sym; memset((void*)LOC42, 0, sizeof(LOC42)); LOC43 = (Ropeobj177006*)0; LOC43 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_111), LOC42, 0); add_177482_2381377266(&pl0, LOC43); add_177487_2381377266(&pl0, (*(*param0).name).s); memset((void*)LOC44, 0, sizeof(LOC44)); LOC45 = (Ropeobj177006*)0; LOC45 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_244), LOC44, 0); add_177482_2381377266(&pl0, LOC45); LOC46 = (Ropeobj177006*)0; LOC46 = genarg_537787_839829468(p0, (*ri0).kindU.S6.sons->data[i_541051_839829468], param0, ri0); add_177482_2381377266(&pl0, LOC46); res_541620_839829468 += ((NI) 1); } LA36: ; } } { if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA49; { NIM_BOOL LOC53; LOC53 = (NIM_BOOL)0; LOC53 = isinvalidreturntype_531548_839829468((*typ0).sons->data[((NI) 0)]); if (!LOC53) goto LA54; { NI LOC58; TY531289 LOC61; Ropeobj177006* LOC62; LOC58 = (NI)0; LOC58 = sonslen_293351_850551059(ri0); if (!(((NI) 1) < LOC58)) goto LA59; memset((void*)LOC61, 0, sizeof(LOC61)); LOC62 = (Ropeobj177006*)0; LOC62 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_111), LOC61, 0); add_177482_2381377266(&pl0, LOC62); } LA59: ; { TY531289 LOC71; Ropeobj177006* LOC72; Ropeobj177006* LOC73; TY531289 LOC74; 
Ropeobj177006* LOC75; if (!((3 &(1U<<((NU)((*d0).k)&15U)))!=0)) goto LA65; { if (!((*d0).k == ((Tlockind290808) 0))) goto LA69; gettemp_535032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE); } LA69: ; memset((void*)LOC71, 0, sizeof(LOC71)); LOC72 = (Ropeobj177006*)0; LOC72 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_509), LOC71, 0); add_177482_2381377266(&pl0, LOC72); LOC73 = (Ropeobj177006*)0; LOC73 = addrloc_536204_839829468((*d0)); add_177482_2381377266(&pl0, LOC73); memset((void*)LOC74, 0, sizeof(LOC74)); LOC75 = (Ropeobj177006*)0; LOC75 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_510), LOC74, 0); add_177482_2381377266(&pl0, LOC75); line_530690_839829468(p0, ((Tcprocsection527011) 2), pl0); } goto LA63; LA65: ; { Tloc290816 tmp0; Ropeobj177006* LOC77; TY531289 LOC78; Ropeobj177006* LOC79; memset((void*)(&tmp0), 0, sizeof(tmp0)); gettemp_535032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE); LOC77 = (Ropeobj177006*)0; LOC77 = addrloc_536204_839829468(tmp0); add_177482_2381377266(&pl0, LOC77); memset((void*)LOC78, 0, sizeof(LOC78)); LOC79 = (Ropeobj177006*)0; LOC79 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_510), LOC78, 0); add_177482_2381377266(&pl0, LOC79); line_530690_839829468(p0, ((Tcprocsection527011) 2), pl0); genassignment_537264_839829468(p0, (*d0), tmp0, 0); } LA63: ; } goto LA51; LA54: ; { TY531289 LOC81; Ropeobj177006* LOC82; Tloc290816 list0; memset((void*)LOC81, 0, sizeof(LOC81)); LOC82 = (Ropeobj177006*)0; LOC82 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_511), LOC81, 0); add_177482_2381377266(&pl0, LOC82); { if (!((*d0).k == ((Tlockind290808) 0))) goto LA85; gettemp_535032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE); } LA85: ; memset((void*)(&list0), 0, sizeof(list0)); initloc_530273_839829468((&list0), ((Tlockind290808) 9), NIM_NIL, ((Tstorageloc290812) 0)); list0.r = pl0; genassignment_537264_839829468(p0, (*d0), list0, 0); } LA51: ; } goto LA47; LA49: ; { 
TY531289 LOC88; Ropeobj177006* LOC89; memset((void*)LOC88, 0, sizeof(LOC88)); LOC89 = (Ropeobj177006*)0; LOC89 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_510), LOC88, 0); add_177482_2381377266(&pl0, LOC89); line_530690_839829468(p0, ((Tcprocsection527011) 2), pl0); } LA47: ; } N_NIMCALL(void, genprefixcall_537960_839829468)(Tcproc527021* p0, Tnode290802* le0, Tnode290802* ri0, Tloc290816* d0) { Tloc290816 op0; Ropeobj177006* params0; Ttype290840* typ0; NI length0; memset((void*)(&op0), 0, sizeof(op0)); initlocexpr_537283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0)); params0 = (Ropeobj177006*)0; typ0 = skiptypes_294099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); length0 = sonslen_293351_850551059(ri0); { NI i_538213_839829468; NI HEX3Atmp_538445_839829468; NI res_538448_839829468; i_538213_839829468 = (NI)0; HEX3Atmp_538445_839829468 = (NI)0; HEX3Atmp_538445_839829468 = (NI)(length0 - ((NI) 1)); res_538448_839829468 = ((NI) 1); { while (1) { if (!(res_538448_839829468 <= HEX3Atmp_538445_839829468)) goto LA3; i_538213_839829468 = res_538448_839829468; { NI LOC6; Tnode290802* paramtype0; LOC6 = (NI)0; LOC6 = sonslen_293327_850551059(typ0); if (!(i_538213_839829468 < LOC6)) goto LA7; paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i_538213_839829468]; { NIM_BOOL LOC11; Ropeobj177006* LOC20; LOC11 = (NIM_BOOL)0; LOC11 = iscompiletimeonly_326706_3876443242((*paramtype0).typ); if (!!(LOC11)) goto LA12; { TY531289 LOC18; Ropeobj177006* LOC19; if (!!((params0 == NIM_NIL))) goto LA16; memset((void*)LOC18, 0, sizeof(LOC18)); LOC19 = (Ropeobj177006*)0; LOC19 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_110), LOC18, 0); add_177482_2381377266(&params0, LOC19); } LA16: ; LOC20 = (Ropeobj177006*)0; LOC20 = genarg_537787_839829468(p0, (*ri0).kindU.S6.sons->data[i_538213_839829468], (*paramtype0).kindU.S4.sym, ri0); add_177482_2381377266(&params0, LOC20); } LA12: ; } goto LA4; LA7: ; { Ropeobj177006* LOC28; { 
TY531289 LOC26; Ropeobj177006* LOC27; if (!!((params0 == NIM_NIL))) goto LA24; memset((void*)LOC26, 0, sizeof(LOC26)); LOC27 = (Ropeobj177006*)0; LOC27 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_110), LOC26, 0); add_177482_2381377266(&params0, LOC27); } LA24: ; LOC28 = (Ropeobj177006*)0; LOC28 = genargnoparam_537938_839829468(p0, (*ri0).kindU.S6.sons->data[i_538213_839829468]); add_177482_2381377266(&params0, LOC28); } LA4: ; res_538448_839829468 += ((NI) 1); } LA3: ; } } fixupcall_537410_839829468(p0, le0, ri0, d0, op0.r, params0); } static N_INLINE(void, poststmtactions_530942_839829468)(Tcproc527021* p0) { Ropeobj177006** LOC1; LOC1 = (Ropeobj177006**)0; LOC1 = s_527179_3723162438(p0, ((Tcprocsection527011) 2)); add_177482_2381377266(LOC1, (*(*p0).module).injectstmt); } N_NIMCALL(void, gencall_541632_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { { Ttype290840* LOC3; LOC3 = (Ttype290840*)0; LOC3 = skiptypes_294099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, 2048); if (!((*LOC3).callconv == ((Tcallingconvention290002) 8))) goto LA4; genclosurecall_538452_839829468(p0, NIM_NIL, e0, d0); } goto LA1; LA4: ; { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 3)); if (!(LOC7)) goto LA8; LOC7 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA8: ; if (!LOC7) goto LA9; geninfixcall_539929_839829468(p0, NIM_NIL, e0, d0); } goto LA1; LA9: ; { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 3)); if (!(LOC12)) goto LA13; LOC12 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag290184) 28))&31U)))!=0); LA13: ; if (!LOC12) goto LA14; gennamedparamcall_540616_839829468(p0, e0, d0); } goto LA1; LA14: ; { genprefixcall_537960_839829468(p0, NIM_NIL, e0, d0); } LA1: ; poststmtactions_530942_839829468(p0); } N_NIMCALL(void, 
genreset_552731_839829468)(Tcproc527021* p0, Tnode290802* n0) { Tloc290816 a0; TY530811 LOC1; Ttype290840* LOC2; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = addrloc_536204_839829468(a0); LOC2 = (Ttype290840*)0; LOC2 = skiptypes_294099_850551059(a0.t, IL64(211106242013440)); LOC1[1] = gentypeinfo_533941_839829468((*p0).module, LOC2); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_496), LOC1, 2); } N_NIMCALL(void, genecho_552369_839829468)(Tcproc527021* p0, Tnode290802* n0) { NIM_BOOL LOC6; Ropeobj177006* args0; Tloc290816 a0; TY530811 LOC18; NimStringDesc* LOC19; NI LOC20; NimStringDesc* LOC21; TY531289 LOC22; { NimStringDesc* LOC5; if (!!(((*n0).kind == ((Tnodekind290020) 41)))) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = HEX24_194185_1689653243(T839829468_512); internalerror_194113_155036129(LOC5); } LA3: ; LOC6 = (NIM_BOOL)0; LOC6 = includestr_147249_3771138726((&(*(*p0).module).headerfiles), ((NimStringDesc*) &T839829468_513)); args0 = NIM_NIL; memset((void*)(&a0), 0, sizeof(a0)); { NI i_552404_839829468; NI HEX3Atmp_552431_839829468; NI LOC8; NI res_552434_839829468; i_552404_839829468 = (NI)0; HEX3Atmp_552431_839829468 = (NI)0; LOC8 = (NI)0; LOC8 = len_291081_850551059(n0); HEX3Atmp_552431_839829468 = (NI)(LOC8 - ((NI) 1)); res_552434_839829468 = ((NI) 0); { while (1) { if (!(res_552434_839829468 <= HEX3Atmp_552431_839829468)) goto LA10; i_552404_839829468 = res_552434_839829468; { Tnode290802* LOC13; LOC13 = (Tnode290802*)0; LOC13 = skipconv_326882_3876443242((*n0).kindU.S6.sons->data[i_552404_839829468]); if (!((*LOC13).kind == ((Tnodekind290020) 23))) goto LA14; add_177487_2381377266(&args0, ((NimStringDesc*) &T839829468_514)); } goto LA11; LA14: ; { TY177507 LOC17; initlocexpr_537283_839829468(p0, (*n0).kindU.S6.sons->data[i_552404_839829468], (&a0)); memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = 
rdloc_536188_839829468(a0); addf_178205_2381377266(&args0, ((NimStringDesc*) &T839829468_515), LOC17, 1); } LA11: ; res_552434_839829468 += ((NI) 1); } LA10: ; } } memset((void*)LOC18, 0, sizeof(LOC18)); LOC19 = (NimStringDesc*)0; LOC20 = (NI)0; LOC20 = len_291081_850551059(n0); LOC21 = (NimStringDesc*)0; LOC21 = nsuRepeatStr(((NimStringDesc*) &T839829468_517), ((NI) (LOC20))); LOC19 = rawNewString(LOC21->Sup.len + tnl_175644_4151366050->Sup.len + 0); appendString(LOC19, LOC21); appendString(LOC19, tnl_175644_4151366050); LOC18[0] = makecstring_189638_155036129(LOC19); LOC18[1] = args0; linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_516), LOC18, 2); memset((void*)LOC22, 0, sizeof(LOC22)); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_518), LOC22, 0); } N_NIMCALL(void, genseqconstr_553004_839829468)(Tcproc527021* p0, Tnode290802* t0, Tloc290816* d0) { Tloc290816 arr0; NI LOC5; Ropeobj177006* LOC6; memset((void*)(&arr0), 0, sizeof(arr0)); { if (!((*d0).k == ((Tlockind290808) 0))) goto LA3; gettemp_535032_839829468(p0, (*t0).typ, d0, NIM_FALSE); } LA3: ; LOC5 = (NI)0; LOC5 = sonslen_293351_850551059(t0); LOC6 = (Ropeobj177006*)0; LOC6 = intliteral_537270_839829468(((NI64) (LOC5))); gennewseqaux_552795_839829468(p0, (*d0), LOC6); { NI i_553031_839829468; NI HEX3Atmp_553039_839829468; NI LOC8; NI res_553042_839829468; i_553031_839829468 = (NI)0; HEX3Atmp_553039_839829468 = (NI)0; LOC8 = (NI)0; LOC8 = sonslen_293351_850551059(t0); HEX3Atmp_553039_839829468 = (NI)(LOC8 - ((NI) 1)); res_553042_839829468 = ((NI) 0); { while (1) { Ttype290840* LOC11; Ttype290840* LOC12; TY530811 LOC13; if (!(res_553042_839829468 <= HEX3Atmp_553039_839829468)) goto LA10; i_553031_839829468 = res_553042_839829468; LOC11 = (Ttype290840*)0; LOC11 = skiptypes_294099_850551059((*t0).typ, IL64(211106232576256)); LOC12 = (Ttype290840*)0; LOC12 = elemtype_318394_3876443242(LOC11); initloc_530273_839829468((&arr0), 
((Tlockind290808) 6), LOC12, ((Tstorageloc290812) 3)); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = rdloc_536188_839829468((*d0)); LOC13[1] = intliteral_537270_839829468(((NI64) (i_553031_839829468))); arr0.r = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC13, 2); arr0.s = ((Tstorageloc290812) 3); expr_537248_839829468(p0, (*t0).kindU.S6.sons->data[i_553031_839829468], (&arr0)); res_553042_839829468 += ((NI) 1); } LA10: ; } } gcusage_552439_839829468(t0); } N_NIMCALL(void, genarrtoseq_553046_839829468)(Tcproc527021* p0, Tnode290802* t0, Tloc290816* d0) { Tloc290816 elem0; Tloc290816 a0; Tloc290816 arr0; NI L0; NI64 LOC9; Ropeobj177006* LOC10; { memset((void*)(&elem0), 0, sizeof(elem0)); memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&arr0), 0, sizeof(arr0)); { if (!((*t0).kind == ((Tnodekind290020) 41))) goto LA3; asgnRefNoCycle((void**) (&(*(*t0).kindU.S6.sons->data[((NI) 1)]).typ), (*t0).typ); genseqconstr_553004_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], d0); goto BeforeRet; } LA3: ; { if (!((*d0).k == ((Tlockind290808) 0))) goto LA7; gettemp_535032_839829468(p0, (*t0).typ, d0, NIM_FALSE); } LA7: ; LOC9 = (NI64)0; LOC9 = lengthord_318007_3876443242((*(*t0).kindU.S6.sons->data[((NI) 1)]).typ); L0 = ((NI) (LOC9)); LOC10 = (Ropeobj177006*)0; LOC10 = intliteral_537270_839829468(((NI64) (L0))); gennewseqaux_552795_839829468(p0, (*d0), LOC10); initlocexpr_537283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], (&a0)); { NI i_553090_839829468; NI HEX3Atmp_553103_839829468; NI res_553106_839829468; i_553090_839829468 = (NI)0; HEX3Atmp_553103_839829468 = (NI)0; HEX3Atmp_553103_839829468 = (NI)(L0 - ((NI) 1)); res_553106_839829468 = ((NI) 0); { while (1) { Ttype290840* LOC14; Ttype290840* LOC15; TY530811 LOC16; Ttype290840* LOC17; Ttype290840* LOC18; TY530811 LOC19; if (!(res_553106_839829468 <= HEX3Atmp_553103_839829468)) goto LA13; i_553090_839829468 = res_553106_839829468; LOC14 = (Ttype290840*)0; LOC14 = 
skiptypes_294099_850551059((*t0).typ, IL64(211106232576256)); LOC15 = (Ttype290840*)0; LOC15 = elemtype_318394_3876443242(LOC14); initloc_530273_839829468((&elem0), ((Tlockind290808) 6), LOC15, ((Tstorageloc290812) 3)); memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_536188_839829468((*d0)); LOC16[1] = intliteral_537270_839829468(((NI64) (i_553090_839829468))); elem0.r = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC16, 2); elem0.s = ((Tstorageloc290812) 3); LOC17 = (Ttype290840*)0; LOC17 = skiptypes_294099_850551059((*(*t0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106232576256)); LOC18 = (Ttype290840*)0; LOC18 = elemtype_318394_3876443242(LOC17); initloc_530273_839829468((&arr0), ((Tlockind290808) 6), LOC18, a0.s); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = rdloc_536188_839829468(a0); LOC19[1] = intliteral_537270_839829468(((NI64) (i_553090_839829468))); arr0.r = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC19, 2); genassignment_537264_839829468(p0, elem0, arr0, 3); res_553106_839829468 += ((NI) 1); } LA13: ; } } }BeforeRet: ; } N_NIMCALL(void, gendeepcopy_548374_839829468)(Tcproc527021* p0, Tloc290816 dest0, Tloc290816 src0) { Ttype290840* ty0; ty0 = skiptypes_294099_850551059(dest0.t, IL64(211106242013440)); switch ((*ty0).kind) { case ((Ttypekind290244) 21): case ((Ttypekind290244) 22): case ((Ttypekind290244) 25): case ((Ttypekind290244) 18): case ((Ttypekind290244) 17): case ((Ttypekind290244) 16): case ((Ttypekind290244) 4): { TY533238 LOC2; memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = addrloc_536204_839829468(dest0); LOC2[1] = addrloc_536204_839829468(src0); LOC2[2] = gentypeinfo_533941_839829468((*p0).module, dest0.t); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_519), LOC2, 3); } break; case ((Ttypekind290244) 24): case ((Ttypekind290244) 28): { TY533238 LOC4; memset((void*)LOC4, 0, sizeof(LOC4)); LOC4[0] = addrloc_536204_839829468(dest0); 
LOC4[1] = rdloc_536188_839829468(src0); LOC4[2] = gentypeinfo_533941_839829468((*p0).module, dest0.t); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_520), LOC4, 3); } break; case ((Ttypekind290244) 27): case ((Ttypekind290244) 48): { TY533238 LOC6; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = addrloc_536204_839829468(dest0); LOC6[1] = addrloc_536204_839829468(src0); LOC6[2] = gentypeinfo_533941_839829468((*p0).module, dest0.t); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_521), LOC6, 3); } break; case ((Ttypekind290244) 19): { { Tctypekind527007 LOC10; TY533238 LOC13; NI64 LOC14; LOC10 = (Tctypekind527007)0; LOC10 = maptype_531393_839829468(ty0); if (!(LOC10 == ((Tctypekind527007) 17))) goto LA11; usestringh_530345_839829468((*p0).module); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = rdloc_536188_839829468(dest0); LOC13[1] = rdloc_536188_839829468(src0); LOC14 = (NI64)0; LOC14 = getsize_318135_3876443242(dest0.t); LOC13[2] = rope_177401_2381377266(LOC14); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_268), LOC13, 3); } goto LA8; LA11: ; { TY530811 LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_536188_839829468(dest0); LOC16[1] = rdloc_536188_839829468(src0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_123), LOC16, 2); } LA8: ; } break; case ((Ttypekind290244) 26): case ((Ttypekind290244) 2): case ((Ttypekind290244) 1): case ((Ttypekind290244) 14): case ((Ttypekind290244) 29): case ((Ttypekind290244) 31) ... 
((Ttypekind290244) 44): case ((Ttypekind290244) 20): case ((Ttypekind290244) 23): { TY530811 LOC18; memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = rdloc_536188_839829468(dest0); LOC18[1] = rdloc_536188_839829468(src0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_123), LOC18, 2); } break; default: { NimStringDesc* LOC20; LOC20 = (NimStringDesc*)0; LOC20 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI290244))->Sup.len + 13); appendString(LOC20, ((NimStringDesc*) &T839829468_522)); appendString(LOC20, reprEnum((NI)(*ty0).kind, (&NTI290244))); internalerror_194113_155036129(LOC20); } break; } } N_NIMCALL(void, genmagicexpr_555033_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, Tmagic290524 op0) { switch (op0) { case ((Tmagic290524) 127): case ((Tmagic290524) 126): { genandor_552311_839829468(p0, e0, d0, op0); } break; case ((Tmagic290524) 99) ... ((Tmagic290524) 117): { unaryarith_550646_839829468(p0, e0, d0, op0); } break; case ((Tmagic290524) 96) ... ((Tmagic290524) 98): { unaryarithoverflow_549633_839829468(p0, e0, d0, op0); } break; case ((Tmagic290524) 52) ... ((Tmagic290524) 55): { binaryfloatarith_554728_839829468(p0, e0, d0, op0); } break; case ((Tmagic290524) 56) ... ((Tmagic290524) 93): { binaryarith_549819_839829468(p0, e0, d0, op0); } break; case ((Tmagic290524) 95): { geneqproc_550214_839829468(p0, e0, d0); } break; case ((Tmagic290524) 45) ... 
((Tmagic290524) 51): { binaryarithoverflow_549262_839829468(p0, e0, d0, op0); } break; case ((Tmagic290524) 149): { genrepr_553339_839829468(p0, e0, d0); } break; case ((Tmagic290524) 259): { gengettypeinfo_553383_839829468(p0, e0, d0); } break; case ((Tmagic290524) 156): { genswap_553638_839829468(p0, e0, d0); } break; case ((Tmagic290524) 25): { { if (!!((((*p0).options &(1U<<((NU)(((Toption168009) 5))&31U)))!=0))) goto LA14; unaryexpr_549209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_385)); } goto LA12; LA14: ; { unaryexpr_549209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_386)); } LA12: ; } break; case ((Tmagic290524) 26): case ((Tmagic290524) 27): { Ttype290840* underlying0; underlying0 = skiptypes_294099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 9439232); { NIM_BOOL LOC20; LOC20 = (NIM_BOOL)0; LOC20 = !((((*p0).options &(1U<<((NU)(((Toption168009) 5))&31U)))!=0)); if (LOC20) goto LA21; LOC20 = ((*underlying0).kind >= ((Ttypekind290244) 40) && (*underlying0).kind <= ((Ttypekind290244) 44)); LA21: ; if (!LOC20) goto LA22; binarystmt_548501_839829468(p0, e0, d0, opr_555050_839829468[(op0)- 26]); } goto LA18; LA22: ; { Tloc290816 a0; Tloc290816 b0; Ttype290840* ranged0; Ropeobj177006* res0; NimStringDesc* LOC25; TY530811 LOC31; Ropeobj177006* LOC32; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); ranged0 = skiptypes_294099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 8390656); LOC25 = (NimStringDesc*)0; { if (!((*underlying0).kind == ((Ttypekind290244) 35))) goto LA28; LOC25 = copyString(fun64_555055_839829468[(op0)- 26]); } goto LA26; LA28: ; { LOC25 = copyString(fun_555060_839829468[(op0)- 26]); } LA26: ; res0 = binaryarithoverflowraw_549235_839829468(p0, ranged0, a0, b0, LOC25); memset((void*)LOC31, 0, sizeof(LOC31)); LOC31[0] = 
gettypedesc_533671_839829468((*p0).module, ranged0); LOC31[1] = res0; LOC32 = (Ropeobj177006*)0; LOC32 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_370), LOC31, 2); putintodest_548468_839829468(p0, (&a0), ranged0, LOC32, ((Tstorageloc290812) 0)); } LA18: ; } break; case ((Tmagic290524) 138): { genstrconcat_552452_839829468(p0, e0, d0); } break; case ((Tmagic290524) 144): { binarystmt_548501_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_394)); } break; case ((Tmagic290524) 145): { genstrappend_552554_839829468(p0, e0, d0); } break; case ((Tmagic290524) 146): { genseqelemappend_552683_839829468(p0, e0, d0); } break; case ((Tmagic290524) 128): { genstrequals_554666_839829468(p0, e0, d0); } break; case ((Tmagic290524) 129): { binaryexpr_548549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_402)); } break; case ((Tmagic290524) 130): { binaryexpr_548549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_403)); } break; case ((Tmagic290524) 157): { genisnil_550620_839829468(p0, e0, d0); } break; case ((Tmagic290524) 120): { gendollar_553391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_406)); } break; case ((Tmagic290524) 121): { gendollar_553391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_407)); } break; case ((Tmagic290524) 119): { gendollar_553391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_408)); } break; case ((Tmagic290524) 118): { gendollar_553391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_409)); } break; case ((Tmagic290524) 122): { gendollar_553391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_410)); } break; case ((Tmagic290524) 123): { gendollar_553391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_411)); } break; case ((Tmagic290524) 124): { expr_537248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], d0); } break; case ((Tmagic290524) 125): { genrepr_553339_839829468(p0, e0, d0); } break; case ((Tmagic290524) 12): { genof_553331_839829468(p0, e0, d0); } break; case ((Tmagic290524) 29): { 
gennew_552782_839829468(p0, e0); } break; case ((Tmagic290524) 30): { gennewfinalize_553110_839829468(p0, e0); } break; case ((Tmagic290524) 31): { gennewseq_552824_839829468(p0, e0); } break; case ((Tmagic290524) 32): { gennewseqofcap_552836_839829468(p0, e0, d0); } break; case ((Tmagic290524) 9): { Ttype290840* t0; TY177507 LOC55; Ropeobj177006* LOC56; t0 = skiptypes_294099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 256); memset((void*)LOC55, 0, sizeof(LOC55)); LOC55[0] = gettypedesc_533671_839829468((*p0).module, t0); LOC56 = (Ropeobj177006*)0; LOC56 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_428), LOC55, 1); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC56, ((Tstorageloc290812) 0)); } break; case ((Tmagic290524) 42): { gensomecast_554480_839829468(p0, e0, d0); } break; case ((Tmagic290524) 28): { genord_554474_839829468(p0, e0, d0); } break; case ((Tmagic290524) 35): case ((Tmagic290524) 8): case ((Tmagic290524) 34): case ((Tmagic290524) 36): case ((Tmagic290524) 33): { genarraylen_553415_839829468(p0, e0, d0, op0); } break; case ((Tmagic290524) 37): case ((Tmagic290524) 38): { { NIM_BOOL LOC63; LOC63 = (NIM_BOOL)0; LOC63 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC63) goto LA64; LOC63 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA64: ; if (!!(LOC63)) goto LA65; unaryexpr_549209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_440)); } goto LA61; LA65: ; { unaryexpr_549209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_441)); } LA61: ; } break; case ((Tmagic290524) 43): { unarystmt_548527_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_443)); } break; case ((Tmagic290524) 44): { unarystmt_548527_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_444)); } break; case ((Tmagic290524) 151): { gensetlengthstr_553632_839829468(p0, e0, d0); } break; case ((Tmagic290524) 152): { gensetlengthseq_553500_839829468(p0, e0, d0); } break; case ((Tmagic290524) 39): case 
((Tmagic290524) 40): case ((Tmagic290524) 41): case ((Tmagic290524) 133): case ((Tmagic290524) 132): case ((Tmagic290524) 131): case ((Tmagic290524) 134): case ((Tmagic290524) 135): case ((Tmagic290524) 136): case ((Tmagic290524) 148): { gensetop_554419_839829468(p0, e0, d0, op0); } break; case ((Tmagic290524) 161): case ((Tmagic290524) 162): case ((Tmagic290524) 159): case ((Tmagic290524) 160): case ((Tmagic290524) 150): case ((Tmagic290524) 163): { Tsym290834* opr0; opr0 = (*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; { NimStringDesc* LOC78; Ropeobj177006* LOC79; if (!!((((*opr0).loc.flags &(1U<<((NU)(((Tlocflag290810) 3))&15U)))!=0))) goto LA76; LOC78 = (NimStringDesc*)0; LOC78 = HEX24_177856_2381377266((*opr0).loc.r); LOC79 = (Ropeobj177006*)0; LOC79 = cgsym_530403_839829468((*p0).module, LOC78); } LA76: ; gencall_541632_839829468(p0, e0, d0); } break; case ((Tmagic290524) 164): { genreset_552731_839829468(p0, e0); } break; case ((Tmagic290524) 17): { Tnode290802* LOC82; Tnode290802* LOC83; LOC82 = (Tnode290802*)0; LOC82 = HEX5BHEX5D_291238_850551059(e0, ((NI) 1)); LOC83 = (Tnode290802*)0; LOC83 = skipconv_326882_3876443242(LOC82); genecho_552369_839829468(p0, LOC83); } break; case ((Tmagic290524) 158): { genarrtoseq_553046_839829468(p0, e0, d0); } break; case ((Tmagic290524) 223) ... ((Tmagic290524) 257): case ((Tmagic290524) 19) ... 
((Tmagic290524) 24): { localerror_194080_155036129((*e0).info, ((Tmsgkind189002) 229), (*(*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).name).s); } break; case ((Tmagic290524) 208): { Tnode290802* n0; n0 = wrapprocforspawn_433501_2218250499((*(*p0).module).module, e0, (*e0).typ, NIM_NIL, NIM_NIL); expr_537248_839829468(p0, n0, d0); } break; case ((Tmagic290524) 155): { Tnode290802* n0; n0 = liftparallel_476822_1773027539((*(*p0).module).module, e0); expr_537248_839829468(p0, n0, d0); } break; case ((Tmagic290524) 209): { Tloc290816 a0; Tloc290816 b0; Tnode290802* x0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); { Tnode290802* LOC91; Tnode290802* LOC94; LOC91 = (Tnode290802*)0; LOC91 = HEX5BHEX5D_291238_850551059(e0, ((NI) 1)); if (!((*LOC91).kind == ((Tnodekind290020) 63) || (*LOC91).kind == ((Tnodekind290020) 64))) goto LA92; LOC94 = (Tnode290802*)0; LOC94 = HEX5BHEX5D_291238_850551059(e0, ((NI) 1)); x0 = HEX5BHEX5D_291238_850551059(LOC94, ((NI) 0)); } goto LA89; LA92: ; { x0 = HEX5BHEX5D_291238_850551059(e0, ((NI) 1)); } LA89: ; initlocexpr_537283_839829468(p0, x0, (&a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0)); gendeepcopy_548374_839829468(p0, a0, b0); } break; case ((Tmagic290524) 140): case ((Tmagic290524) 94): { gencall_541632_839829468(p0, e0, d0); } break; default: { NimStringDesc* LOC98; LOC98 = (NimStringDesc*)0; LOC98 = rawNewString(reprEnum((NI)op0, (&NTI290524))->Sup.len + 14); appendString(LOC98, ((NimStringDesc*) &T839829468_523)); appendString(LOC98, reprEnum((NI)op0, (&NTI290524))); internalerror_194100_155036129((*e0).info, LOC98); } break; } } N_NIMCALL(Ropeobj177006*, gensetnode_547664_839829468)(Tcproc527021* p0, Tnode290802* n0) { Ropeobj177006* result0; Tbitset337004* cs0; NI size0; NI64 LOC1; result0 = (Ropeobj177006*)0; cs0 = (Tbitset337004*)0; LOC1 = (NI64)0; LOC1 = getsize_318135_3876443242((*n0).typ); size0 = ((NI) (LOC1)); tobitset_338001_452470228(n0, (&cs0)); { 
NI id0; Ropeobj177006* LOC6; if (!(((NI) 8) < size0)) goto LA4; id0 = nodetabletestorset_340682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels))); LOC6 = (Ropeobj177006*)0; LOC6 = rope_177401_2381377266(((NI64) (id0))); result0 = HEX26_177418_2381377266((*(*p0).module).tmpbase, LOC6); { TY533238 LOC11; if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA9; (*(*p0).module).labels += ((NI) 1); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = gettypedesc_533671_839829468((*p0).module, (*n0).typ); LOC11[1] = result0; LOC11[2] = genrawsetdata_547629_839829468(cs0, size0); addf_178205_2381377266(&(*(*p0).module).s[(((Tcfilesection527005) 8))- 0], ((NimStringDesc*) &T839829468_524), LOC11, 3); } LA9: ; } goto LA2; LA4: ; { result0 = genrawsetdata_547629_839829468(cs0, size0); } LA2: ; return result0; } N_NIMCALL(void, gensetconstr_555496_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { Tloc290816 a0; Tloc290816 b0; Tloc290816 idx0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); memset((void*)(&idx0), 0, sizeof(idx0)); { Ropeobj177006* LOC5; if (!(((*e0).flags &(1U<<((NU)(((Tnodeflag290427) 4))&15U)))!=0)) goto LA3; LOC5 = (Ropeobj177006*)0; LOC5 = gensetnode_547664_839829468(p0, e0); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC5, ((Tstorageloc290812) 0)); } goto LA1; LA3: ; { { if (!((*d0).k == ((Tlockind290808) 0))) goto LA9; gettemp_535032_839829468(p0, (*e0).typ, d0, NIM_FALSE); } LA9: ; { NI64 LOC13; TY177507 LOC16; LOC13 = (NI64)0; LOC13 = getsize_318135_3876443242((*e0).typ); if (!(IL64(8) < LOC13)) goto LA14; usestringh_530345_839829468((*p0).module); memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_536188_839829468((*d0)); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_525), LOC16, 1); { NI i_555537_839829468; NI HEX3Atmp_555603_839829468; NI LOC18; NI res_555606_839829468; i_555537_839829468 = (NI)0; HEX3Atmp_555603_839829468 = (NI)0; 
LOC18 = (NI)0; LOC18 = sonslen_293351_850551059(e0); HEX3Atmp_555603_839829468 = (NI)(LOC18 - ((NI) 1)); res_555606_839829468 = ((NI) 0); { while (1) { if (!(res_555606_839829468 <= HEX3Atmp_555603_839829468)) goto LA20; i_555537_839829468 = res_555606_839829468; { Ttype290840* LOC25; TY533235 LOC26; if (!((*(*e0).kindU.S6.sons->data[i_555537_839829468]).kind == ((Tnodekind290020) 44))) goto LA23; LOC25 = (Ttype290840*)0; LOC25 = getsystype_336150_3937434831(((Ttypekind290244) 31)); gettemp_535032_839829468(p0, LOC25, (&idx0), NIM_FALSE); initlocexpr_537283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_555537_839829468]).kindU.S6.sons->data[((NI) 0)], (&a0)); initlocexpr_537283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_555537_839829468]).kindU.S6.sons->data[((NI) 1)], (&b0)); memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = rdloc_536188_839829468(idx0); LOC26[1] = rdloc_536188_839829468((*d0)); LOC26[2] = rdsetelemloc_553662_839829468(a0, (*e0).typ); LOC26[3] = rdsetelemloc_553662_839829468(b0, (*e0).typ); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_526), LOC26, 4); } goto LA21; LA23: ; { TY530811 LOC28; initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[i_555537_839829468], (&a0)); memset((void*)LOC28, 0, sizeof(LOC28)); LOC28[0] = rdloc_536188_839829468((*d0)); LOC28[1] = rdsetelemloc_553662_839829468(a0, (*e0).typ); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_527), LOC28, 2); } LA21: ; res_555606_839829468 += ((NI) 1); } LA20: ; } } } goto LA11; LA14: ; { NimStringDesc* ts0; NimStringDesc* LOC30; NI64 LOC31; NimStringDesc* LOC32; TY177507 LOC33; LOC30 = (NimStringDesc*)0; LOC31 = (NI64)0; LOC31 = getsize_318135_3876443242((*e0).typ); LOC32 = (NimStringDesc*)0; LOC32 = nimInt64ToStr((NI64)(LOC31 * IL64(8))); LOC30 = rawNewString(LOC32->Sup.len + 2); appendString(LOC30, ((NimStringDesc*) &T839829468_45)); appendString(LOC30, LOC32); ts0 = LOC30; memset((void*)LOC33, 0, 
sizeof(LOC33)); LOC33[0] = rdloc_536188_839829468((*d0)); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_494), LOC33, 1); { NI i_555575_839829468; NI HEX3Atmp_555611_839829468; NI LOC35; NI res_555614_839829468; i_555575_839829468 = (NI)0; HEX3Atmp_555611_839829468 = (NI)0; LOC35 = (NI)0; LOC35 = sonslen_293351_850551059(e0); HEX3Atmp_555611_839829468 = (NI)(LOC35 - ((NI) 1)); res_555614_839829468 = ((NI) 0); { while (1) { if (!(res_555614_839829468 <= HEX3Atmp_555611_839829468)) goto LA37; i_555575_839829468 = res_555614_839829468; { Ttype290840* LOC42; NimStringDesc* LOC43; TY533235 LOC44; if (!((*(*e0).kindU.S6.sons->data[i_555575_839829468]).kind == ((Tnodekind290020) 44))) goto LA40; LOC42 = (Ttype290840*)0; LOC42 = getsystype_336150_3937434831(((Ttypekind290244) 31)); gettemp_535032_839829468(p0, LOC42, (&idx0), NIM_FALSE); initlocexpr_537283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_555575_839829468]).kindU.S6.sons->data[((NI) 0)], (&a0)); initlocexpr_537283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_555575_839829468]).kindU.S6.sons->data[((NI) 1)], (&b0)); LOC43 = (NimStringDesc*)0; LOC43 = rawNewString(ts0->Sup.len + ts0->Sup.len + 68); appendString(LOC43, ((NimStringDesc*) &T839829468_528)); appendString(LOC43, ts0); appendString(LOC43, ((NimStringDesc*) &T839829468_529)); appendString(LOC43, ts0); appendString(LOC43, ((NimStringDesc*) &T839829468_454)); memset((void*)LOC44, 0, sizeof(LOC44)); LOC44[0] = rdloc_536188_839829468(idx0); LOC44[1] = rdloc_536188_839829468((*d0)); LOC44[2] = rdsetelemloc_553662_839829468(a0, (*e0).typ); LOC44[3] = rdsetelemloc_553662_839829468(b0, (*e0).typ); linef_530700_839829468(p0, ((Tcprocsection527011) 2), LOC43, LOC44, 4); } goto LA38; LA40: ; { NimStringDesc* LOC46; TY530811 LOC47; initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[i_555575_839829468], (&a0)); LOC46 = (NimStringDesc*)0; LOC46 = rawNewString(ts0->Sup.len + ts0->Sup.len + 36); appendString(LOC46, 
((NimStringDesc*) &T839829468_530)); appendString(LOC46, ts0); appendString(LOC46, ((NimStringDesc*) &T839829468_531)); appendString(LOC46, ts0); appendString(LOC46, ((NimStringDesc*) &T839829468_454)); memset((void*)LOC47, 0, sizeof(LOC47)); LOC47[0] = rdloc_536188_839829468((*d0)); LOC47[1] = rdsetelemloc_553662_839829468(a0, (*e0).typ); linef_530700_839829468(p0, ((Tcprocsection527011) 2), LOC46, LOC47, 2); } LA38: ; res_555614_839829468 += ((NI) 1); } LA37: ; } } } LA11: ; } LA1: ; } N_NIMCALL(void, exprcomplexconst_556684_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0) { Ttype290840* t0; Ropeobj177006* LOC1; NI id0; Ropeobj177006* tmp0; Ropeobj177006* LOC2; t0 = getuniquetype_526640_2036603609((*n0).typ); LOC1 = (Ropeobj177006*)0; LOC1 = gettypedesc_533671_839829468((*p0).module, t0); id0 = nodetabletestorset_340682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels))); LOC2 = (Ropeobj177006*)0; LOC2 = rope_177401_2381377266(((NI64) (id0))); tmp0 = HEX26_177418_2381377266((*(*p0).module).tmpbase, LOC2); { TY533238 LOC7; if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA5; (*(*p0).module).labels += ((NI) 1); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = gettypedesc_533671_839829468((*p0).module, t0); LOC7[1] = tmp0; LOC7[2] = genconstexpr_552849_839829468(p0, n0); addf_178205_2381377266(&(*(*p0).module).s[(((Tcfilesection527005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC7, 3); } LA5: ; { if (!((*d0).k == ((Tlockind290808) 0))) goto LA10; fillloc_530282_839829468(d0, ((Tlockind290808) 8), t0, tmp0, ((Tstorageloc290812) 1)); } goto LA8; LA10: ; { putdataintodest_548436_839829468(p0, d0, t0, tmp0); { if (!!(((*t0).kind == ((Ttypekind290244) 24) || (*t0).kind == ((Ttypekind290244) 28)))) goto LA15; (*d0).s = ((Tstorageloc290812) 1); } LA15: ; } LA8: ; } N_NIMCALL(NIM_BOOL, handleconstexpr_552853_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; { NIM_BOOL LOC3; 
NIM_BOOL LOC4; NI LOC6; Ttype290840* t0; Ropeobj177006* LOC10; NI id0; Ropeobj177006* LOC11; Ropeobj177006* LOC12; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = ((*d0).k == ((Tlockind290808) 0)); if (!(LOC4)) goto LA5; LOC6 = (NI)0; LOC6 = len_291081_850551059(n0); LOC4 = (((NI) (((*n0).kind == ((Tnodekind290020) 38)))) < LOC6); LA5: ; LOC3 = LOC4; if (!(LOC3)) goto LA7; LOC3 = isdeepconstexpr_316566_2616423590(n0); LA7: ; if (!LOC3) goto LA8; t0 = getuniquetype_526640_2036603609((*n0).typ); LOC10 = (Ropeobj177006*)0; LOC10 = gettypedesc_533671_839829468((*p0).module, t0); id0 = nodetabletestorset_340682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels))); LOC11 = (Ropeobj177006*)0; LOC11 = rope_177401_2381377266(((NI64) (id0))); LOC12 = (Ropeobj177006*)0; LOC12 = HEX26_177418_2381377266((*(*p0).module).tmpbase, LOC11); fillloc_530282_839829468(d0, ((Tlockind290808) 8), t0, LOC12, ((Tstorageloc290812) 1)); { TY533238 LOC17; if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA15; (*(*p0).module).labels += ((NI) 1); memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = gettypedesc_533671_839829468((*p0).module, t0); LOC17[1] = (*d0).r; LOC17[2] = genconstexpr_552849_839829468(p0, n0); addf_178205_2381377266(&(*(*p0).module).s[(((Tcfilesection527005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC17, 3); } LA15: ; result0 = NIM_TRUE; } goto LA1; LA8: ; { result0 = NIM_FALSE; } LA1: ; return result0; } N_NIMCALL(void, genarrayconstr_556207_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0) { Tloc290816 arr0; memset((void*)(&arr0), 0, sizeof(arr0)); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = handleconstexpr_552853_839829468(p0, n0, d0); if (!!(LOC3)) goto LA4; { if (!((*d0).k == ((Tlockind290808) 0))) goto LA8; gettemp_535032_839829468(p0, (*n0).typ, d0, NIM_FALSE); } LA8: ; { NI i_556234_839829468; NI HEX3Atmp_556242_839829468; NI LOC11; NI res_556245_839829468; i_556234_839829468 = (NI)0; HEX3Atmp_556242_839829468 = (NI)0; LOC11 
= (NI)0; LOC11 = sonslen_293351_850551059(n0); HEX3Atmp_556242_839829468 = (NI)(LOC11 - ((NI) 1)); res_556245_839829468 = ((NI) 0); { while (1) { Ttype290840* LOC14; Ttype290840* LOC15; TY530811 LOC16; if (!(res_556245_839829468 <= HEX3Atmp_556242_839829468)) goto LA13; i_556234_839829468 = res_556245_839829468; LOC14 = (Ttype290840*)0; LOC14 = skiptypes_294099_850551059((*n0).typ, IL64(211106232576256)); LOC15 = (Ttype290840*)0; LOC15 = elemtype_318394_3876443242(LOC14); initloc_530273_839829468((&arr0), ((Tlockind290808) 6), LOC15, (*d0).s); memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_536188_839829468((*d0)); LOC16[1] = intliteral_537270_839829468(((NI64) (i_556234_839829468))); arr0.r = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_138), LOC16, 2); expr_537248_839829468(p0, (*n0).kindU.S6.sons->data[i_556234_839829468], (&arr0)); res_556245_839829468 += ((NI) 1); } LA13: ; } } } LA4: ; } N_NIMCALL(void, gentupleconstr_555618_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0) { Tloc290816 rec0; memset((void*)(&rec0), 0, sizeof(rec0)); { NIM_BOOL LOC3; Ttype290840* t0; Ropeobj177006* LOC6; LOC3 = (NIM_BOOL)0; LOC3 = handleconstexpr_552853_839829468(p0, n0, d0); if (!!(LOC3)) goto LA4; t0 = getuniquetype_526640_2036603609((*n0).typ); LOC6 = (Ropeobj177006*)0; LOC6 = gettypedesc_533671_839829468((*p0).module, t0); { if (!((*d0).k == ((Tlockind290808) 0))) goto LA9; gettemp_535032_839829468(p0, t0, d0, NIM_FALSE); } LA9: ; { NI i_555646_839829468; NI HEX3Atmp_555803_839829468; NI LOC12; NI res_555806_839829468; i_555646_839829468 = (NI)0; HEX3Atmp_555803_839829468 = (NI)0; LOC12 = (NI)0; LOC12 = sonslen_293351_850551059(n0); HEX3Atmp_555803_839829468 = (NI)(LOC12 - ((NI) 1)); res_555806_839829468 = ((NI) 0); { while (1) { Tnode290802* it0; TY530811 LOC19; if (!(res_555806_839829468 <= HEX3Atmp_555803_839829468)) goto LA14; i_555646_839829468 = res_555806_839829468; it0 = (*n0).kindU.S6.sons->data[i_555646_839829468]; { if 
(!((*it0).kind == ((Tnodekind290020) 34))) goto LA17; it0 = (*it0).kindU.S6.sons->data[((NI) 1)]; } LA17: ; initloc_530273_839829468((&rec0), ((Tlockind290808) 6), (*it0).typ, (*d0).s); memset((void*)LOC19, 0, sizeof(LOC19)); LOC19[0] = rdloc_536188_839829468((*d0)); LOC19[1] = rope_177401_2381377266(((NI64) (i_555646_839829468))); rec0.r = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_185), LOC19, 2); expr_537248_839829468(p0, it0, (&rec0)); res_555806_839829468 += ((NI) 1); } LA14: ; } } } LA4: ; } N_NIMCALL(Tsym290834*, lookupfieldagain_551153_839829468)(Tcproc527021* p0, Ttype290840* ty_551156_839829468, Tsym290834* field0, Ropeobj177006** r0) { Tsym290834* result0; Ttype290840* ty0; result0 = (Tsym290834*)0; ty0 = ty_551156_839829468; { while (1) { if (!!((ty0 == NIM_NIL))) goto LA2; ty0 = skiptypes_294099_850551059(ty0, IL64(211106247215360)); result0 = lookupinrecord_297119_2984716966((*ty0).n, (*field0).name); { if (!!((result0 == NIM_NIL))) goto LA5; goto LA1; } LA5: ; { NIM_BOOL LOC9; LOC9 = (NIM_BOOL)0; LOC9 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC9) goto LA10; LOC9 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA10: ; if (!!(LOC9)) goto LA11; add_177487_2381377266(r0, ((NimStringDesc*) &T839829468_153)); } LA11: ; ty0 = getuniquetype_526640_2036603609((*ty0).sons->data[((NI) 0)]); } LA2: ; } LA1: ; { if (!(result0 == NIM_NIL)) goto LA15; internalerror_194100_155036129((*field0).info, ((NimStringDesc*) &T839829468_532)); } LA15: ; return result0; } N_NIMCALL(void, genfieldcheck_551504_839829468)(Tcproc527021* p0, Tnode290802* e0, Ropeobj177006* obj0, Tsym290834* field0, Ttype290840* origty0) { Tloc290816 test0; Tloc290816 u0; Tloc290816 v0; memset((void*)(&test0), 0, sizeof(test0)); memset((void*)(&u0), 0, sizeof(u0)); memset((void*)(&v0), 0, sizeof(v0)); { NI i_551525_839829468; NI HEX3Atmp_552039_839829468; NI LOC2; NI res_552042_839829468; i_551525_839829468 = (NI)0; 
HEX3Atmp_552039_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_293351_850551059(e0); HEX3Atmp_552039_839829468 = (NI)(LOC2 - ((NI) 1)); res_552042_839829468 = ((NI) 1); { while (1) { Tnode290802* it0; Tsym290834* op0; Tnode290802* disc0; Ropeobj177006* o0; Tsym290834* d0; NI id0; Tnode290802* LOC9; Ropeobj177006* strlit0; if (!(res_552042_839829468 <= HEX3Atmp_552039_839829468)) goto LA4; i_551525_839829468 = res_552042_839829468; it0 = (*e0).kindU.S6.sons->data[i_551525_839829468]; op0 = (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; { if (!((*op0).magic == ((Tmagic290524) 99))) goto LA7; it0 = (*it0).kindU.S6.sons->data[((NI) 1)]; } LA7: ; disc0 = skipconv_326882_3876443242((*it0).kindU.S6.sons->data[((NI) 2)]); initloc_530273_839829468((&test0), ((Tlockind290808) 0), (*it0).typ, ((Tstorageloc290812) 2)); initlocexpr_537283_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], (&u0)); o0 = obj0; d0 = lookupfieldagain_551153_839829468(p0, origty0, (*disc0).kindU.S4.sym, &o0); initloc_530273_839829468((&v0), ((Tlockind290808) 6), (*d0).typ, ((Tstorageloc290812) 0)); v0.r = o0; add_177487_2381377266(&v0.r, ((NimStringDesc*) &T839829468_257)); add_177482_2381377266(&v0.r, (*d0).loc.r); geninexpraux_551496_839829468(p0, it0, (&u0), (&v0), (&test0)); LOC9 = (Tnode290802*)0; LOC9 = newstrnode_291678_850551059(((Tnodekind290020) 20), (*(*field0).name).s); id0 = nodetabletestorset_340682_1142335848((&(*(*p0).module).datacache), LOC9, ((NI) ((*(*p0).module).labels))); { if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA12; strlit0 = getstrlit_547468_839829468((*p0).module, (*(*field0).name).s); } goto LA10; LA12: ; { Ropeobj177006* LOC15; LOC15 = (Ropeobj177006*)0; LOC15 = rope_177401_2381377266(((NI64) (id0))); strlit0 = HEX26_177418_2381377266((*(*p0).module).tmpbase, LOC15); } LA10: ; { TY530811 LOC20; if (!((*op0).magic == ((Tmagic290524) 99))) goto LA18; memset((void*)LOC20, 0, sizeof(LOC20)); LOC20[0] = rdloc_536188_839829468(test0); LOC20[1] = strlit0; 
linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_534), LOC20, 2); } goto LA16; LA18: ; { TY530811 LOC22; memset((void*)LOC22, 0, sizeof(LOC22)); LOC22[0] = rdloc_536188_839829468(test0); LOC22[1] = strlit0; linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_535), LOC22, 2); } LA16: ; res_552042_839829468 += ((NI) 1); } LA4: ; } } } N_NIMCALL(void, genobjconstr_552903_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { Tloc290816 tmp0; Ttype290840* t0; NIM_BOOL isref0; Ropeobj177006* r0; Ropeobj177006* LOC13; Ttype290840* ty0; { { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = handleconstexpr_552853_839829468(p0, e0, d0); if (!LOC3) goto LA4; goto BeforeRet; } LA4: ; memset((void*)(&tmp0), 0, sizeof(tmp0)); t0 = skiptypes_294099_850551059((*e0).typ, IL64(211106232576256)); gettemp_535032_839829468(p0, t0, (&tmp0), NIM_FALSE); isref0 = ((*t0).kind == ((Ttypekind290244) 22)); r0 = rdloc_536188_839829468(tmp0); { Ttype290840* LOC10; TY177507 LOC11; if (!isref0) goto LA8; rawgennew_552741_839829468(p0, tmp0, NIM_NIL); LOC10 = (Ttype290840*)0; LOC10 = lastson_293377_850551059(t0); t0 = skiptypes_294099_850551059(LOC10, IL64(211106232576256)); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = r0; r0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_124), LOC11, 1); gcusage_552439_839829468(e0); } goto LA6; LA8: ; { constructloc_536388_839829468(p0, tmp0, NIM_FALSE); } LA6: ; LOC13 = (Ropeobj177006*)0; LOC13 = gettypedesc_533671_839829468((*p0).module, t0); ty0 = getuniquetype_526640_2036603609(t0); { NI i_552944_839829468; NI HEX3Atmp_552997_839829468; NI LOC15; NI res_553000_839829468; i_552944_839829468 = (NI)0; HEX3Atmp_552997_839829468 = (NI)0; LOC15 = (NI)0; LOC15 = len_291081_850551059(e0); HEX3Atmp_552997_839829468 = (LOC15 - 1); res_553000_839829468 = ((NI) 1); { while (1) { Tnode290802* it0; Tloc290816 tmp20; Tsym290834* field0; if (!(res_553000_839829468 <= 
HEX3Atmp_552997_839829468)) goto LA17; i_552944_839829468 = res_553000_839829468; it0 = (*e0).kindU.S6.sons->data[i_552944_839829468]; memset((void*)(&tmp20), 0, sizeof(tmp20)); tmp20.r = r0; field0 = lookupfieldagain_551153_839829468(p0, ty0, (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym, &tmp20.r); { if (!((*field0).loc.r == NIM_NIL)) goto LA20; internalerror_194100_155036129((*e0).info, ((NimStringDesc*) &T839829468_533)); } LA20: ; { NIM_BOOL LOC24; NI LOC25; LOC24 = (NIM_BOOL)0; LOC25 = (NI)0; LOC25 = len_291081_850551059(it0); LOC24 = (LOC25 == ((NI) 3)); if (!(LOC24)) goto LA26; LOC24 = (((*p0).options &(1U<<((NU)(((Toption168009) 2))&31U)))!=0); LA26: ; if (!LOC24) goto LA27; genfieldcheck_551504_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 2)], r0, field0, ty0); } LA27: ; add_177487_2381377266(&tmp20.r, ((NimStringDesc*) &T839829468_257)); add_177482_2381377266(&tmp20.r, (*field0).loc.r); tmp20.k = ((Tlockind290808) 1); tmp20.t = (*field0).loc.t; { if (!isref0) goto LA31; tmp20.s = ((Tstorageloc290812) 3); } goto LA29; LA31: ; { tmp20.s = ((Tstorageloc290812) 2); } LA29: ; expr_537248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], (&tmp20)); res_553000_839829468 += ((NI) 1); } LA17: ; } } { if (!((*d0).k == ((Tlockind290808) 0))) goto LA36; genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI290816)); } goto LA34; LA36: ; { genassignment_537264_839829468(p0, (*d0), tmp0, 0); } LA34: ; }BeforeRet: ; } N_NIMCALL(void, gencast_554537_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { Ttype290840* destt0; Ttype290840* srct0; destt0 = skiptypes_294099_850551059((*e0).typ, IL64(211106233624832)); srct0 = skiptypes_294099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106233624832)); { NIM_BOOL LOC3; Ropeobj177006* lbl0; Tloc290816 tmp0; TY177507 LOC7; TY533238 LOC8; TY177507 LOC9; Ropeobj177006* LOC10; LOC3 = (NIM_BOOL)0; LOC3 = ((*destt0).kind >= ((Ttypekind290244) 36) && (*destt0).kind <= ((Ttypekind290244) 39) || 
(*destt0).kind == ((Ttypekind290244) 18) || (*destt0).kind == ((Ttypekind290244) 17) || (*destt0).kind == ((Ttypekind290244) 16) || (*destt0).kind == ((Ttypekind290244) 4)); if (LOC3) goto LA4; LOC3 = ((*srct0).kind >= ((Ttypekind290244) 36) && (*srct0).kind <= ((Ttypekind290244) 39) || (*srct0).kind == ((Ttypekind290244) 18) || (*srct0).kind == ((Ttypekind290244) 17) || (*srct0).kind == ((Ttypekind290244) 16) || (*srct0).kind == ((Ttypekind290244) 4)); LA4: ; if (!LOC3) goto LA5; (*p0).labels += ((NI) 1); lbl0 = rope_177401_2381377266(((NI64) ((*p0).labels))); memset((void*)(&tmp0), 0, sizeof(tmp0)); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = lbl0; tmp0.r = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_536), LOC7, 1); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = gettypedesc_533671_839829468((*p0).module, srct0); LOC8[1] = gettypedesc_533671_839829468((*p0).module, destt0); LOC8[2] = lbl0; linefmt_530714_839829468(p0, ((Tcprocsection527011) 0), ((NimStringDesc*) &T839829468_537), LOC8, 3); tmp0.k = ((Tlockind290808) 6); tmp0.t = srct0; tmp0.s = ((Tstorageloc290812) 2); tmp0.flags = 0; expr_537248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0)); memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = lbl0; LOC10 = (Ropeobj177006*)0; LOC10 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_538), LOC9, 1); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC10, tmp0.s); } goto LA1; LA5: ; { gensomecast_554480_839829468(p0, e0, d0); } LA1: ; } N_NIMCALL(void, genconv_554632_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { Ttype290840* desttype0; desttype0 = skiptypes_294099_850551059((*e0).typ, 8390656); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = comparetypes_324214_3876443242(desttype0, (*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, ((Tdistinctcompare322427) 1), 0); if (!LOC3) goto LA4; expr_537248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], d0); } goto LA1; LA4: ; { gensomecast_554480_839829468(p0, e0, d0); } LA1: ; } 
static N_INLINE(NIM_BOOL, iscppref_550807_839829468)(Tcproc527021* p0, Ttype290840* typ0) { NIM_BOOL result0; NIM_BOOL LOC1; NIM_BOOL LOC2; NIM_BOOL LOC3; Ttype290840* LOC6; Ttype290840* LOC8; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC2 = (NIM_BOOL)0; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA4: ; LOC2 = LOC3; if (!(LOC2)) goto LA5; LOC6 = (Ttype290840*)0; LOC6 = skiptypes_294099_850551059(typ0, IL64(211106232576256)); LOC2 = ((*LOC6).kind == ((Ttypekind290244) 23)); LA5: ; LOC1 = LOC2; if (!(LOC1)) goto LA7; LOC8 = (Ttype290840*)0; LOC8 = skiptypes_294099_850551059(typ0, IL64(211106232576256)); LOC1 = !((((*LOC8).flags &(1U<<((NU)(((Ttypeflag290431) 18))&31U)))!=0)); LA7: ; result0 = LOC1; return result0; } N_NIMCALL(void, genaddr_551051_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { { Ttype290840* LOC3; Tloc290816 a0; Ropeobj177006* LOC6; LOC3 = (Ttype290840*)0; LOC3 = skiptypes_294099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); if (!((*LOC3).kind == ((Ttypekind290244) 22) || (*LOC3).kind == ((Ttypekind290244) 21))) goto LA4; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); LOC6 = (Ropeobj177006*)0; LOC6 = HEX26_177452_2381377266(((NimStringDesc*) &T839829468_52), a0.r); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC6, a0.s); } goto LA1; LA4: ; { NIM_BOOL LOC8; Tctypekind527007 LOC9; LOC8 = (NIM_BOOL)0; LOC9 = (Tctypekind527007)0; LOC9 = maptype_531393_839829468((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ); LOC8 = (LOC9 == ((Tctypekind527007) 17)); if (LOC8) goto LA10; LOC8 = iscppref_550807_839829468(p0, (*(*e0).kindU.S6.sons->data[((NI) 0)]).typ); LA10: ; if (!LOC8) goto LA11; expr_537248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0); } goto LA1; LA11: ; { Tloc290816 a0; Ropeobj177006* 
LOC14; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); LOC14 = (Ropeobj177006*)0; LOC14 = addrloc_536204_839829468(a0); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC14, a0.s); } LA1: ; } N_NIMCALL(void, genarrayelem_552093_839829468)(Tcproc527021* p0, Tnode290802* x0, Tnode290802* y0, Tloc290816* d0) { Tloc290816 a0; Tloc290816 b0; Ttype290840* ty0; Ttype290840* LOC1; Ropeobj177006* first0; NI64 LOC2; Ttype290840* LOC47; Ttype290840* LOC48; TY533238 LOC49; Ropeobj177006* LOC50; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_537283_839829468(p0, x0, (&a0)); initlocexpr_537283_839829468(p0, y0, (&b0)); LOC1 = (Ttype290840*)0; LOC1 = skiptypes_294099_850551059(a0.t, IL64(211106242013440)); ty0 = skiptypes_294099_850551059(LOC1, IL64(211106247256320)); LOC2 = (NI64)0; LOC2 = firstord_318001_3876443242(ty0); first0 = intliteral_537270_839829468(LOC2); { NIM_BOOL LOC5; LOC5 = (NIM_BOOL)0; LOC5 = (((*p0).options &(1U<<((NU)(((Toption168009) 4))&31U)))!=0); if (!(LOC5)) goto LA6; LOC5 = !((((*ty0).flags &(1U<<((NU)(((Ttypeflag290431) 0))&31U)))!=0)); LA6: ; if (!LOC5) goto LA7; { NIM_BOOL LOC11; LOC11 = (NIM_BOOL)0; LOC11 = isconstexpr_316510_2616423590(y0); if (!!(LOC11)) goto LA12; { NI64 LOC16; LOC16 = (NI64)0; LOC16 = firstord_318001_3876443242(ty0); if (!(LOC16 == IL64(0))) goto LA17; { NIM_BOOL LOC21; NI64 LOC22; NI64 LOC23; NI64 LOC25; NI64 LOC26; TY530811 LOC29; NI64 LOC30; LOC21 = (NIM_BOOL)0; LOC22 = (NI64)0; LOC22 = firstord_318001_3876443242(b0.t); LOC23 = (NI64)0; LOC23 = firstord_318001_3876443242(ty0); LOC21 = (LOC22 < LOC23); if (LOC21) goto LA24; LOC25 = (NI64)0; LOC25 = lastord_318004_3876443242(ty0); LOC26 = (NI64)0; LOC26 = lastord_318004_3876443242(b0.t); LOC21 = (LOC25 < LOC26); LA24: ; if (!LOC21) goto LA27; memset((void*)LOC29, 0, sizeof(LOC29)); LOC29[0] = rdcharloc_536227_839829468(b0); LOC30 = (NI64)0; LOC30 = 
lastord_318004_3876443242(ty0); LOC29[1] = intliteral_537270_839829468(LOC30); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_539), LOC29, 2); } LA27: ; } goto LA14; LA17: ; { TY533238 LOC32; NI64 LOC33; memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = rdcharloc_536227_839829468(b0); LOC32[1] = first0; LOC33 = (NI64)0; LOC33 = lastord_318004_3876443242(ty0); LOC32[2] = intliteral_537270_839829468(LOC33); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_540), LOC32, 3); } LA14: ; } goto LA9; LA12: ; { NI64 idx0; idx0 = getordvalue_318129_3876443242(y0); { NIM_BOOL LOC37; NI64 LOC38; NI64 LOC40; LOC37 = (NIM_BOOL)0; LOC38 = (NI64)0; LOC38 = firstord_318001_3876443242(ty0); LOC37 = (idx0 < LOC38); if (LOC37) goto LA39; LOC40 = (NI64)0; LOC40 = lastord_318004_3876443242(ty0); LOC37 = (LOC40 < idx0); LA39: ; if (!LOC37) goto LA41; localerror_194080_155036129((*x0).info, ((Tmsgkind189002) 86), ((NimStringDesc*) &T839829468_490)); } LA41: ; } LA9: ; } LA7: ; { if (!((*d0).k == ((Tlockind290808) 0))) goto LA45; (*d0).s = a0.s; } LA45: ; LOC47 = (Ttype290840*)0; LOC47 = skiptypes_294099_850551059(ty0, IL64(211106240964864)); LOC48 = (Ttype290840*)0; LOC48 = elemtype_318394_3876443242(LOC47); memset((void*)LOC49, 0, sizeof(LOC49)); LOC49[0] = rdloc_536188_839829468(a0); LOC49[1] = rdcharloc_536227_839829468(b0); LOC49[2] = first0; LOC50 = (Ropeobj177006*)0; LOC50 = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_541), LOC49, 3); putintodest_548468_839829468(p0, d0, LOC48, LOC50, a0.s); } N_NIMCALL(void, genopenarrayelem_552169_839829468)(Tcproc527021* p0, Tnode290802* x0, Tnode290802* y0, Tloc290816* d0) { Tloc290816 a0; Tloc290816 b0; Ttype290840* LOC10; Ttype290840* LOC11; TY530811 LOC12; Ropeobj177006* LOC13; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_537283_839829468(p0, x0, (&a0)); initlocexpr_537283_839829468(p0, y0, (&b0)); { TY530811 
LOC5; if (!(((*p0).options &(1U<<((NU)(((Toption168009) 4))&31U)))!=0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_536188_839829468(b0); LOC5[1] = rdloc_536188_839829468(a0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_542), LOC5, 2); } LA3: ; { if (!((*d0).k == ((Tlockind290808) 0))) goto LA8; (*d0).s = a0.s; } LA8: ; LOC10 = (Ttype290840*)0; LOC10 = skiptypes_294099_850551059(a0.t, IL64(211106240964864)); LOC11 = (Ttype290840*)0; LOC11 = elemtype_318394_3876443242(LOC10); memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rdloc_536188_839829468(a0); LOC12[1] = rdcharloc_536227_839829468(b0); LOC13 = (Ropeobj177006*)0; LOC13 = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC12, 2); putintodest_548468_839829468(p0, d0, LOC11, LOC13, a0.s); } N_NIMCALL(void, genseqelem_552205_839829468)(Tcproc527021* p0, Tnode290802* x0, Tnode290802* y0, Tloc290816* d0) { Tloc290816 a0; Tloc290816 b0; Ttype290840* ty0; Ttype290840* LOC27; Ttype290840* LOC28; TY530811 LOC29; Ropeobj177006* LOC30; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_537283_839829468(p0, x0, (&a0)); initlocexpr_537283_839829468(p0, y0, (&b0)); ty0 = skiptypes_294099_850551059(a0.t, IL64(211106242013440)); { Ttype290840* LOC5; if (!((*ty0).kind == ((Ttypekind290244) 22) || (*ty0).kind == ((Ttypekind290244) 21))) goto LA3; LOC5 = (Ttype290840*)0; LOC5 = lastson_293377_850551059(ty0); ty0 = skiptypes_294099_850551059(LOC5, IL64(211106242013440)); } LA3: ; { if (!(((*p0).options &(1U<<((NU)(((Toption168009) 4))&31U)))!=0)) goto LA8; { TY533238 LOC14; if (!((*ty0).kind == ((Ttypekind290244) 28))) goto LA12; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rdloc_536188_839829468(b0); LOC14[1] = rdloc_536188_839829468(a0); LOC14[2] = lenfield_537305_839829468(p0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_543), LOC14, 3); } goto LA10; LA12: ; { 
TY533238 LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rdloc_536188_839829468(b0); LOC16[1] = rdloc_536188_839829468(a0); LOC16[2] = lenfield_537305_839829468(p0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_544), LOC16, 3); } LA10: ; } LA8: ; { if (!((*d0).k == ((Tlockind290808) 0))) goto LA19; (*d0).s = ((Tstorageloc290812) 3); } LA19: ; { Ttype290840* LOC23; TY177507 LOC26; LOC23 = (Ttype290840*)0; LOC23 = skiptypes_294099_850551059(a0.t, IL64(211106240964864)); if (!((*LOC23).kind == ((Ttypekind290244) 22) || (*LOC23).kind == ((Ttypekind290244) 21))) goto LA24; memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = a0.r; a0.r = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_124), LOC26, 1); } LA24: ; LOC27 = (Ttype290840*)0; LOC27 = skiptypes_294099_850551059(a0.t, IL64(211106240964864)); LOC28 = (Ttype290840*)0; LOC28 = elemtype_318394_3876443242(LOC27); memset((void*)LOC29, 0, sizeof(LOC29)); LOC29[0] = rdloc_536188_839829468(a0); LOC29[1] = rdcharloc_536227_839829468(b0); LOC30 = (Ropeobj177006*)0; LOC30 = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC29, 2); putintodest_548468_839829468(p0, d0, LOC28, LOC30, a0.s); } N_NIMCALL(void, gencstringelem_552144_839829468)(Tcproc527021* p0, Tnode290802* x0, Tnode290802* y0, Tloc290816* d0) { Tloc290816 a0; Tloc290816 b0; Ttype290840* ty0; Ttype290840* LOC5; Ttype290840* LOC6; TY530811 LOC7; Ropeobj177006* LOC8; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_537283_839829468(p0, x0, (&a0)); initlocexpr_537283_839829468(p0, y0, (&b0)); ty0 = skiptypes_294099_850551059(a0.t, IL64(211106242013440)); { if (!((*d0).k == ((Tlockind290808) 0))) goto LA3; (*d0).s = a0.s; } LA3: ; LOC5 = (Ttype290840*)0; LOC5 = skiptypes_294099_850551059(ty0, IL64(211106240964864)); LOC6 = (Ttype290840*)0; LOC6 = elemtype_318394_3876443242(LOC5); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = 
rdloc_536188_839829468(a0); LOC7[1] = rdcharloc_536227_839829468(b0); LOC8 = (Ropeobj177006*)0; LOC8 = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC7, 2); putintodest_548468_839829468(p0, d0, LOC6, LOC8, a0.s); } N_NIMCALL(void, gentupleelem_551124_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { Tloc290816 a0; NI i0; Ropeobj177006* LOC5; Ttype290840* ty0; Ropeobj177006* r0; TY177507 LOC8; memset((void*)(&a0), 0, sizeof(a0)); i0 = (NI)0; initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); { if (!((*d0).k == ((Tlockind290808) 0))) goto LA3; (*d0).s = a0.s; } LA3: ; LOC5 = (Ropeobj177006*)0; LOC5 = gettypedesc_533671_839829468((*p0).module, a0.t); ty0 = getuniquetype_526640_2036603609(a0.t); r0 = rdloc_536188_839829468(a0); switch ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind) { case ((Tnodekind290020) 6) ... ((Tnodekind290020) 15): { i0 = ((NI) ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S1.intval)); } break; default: { internalerror_194100_155036129((*e0).info, ((NimStringDesc*) &T839829468_545)); } break; } memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rope_177401_2381377266(((NI64) (i0))); addf_178205_2381377266(&r0, ((NimStringDesc*) &T839829468_546), LOC8, 1); putintodest_548468_839829468(p0, d0, (*ty0).sons->data[i0], r0, a0.s); } N_NIMCALL(void, genbracketexpr_552277_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0) { Ttype290840* ty0; ty0 = skiptypes_294099_850551059((*(*n0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106242013440)); { Ttype290840* LOC5; if (!((*ty0).kind == ((Ttypekind290244) 22) || (*ty0).kind == ((Ttypekind290244) 21))) goto LA3; LOC5 = (Ttype290840*)0; LOC5 = lastson_293377_850551059(ty0); ty0 = skiptypes_294099_850551059(LOC5, IL64(211106242013440)); } LA3: ; switch ((*ty0).kind) { case ((Ttypekind290244) 16): case ((Ttypekind290244) 4): { genarrayelem_552093_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0); } 
break; case ((Ttypekind290244) 27): case ((Ttypekind290244) 48): { genopenarrayelem_552169_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0); } break; case ((Ttypekind290244) 24): case ((Ttypekind290244) 28): { genseqelem_552205_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0); } break; case ((Ttypekind290244) 29): { gencstringelem_552144_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0); } break; case ((Ttypekind290244) 18): { gentupleelem_551124_839829468(p0, n0, d0); } break; default: { NimStringDesc* LOC12; LOC12 = (NimStringDesc*)0; LOC12 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI290244))->Sup.len + 21); appendString(LOC12, ((NimStringDesc*) &T839829468_547)); appendString(LOC12, reprEnum((NI)(*ty0).kind, (&NTI290244))); appendChar(LOC12, 41); internalerror_194100_155036129((*n0).info, LOC12); } break; } } N_NIMCALL(void, genderef_541921_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, NIM_BOOL enforcederef0) { Tctypekind527007 mt0; { mt0 = maptype_531393_839829468((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = ((393216 &(1U<<((NU)(mt0)&31U)))!=0); if (!(LOC3)) goto LA4; LOC3 = !(enforcederef0); LA4: ; if (!LOC3) goto LA5; expr_537248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0); { Ttype290840* LOC9; LOC9 = (Ttype290840*)0; LOC9 = skiptypes_294099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); if (!((*LOC9).kind == ((Ttypekind290244) 22))) goto LA10; (*d0).s = ((Tstorageloc290812) 3); } LA10: ; } goto LA1; LA5: ; { Tloc290816 a0; Ttype290840* typ0; memset((void*)(&a0), 0, sizeof(a0)); typ0 = skiptypes_294099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); { NIM_BOOL LOC15; NIM_BOOL LOC16; NIM_BOOL LOC17; NIM_BOOL LOC20; Tnode290802* LOC25; Tnode290802* LOC26; LOC15 = (NIM_BOOL)0; LOC16 = (NIM_BOOL)0; LOC17 = 
(NIM_BOOL)0; LOC17 = ((*typ0).kind == ((Ttypekind290244) 23)); if (!(LOC17)) goto LA18; LOC17 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag290431) 18))&31U)))!=0)); LA18: ; LOC16 = LOC17; if (!(LOC16)) goto LA19; LOC20 = (NIM_BOOL)0; LOC20 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC20) goto LA21; LOC20 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA21: ; LOC16 = LOC20; LA19: ; LOC15 = LOC16; if (!(LOC15)) goto LA22; LOC15 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 64)); LA22: ; if (!LOC15) goto LA23; LOC25 = (Tnode290802*)0; LOC25 = HEX5BHEX5D_291238_850551059(e0, ((NI) 0)); LOC26 = (Tnode290802*)0; LOC26 = HEX5BHEX5D_291238_850551059(LOC25, ((NI) 0)); initlocexprsingleuse_537289_839829468(p0, LOC26, d0); goto BeforeRet; } goto LA13; LA23: ; { initlocexprsingleuse_537289_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); } LA13: ; { if (!((*d0).k == ((Tlockind290808) 0))) goto LA30; switch ((*typ0).kind) { case ((Ttypekind290244) 22): { (*d0).s = ((Tstorageloc290812) 3); } break; case ((Ttypekind290244) 23): { (*d0).s = ((Tstorageloc290812) 0); { NIM_BOOL LOC36; NIM_BOOL LOC37; NIM_BOOL LOC39; Ropeobj177006* LOC44; LOC36 = (NIM_BOOL)0; LOC37 = (NIM_BOOL)0; LOC37 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag290431) 18))&31U)))!=0)); if (!(LOC37)) goto LA38; LOC39 = (NIM_BOOL)0; LOC39 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC39) goto LA40; LOC39 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA40: ; LOC37 = LOC39; LA38: ; LOC36 = LOC37; if (!(LOC36)) goto LA41; LOC36 = ((*e0).kind == ((Tnodekind290020) 65)); LA41: ; if (!LOC36) goto LA42; LOC44 = (Ropeobj177006*)0; LOC44 = rdloc_536188_839829468(a0); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC44, a0.s); goto BeforeRet; } LA42: ; } break; case ((Ttypekind290244) 21): { (*d0).s = ((Tstorageloc290812) 0); } break; default: { NimStringDesc* LOC47; LOC47 = (NimStringDesc*)0; 
LOC47 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI290244))->Sup.len + 9); appendString(LOC47, ((NimStringDesc*) &T839829468_548)); appendString(LOC47, reprEnum((NI)(*typ0).kind, (&NTI290244))); internalerror_194100_155036129((*e0).info, LOC47); } break; } } goto LA28; LA30: ; { NIM_BOOL LOC49; LOC49 = (NIM_BOOL)0; LOC49 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC49) goto LA50; LOC49 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA50: ; if (!LOC49) goto LA51; { NIM_BOOL LOC55; NIM_BOOL LOC56; Ropeobj177006* LOC61; LOC55 = (NIM_BOOL)0; LOC56 = (NIM_BOOL)0; LOC56 = ((*typ0).kind == ((Ttypekind290244) 23)); if (!(LOC56)) goto LA57; LOC56 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag290431) 18))&31U)))!=0)); LA57: ; LOC55 = LOC56; if (!(LOC55)) goto LA58; LOC55 = ((*e0).kind == ((Tnodekind290020) 65)); LA58: ; if (!LOC55) goto LA59; LOC61 = (Ropeobj177006*)0; LOC61 = rdloc_536188_839829468(a0); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC61, a0.s); goto BeforeRet; } LA59: ; } goto LA28; LA51: ; LA28: ; { NIM_BOOL LOC64; Ropeobj177006* LOC68; LOC64 = (NIM_BOOL)0; LOC64 = enforcederef0; if (!(LOC64)) goto LA65; LOC64 = (mt0 == ((Tctypekind527007) 18)); LA65: ; if (!LOC64) goto LA66; LOC68 = (Ropeobj177006*)0; LOC68 = rdloc_536188_839829468(a0); putintodest_548468_839829468(p0, d0, (*a0.t).sons->data[((NI) 0)], LOC68, a0.s); } goto LA62; LA66: ; { TY177507 LOC70; Ropeobj177006* LOC71; memset((void*)LOC70, 0, sizeof(LOC70)); LOC70[0] = rdloc_536188_839829468(a0); LOC71 = (Ropeobj177006*)0; LOC71 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_124), LOC70, 1); putintodest_548468_839829468(p0, d0, (*e0).typ, LOC71, a0.s); } LA62: ; } LA1: ; }BeforeRet: ; } N_NIMCALL(Ttype290840*, genrecordfieldaux_551096_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0, Tloc290816* a0) { Ttype290840* result0; Ropeobj177006* LOC9; result0 = (Ttype290840*)0; initlocexpr_537283_839829468(p0, 
(*e0).kindU.S6.sons->data[((NI) 0)], a0); { if (!!(((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind290020) 3)))) goto LA3; internalerror_194100_155036129((*e0).info, ((NimStringDesc*) &T839829468_549)); } LA3: ; { if (!((*d0).k == ((Tlockind290808) 0))) goto LA7; (*d0).s = (*a0).s; } LA7: ; LOC9 = (Ropeobj177006*)0; LOC9 = gettypedesc_533671_839829468((*p0).module, (*a0).t); result0 = getuniquetype_526640_2036603609((*a0).t); return result0; } N_NIMCALL(void, genrecordfield_551448_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { Tloc290816 a0; Ttype290840* ty0; Ropeobj177006* r0; Tsym290834* f0; memset((void*)(&a0), 0, sizeof(a0)); ty0 = genrecordfieldaux_551096_839829468(p0, e0, d0, (&a0)); r0 = rdloc_536188_839829468(a0); f0 = (*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym; { TY177507 LOC5; if (!((*ty0).kind == ((Ttypekind290244) 18))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rope_177401_2381377266(((NI64) ((*f0).position))); addf_178205_2381377266(&r0, ((NimStringDesc*) &T839829468_546), LOC5, 1); putintodest_548468_839829468(p0, d0, (*f0).typ, r0, a0.s); } goto LA1; LA3: ; { Tsym290834* field0; TY177507 LOC11; field0 = lookupfieldagain_551153_839829468(p0, ty0, f0, &r0); { if (!((*field0).loc.r == NIM_NIL)) goto LA9; internalerror_194100_155036129((*e0).info, ((NimStringDesc*) &T839829468_550)); } LA9: ; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = (*field0).loc.r; addf_178205_2381377266(&r0, ((NimStringDesc*) &T839829468_551), LOC11, 1); putintodest_548468_839829468(p0, d0, (*field0).typ, r0, a0.s); } LA1: ; } N_NIMCALL(void, gencheckedrecordfield_552046_839829468)(Tcproc527021* p0, Tnode290802* e0, Tloc290816* d0) { { Tloc290816 a0; Ttype290840* ty0; Ropeobj177006* r0; Tsym290834* f0; Tsym290834* field0; TY177507 LOC9; Ropeobj177006* LOC10; if (!(((*p0).options &(1U<<((NU)(((Toption168009) 2))&31U)))!=0)) goto LA3; memset((void*)(&a0), 0, sizeof(a0)); ty0 = genrecordfieldaux_551096_839829468(p0, 
(*e0).kindU.S6.sons->data[((NI) 0)], d0, (&a0)); r0 = rdloc_536188_839829468(a0); f0 = (*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym; field0 = lookupfieldagain_551153_839829468(p0, ty0, f0, &r0); { if (!((*field0).loc.r == NIM_NIL)) goto LA7; internalerror_194100_155036129((*e0).info, ((NimStringDesc*) &T839829468_532)); } LA7: ; genfieldcheck_551504_839829468(p0, e0, r0, field0, ty0); memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = (*field0).loc.r; LOC10 = (Ropeobj177006*)0; LOC10 = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_551), LOC9, 1); add_177482_2381377266(&r0, LOC10); putintodest_548468_839829468(p0, d0, (*field0).typ, r0, a0.s); } goto LA1; LA3: ; { genrecordfield_551448_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0); } LA1: ; } N_NIMCALL(NI, startblock_541978_839829468)(Tcproc527021* p0, NimStringDesc* start0, Ropeobj177006** args0, NI args0Len0) { NI result0; result0 = (NI)0; linecg_530707_839829468(p0, ((Tcprocsection527011) 2), start0, args0, args0Len0); (*p0).labels += ((NI) 1); result0 = ((*p0).blocks ? (*p0).blocks->Sup.len : 0); (*p0).blocks = (TY527095*) setLengthSeq(&((*p0).blocks)->Sup, sizeof(Tblock527019), ((NI) ((NI)(result0 + ((NI) 1))))); (*p0).blocks->data[result0].id = ((NI) ((*p0).labels)); (*p0).blocks->data[result0].nestedtrystmts = ((NI16) (((*p0).nestedtrystmts ? 
/* NOTE(review): machine-generated C emitted by the Nim compiler's C backend.
   Do not hand-edit the code tokens — regenerate from the Nim sources instead.
   Comments below describe only what is visible in this chunk. */
(*p0).nestedtrystmts->Sup.len : 0))); (*p0).blocks->data[result0].nestedexceptstmts = ((NI16) ((*p0).inexceptblock)); return result0; }
/* blockbody: renders one code-gen block (Tblock) to a rope.
   Starts from section 0; if framelen > 0, appends a fragment formatted from
   framelen (template T839829468_554 — presumably frame bookkeeping; confirm
   against the string table), then concatenates sections 1 and 2 in order. */
N_NIMCALL(Ropeobj177006*, blockbody_542025_839829468)(Tblock527019* b0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; result0 = (*b0).sections[(((Tcprocsection527011) 0))- 0]; { TY177507 LOC5; if (!(((NI16) 0) < (*b0).framelen)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rope_177401_2381377266(((NI64) ((*b0).framelen))); addf_178205_2381377266(&result0, ((NimStringDesc*) &T839829468_554), LOC5, 1); } LA3: ; add_177482_2381377266(&result0, (*b0).sections[(((Tcprocsection527011) 1))- 0]); add_177482_2381377266(&result0, (*b0).sections[(((Tcprocsection527011) 2))- 0]); return result0; }
/* endblock (rope variant): pops the innermost block off p0->blocks.
   Renders the top block via blockbody, appends that text to the PARENT
   block's section 2, shrinks the blocks seq by one via setLengthSeq, then
   emits blockend0 (the closing text supplied by the caller) into section 2. */
N_NIMCALL(void, endblock_542035_839829468)(Tcproc527021* p0, Ropeobj177006* blockend0) { NI topblock0; Ropeobj177006* LOC1; topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1)); LOC1 = (Ropeobj177006*)0; LOC1 = blockbody_542025_839829468((&(*p0).blocks->data[topblock0])); add_177482_2381377266(&(*p0).blocks->data[(NI)(topblock0 - ((NI) 1))].sections[(((Tcprocsection527011) 2))- 0], LOC1); (*p0).blocks = (TY527095*) setLengthSeq(&((*p0).blocks)->Sup, sizeof(Tblock527019), ((NI) (topblock0))); line_530690_839829468(p0, ((Tcprocsection527011) 2), blockend0); }
/* endblock (no-arg variant): continues on the next physical line — builds the
   closing text (labeled or plain) and delegates to the rope variant above. */
N_NIMCALL(void, endblock_542060_839829468)(Tcproc527021* p0) { NI topblock0; Ropeobj177006* blockend0; NI16 framelen0; topblock0 = (NI)(((*p0).blocks ? 
/* NOTE(review): machine-generated C emitted by the Nim compiler's C backend.
   Do not hand-edit the code tokens — regenerate from the Nim sources instead. */
/* (tail of endblock_542060): if the top block has a label, format the closing
   text from it (template T839829468_552), otherwise use the plain terminator
   (T839829468_160); if framelen > 0, append frame-pop text (T839829468_553),
   then pop the block via endblock_542035. */
(*p0).blocks->Sup.len : 0) - ((NI) 1)); { TY177507 LOC5; if (!!(((*p0).blocks->data[topblock0].label == NIM_NIL))) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = (*p0).blocks->data[topblock0].label; blockend0 = ropecg_530407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_552), LOC5, 1); } goto LA1; LA3: ; { TY531289 LOC7; memset((void*)LOC7, 0, sizeof(LOC7)); blockend0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_160), LOC7, 0); } LA1: ; framelen0 = (*p0).blocks->data[topblock0].framelen; { TY177507 LOC12; if (!(((NI16) 0) < framelen0)) goto LA10; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rope_177401_2381377266(((NI64) (framelen0))); addf_178205_2381377266(&blockend0, ((NimStringDesc*) &T839829468_553), LOC12, 1); } LA10: ; endblock_542035_839829468(p0, blockend0); }
/* genblock: code generation for a Nim `block` expression/statement node.
   - If the node's type is non-empty and the destination loc d0 is unset
     (k == 0), allocates a temp destination via gettemp.
   - Saves p0->breakidx, opens a new C block via startblock, and if son[0]
     is not node-kind 1 (presumably nkEmpty — confirm against Tnodekind),
     records the label symbol: loc.k = 10 and position = breakidx + 1.
   - Generates son[1] (the body) into d0, closes the block, restores breakidx. */
N_NIMCALL(void, genblock_544083_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0) { NI oldbreakidx_544099_839829468; TY531289 LOC8; { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = isemptytype_295440_850551059((*n0).typ); LOC3 = !(LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*d0).k == ((Tlockind290808) 0)); LA5: ; if (!LOC3) goto LA6; gettemp_535032_839829468(p0, (*n0).typ, d0, NIM_FALSE); } LA6: ; oldbreakidx_544099_839829468 = (*p0).breakidx; memset((void*)LOC8, 0, sizeof(LOC8)); (*p0).breakidx = startblock_541978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC8, 0); { Tsym290834* sym0; if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 1)))) goto LA11; sym0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; (*sym0).loc.k = ((Tlockind290808) 10); (*sym0).position = (NI)((*p0).breakidx + ((NI) 1)); } LA11: ; expr_537248_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], d0); endblock_542060_839829468(p0); (*p0).breakidx = oldbreakidx_544099_839829468; }
/* genstmtlistexpr: continues on the next physical line — generates all sons
   but the last as statements, then the last son as the value expression. */
N_NIMCALL(void, genstmtlistexpr_556402_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0) { NI length0; length0 = 
sonslen_293351_850551059(n0); { NI i_556420_839829468; NI HEX3Atmp_556424_839829468; NI res_556427_839829468; i_556420_839829468 = (NI)0; HEX3Atmp_556424_839829468 = (NI)0; HEX3Atmp_556424_839829468 = (NI)(length0 - ((NI) 2)); res_556427_839829468 = ((NI) 0); { while (1) { if (!(res_556427_839829468 <= HEX3Atmp_556424_839829468)) goto LA3; i_556420_839829468 = res_556427_839829468; genstmts_537244_839829468(p0, (*n0).kindU.S6.sons->data[i_556420_839829468]); res_556427_839829468 += ((NI) 1); } LA3: ; } } { if (!(((NI) 0) < length0)) goto LA6; expr_537248_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))], d0); } LA6: ; } N_NIMCALL(void, genif_542982_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0) { Tloc290816 a0; Ropeobj177006* lelse0; Ropeobj177006* lend0; memset((void*)(&a0), 0, sizeof(a0)); lelse0 = (Ropeobj177006*)0; { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = isemptytype_295440_850551059((*n0).typ); LOC3 = !(LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*d0).k == ((Tlockind290808) 0)); LA5: ; if (!LOC3) goto LA6; gettemp_535032_839829468(p0, (*n0).typ, d0, NIM_FALSE); } LA6: ; genlinedir_530823_839829468(p0, n0); lend0 = getlabel_537217_839829468(p0); { NI i_543011_839829468; NI HEX3Atmp_543435_839829468; NI LOC9; NI res_543438_839829468; i_543011_839829468 = (NI)0; HEX3Atmp_543435_839829468 = (NI)0; LOC9 = (NI)0; LOC9 = sonslen_293351_850551059(n0); HEX3Atmp_543435_839829468 = (NI)(LOC9 - ((NI) 1)); res_543438_839829468 = ((NI) 0); { while (1) { Tnode290802* it0; if (!(res_543438_839829468 <= HEX3Atmp_543435_839829468)) goto LA11; i_543011_839829468 = res_543438_839829468; { NIM_BOOL LOC14; LOC14 = (NIM_BOOL)0; LOC14 = ((*d0).k == ((Tlockind290808) 1)); if (!(LOC14)) goto LA15; LOC14 = isemptytype_295440_850551059((*n0).typ); LA15: ; if (!LOC14) goto LA16; (*d0).k = ((Tlockind290808) 0); } LA16: ; it0 = (*n0).kindU.S6.sons->data[i_543011_839829468]; { NI LOC20; TY531289 LOC23; NI LOC24; TY530811 LOC25; 
LOC20 = (NI)0; LOC20 = len_291081_850551059(it0); if (!(LOC20 == ((NI) 2))) goto LA21; memset((void*)LOC23, 0, sizeof(LOC23)); LOC24 = (NI)0; LOC24 = startblock_541978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC23, 0); initlocexprsingleuse_537289_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 0)], (&a0)); lelse0 = getlabel_537217_839829468(p0); (*p0).labels += ((NI) 1); memset((void*)LOC25, 0, sizeof(LOC25)); LOC25[0] = rdloc_536188_839829468(a0); LOC25[1] = lelse0; linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_555), LOC25, 2); { NIM_BOOL LOC28; Ropeobj177006** LOC32; Ropeobj177006** LOC33; LOC28 = (NIM_BOOL)0; LOC28 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC28) goto LA29; LOC28 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA29: ; if (!LOC28) goto LA30; LOC32 = (Ropeobj177006**)0; LOC32 = s_527179_3723162438(p0, ((Tcprocsection527011) 2)); add_177487_2381377266(LOC32, ((NimStringDesc*) &T839829468_223)); expr_537248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], d0); LOC33 = (Ropeobj177006**)0; LOC33 = s_527179_3723162438(p0, ((Tcprocsection527011) 2)); add_177487_2381377266(LOC33, ((NimStringDesc*) &T839829468_280)); } goto LA26; LA30: ; { expr_537248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], d0); } LA26: ; endblock_542060_839829468(p0); { NI LOC37; TY177507 LOC40; LOC37 = (NI)0; LOC37 = sonslen_293351_850551059(n0); if (!(((NI) 1) < LOC37)) goto LA38; memset((void*)LOC40, 0, sizeof(LOC40)); LOC40[0] = lend0; linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_556), LOC40, 1); } LA38: ; fixlabel_537230_839829468(p0, lelse0); } goto LA18; LA21: ; { NI LOC42; TY531289 LOC45; NI LOC46; LOC42 = (NI)0; LOC42 = len_291081_850551059(it0); if (!(LOC42 == ((NI) 1))) goto LA43; memset((void*)LOC45, 0, sizeof(LOC45)); LOC46 = (NI)0; LOC46 = startblock_541978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC45, 0); 
expr_537248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 0)], d0); endblock_542060_839829468(p0); } goto LA18; LA43: ; { internalerror_194100_155036129((*n0).info, ((NimStringDesc*) &T839829468_557)); } LA18: ; res_543438_839829468 += ((NI) 1); } LA11: ; } } { NI LOC50; LOC50 = (NI)0; LOC50 = sonslen_293351_850551059(n0); if (!(((NI) 1) < LOC50)) goto LA51; fixlabel_537230_839829468(p0, lend0); } LA51: ; } N_NIMCALL(void, downconv_556581_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0) { { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC3) goto LA4; LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA4: ; if (!LOC3) goto LA5; expr_537248_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], d0); } goto LA1; LA5: ; { Ttype290840* dest0; Tnode290802* arg0; Ttype290840* src0; Tloc290816 a0; Ropeobj177006* r0; NIM_BOOL isref0; Ttype290840* LOC10; dest0 = skiptypes_294099_850551059((*n0).typ, IL64(211106247256320)); arg0 = (*n0).kindU.S6.sons->data[((NI) 0)]; { while (1) { if (!((*arg0).kind == ((Tnodekind290020) 66))) goto LA9; arg0 = (*arg0).kindU.S6.sons->data[((NI) 0)]; } LA9: ; } src0 = skiptypes_294099_850551059((*arg0).typ, IL64(211106247256320)); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, arg0, (&a0)); r0 = rdloc_536188_839829468(a0); LOC10 = (Ttype290840*)0; LOC10 = skiptypes_294099_850551059((*arg0).typ, IL64(211106232576256)); isref0 = ((*LOC10).kind == ((Ttypekind290244) 22) || (*LOC10).kind == ((Ttypekind290244) 21) || (*LOC10).kind == ((Ttypekind290244) 23)); { if (!isref0) goto LA13; add_177487_2381377266(&r0, ((NimStringDesc*) &T839829468_558)); } goto LA11; LA13: ; { add_177487_2381377266(&r0, ((NimStringDesc*) &T839829468_153)); } LA11: ; { NI i_556650_839829468; NI HEX3Atmp_556677_839829468; NI LOC17; NI res_556680_839829468; i_556650_839829468 = (NI)0; HEX3Atmp_556677_839829468 = (NI)0; LOC17 = (NI)0; LOC17 = 
inheritancediff_324252_3876443242(dest0, src0); HEX3Atmp_556677_839829468 = (LOC17 > 0? (LOC17) : -(LOC17)); res_556680_839829468 = ((NI) 2); { while (1) { if (!(res_556680_839829468 <= HEX3Atmp_556677_839829468)) goto LA19; i_556650_839829468 = res_556680_839829468; add_177487_2381377266(&r0, ((NimStringDesc*) &T839829468_153)); res_556680_839829468 += ((NI) 1); } LA19: ; } } { if (!isref0) goto LA22; { NIM_BOOL LOC26; Ttype290840* LOC28; TY530811 LOC31; LOC26 = (NIM_BOOL)0; LOC26 = ((*d0).k == ((Tlockind290808) 0)); if (!(LOC26)) goto LA27; LOC28 = (Ttype290840*)0; LOC28 = skiptypes_294099_850551059((*n0).typ, IL64(211106232576256)); LOC26 = ((*LOC28).kind == ((Ttypekind290244) 22) || (*LOC28).kind == ((Ttypekind290244) 21) || (*LOC28).kind == ((Ttypekind290244) 23)); LA27: ; if (!LOC26) goto LA29; gettemp_535032_839829468(p0, (*n0).typ, d0, NIM_FALSE); memset((void*)LOC31, 0, sizeof(LOC31)); LOC31[0] = rdloc_536188_839829468((*d0)); LOC31[1] = r0; linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_559), LOC31, 2); } goto LA24; LA29: ; { r0 = HEX26_177452_2381377266(((NimStringDesc*) &T839829468_52), r0); putintodest_548468_839829468(p0, d0, (*n0).typ, r0, a0.s); } LA24: ; } goto LA20; LA22: ; { putintodest_548468_839829468(p0, d0, (*n0).typ, r0, a0.s); } LA20: ; } LA1: ; } N_NIMCALL(void, upconv_556431_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0) { Tloc290816 a0; Ttype290840* dest0; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); dest0 = skiptypes_294099_850551059((*n0).typ, IL64(211106247256320)); { NIM_BOOL LOC3; NIM_BOOL LOC5; Ropeobj177006* r0; Ropeobj177006* nilcheck0; Ttype290840* t0; LOC3 = (NIM_BOOL)0; LOC3 = (((*p0).options &(1U<<((NU)(((Toption168009) 1))&31U)))!=0); if (!(LOC3)) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = isobjlackingtypefield_531513_839829468(dest0); LOC3 = !(LOC5); LA4: ; if (!LOC3) goto LA6; r0 = 
rdloc_536188_839829468(a0); nilcheck0 = NIM_NIL; t0 = skiptypes_294099_850551059(a0.t, IL64(211106232576256)); { while (1) { Ttype290840* LOC23; if (!((*t0).kind == ((Ttypekind290244) 23) || (*t0).kind == ((Ttypekind290244) 21) || (*t0).kind == ((Ttypekind290244) 22))) goto LA9; { if (!!(((*t0).kind == ((Ttypekind290244) 23)))) goto LA12; nilcheck0 = r0; } LA12: ; { NIM_BOOL LOC16; NIM_BOOL LOC18; TY177507 LOC22; LOC16 = (NIM_BOOL)0; LOC16 = !(((*t0).kind == ((Ttypekind290244) 23))); if (LOC16) goto LA17; LOC18 = (NIM_BOOL)0; LOC18 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC18) goto LA19; LOC18 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA19: ; LOC16 = !(LOC18); LA17: ; if (!LOC16) goto LA20; memset((void*)LOC22, 0, sizeof(LOC22)); LOC22[0] = r0; r0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_124), LOC22, 1); } LA20: ; LOC23 = (Ttype290840*)0; LOC23 = lastson_293377_850551059(t0); t0 = skiptypes_294099_850551059(LOC23, IL64(211106232576256)); } LA9: ; } { NIM_BOOL LOC26; LOC26 = (NIM_BOOL)0; LOC26 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC26) goto LA27; LOC26 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA27: ; if (!!(LOC26)) goto LA28; { while (1) { NIM_BOOL LOC32; LOC32 = (NIM_BOOL)0; LOC32 = ((*t0).kind == ((Ttypekind290244) 17)); if (!(LOC32)) goto LA33; LOC32 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL)); LA33: ; if (!LOC32) goto LA31; add_177487_2381377266(&r0, ((NimStringDesc*) &T839829468_153)); t0 = skiptypes_294099_850551059((*t0).sons->data[((NI) 0)], IL64(211106247215360)); } LA31: ; } } LA28: ; { TY533238 LOC38; if (!!((nilcheck0 == NIM_NIL))) goto LA36; memset((void*)LOC38, 0, sizeof(LOC38)); LOC38[0] = nilcheck0; LOC38[1] = r0; LOC38[2] = gentypeinfo_533941_839829468((*p0).module, dest0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_560), LOC38, 3); } goto LA34; LA36: ; { TY530811 LOC40; 
memset((void*)LOC40, 0, sizeof(LOC40)); LOC40[0] = r0; LOC40[1] = gentypeinfo_533941_839829468((*p0).module, dest0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_561), LOC40, 2); } LA34: ; } LA6: ; { TY530811 LOC45; Ropeobj177006* LOC46; if (!!(((*(*(*n0).kindU.S6.sons->data[((NI) 0)]).typ).kind == ((Ttypekind290244) 17)))) goto LA43; memset((void*)LOC45, 0, sizeof(LOC45)); LOC45[0] = gettypedesc_533671_839829468((*p0).module, (*n0).typ); LOC45[1] = rdloc_536188_839829468(a0); LOC46 = (Ropeobj177006*)0; LOC46 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_430), LOC45, 2); putintodest_548468_839829468(p0, d0, (*n0).typ, LOC46, a0.s); } goto LA41; LA43: ; { TY530811 LOC48; Ropeobj177006* LOC49; memset((void*)LOC48, 0, sizeof(LOC48)); LOC48[0] = gettypedesc_533671_839829468((*p0).module, dest0); LOC48[1] = addrloc_536204_839829468(a0); LOC49 = (Ropeobj177006*)0; LOC49 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_429), LOC48, 2); putintodest_548468_839829468(p0, d0, (*n0).typ, LOC49, a0.s); } LA41: ; } N_NIMCALL(void, genrangechck_554590_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0, NimStringDesc* magic0) { Tloc290816 a0; Ttype290840* dest0; memset((void*)(&a0), 0, sizeof(a0)); dest0 = skiptypes_294099_850551059((*n0).typ, IL64(211106240964864)); { NIM_BOOL LOC3; Ttype290840* LOC5; TY530811 LOC8; Ropeobj177006* LOC9; LOC3 = (NIM_BOOL)0; LOC3 = !((((*p0).options &(1U<<((NU)(((Toption168009) 3))&31U)))!=0)); if (LOC3) goto LA4; LOC5 = (Ttype290840*)0; LOC5 = skiptypes_294099_850551059(dest0, 1048576); LOC3 = ((*LOC5).kind >= ((Ttypekind290244) 40) && (*LOC5).kind <= ((Ttypekind290244) 44)); LA4: ; if (!LOC3) goto LA6; initlocexpr_537283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = gettypedesc_533671_839829468((*p0).module, dest0); LOC8[1] = rdcharloc_536227_839829468(a0); LOC9 = (Ropeobj177006*)0; LOC9 = 
/* NOTE(review): machine-generated C emitted by the Nim compiler's C backend.
   Do not hand-edit the code tokens — regenerate from the Nim sources instead. */
/* (tail of genrangechck): checked path — builds a 5-argument call from the
   destination type descriptor, the operand, the low/high literals (sons 1,2)
   and the magic proc name, formatted through template T839829468_562. */
HEX25_177905_2381377266(((NimStringDesc*) &T839829468_430), LOC8, 2); putintodest_548468_839829468(p0, d0, (*n0).typ, LOC9, a0.s); } goto LA1; LA6: ; { TY534475 LOC11; Ropeobj177006* LOC12; initlocexpr_537283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = gettypedesc_533671_839829468((*p0).module, dest0); LOC11[1] = rdcharloc_536227_839829468(a0); LOC11[2] = genliteral_547476_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], dest0); LOC11[3] = genliteral_547476_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 2)], dest0); LOC11[4] = rope_177277_2381377266(magic0); LOC12 = (Ropeobj177006*)0; LOC12 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_562), LOC11, 5); putintodest_548468_839829468(p0, d0, dest0, LOC12, a0.s); } LA1: ; }
/* convstrtocstr: per its name, conversion of a Nim string to a cstring.
   Evaluates son[0] into a0, skips abstract types on n0->typ, formats the
   operand's loc through template T839829468_485 and stores the result in d0
   (template contents not visible here — verify against the string table). */
N_NIMCALL(void, convstrtocstr_554642_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0) { Tloc290816 a0; Ttype290840* LOC1; TY177507 LOC2; Ropeobj177006* LOC3; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); LOC1 = (Ttype290840*)0; LOC1 = skiptypes_294099_850551059((*n0).typ, IL64(211106240964864)); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = rdloc_536188_839829468(a0); LOC3 = (Ropeobj177006*)0; LOC3 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_485), LOC2, 1); putintodest_548468_839829468(p0, d0, LOC1, LOC3, a0.s); }
/* convcstrtostr: per its name, the reverse conversion (cstring -> string).
   Same shape as above but goes through ropecg with the module and template
   T839829468_411; continues on the next physical line. */
N_NIMCALL(void, convcstrtostr_554654_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0) { Tloc290816 a0; Ttype290840* LOC1; TY177507 LOC2; Ropeobj177006* LOC3; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); LOC1 = (Ttype290840*)0; LOC1 = skiptypes_294099_850551059((*n0).typ, IL64(211106240964864)); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = rdloc_536188_839829468(a0); LOC3 = (Ropeobj177006*)0; LOC3 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_411), 
/* NOTE(review): machine-generated C emitted by the Nim compiler's C backend.
   Do not hand-edit the code tokens — regenerate from the Nim sources instead. */
/* (tail of convcstrtostr): stores the converted value and records GC usage. */
LOC2, 1); putintodest_548468_839829468(p0, d0, LOC1, LOC3, a0.s); gcusage_552439_839829468(n0); }
/* isroutine: true when the symbol's kind is in the compiler-emitted bitmask
   258048 (a Nim set-membership test over Tsymkind; exactly which routine
   kinds the mask encodes is not visible here — confirm against the enum). */
static N_INLINE(NIM_BOOL, isroutine_295323_850551059)(Tsym290834* s0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; result0 = ((258048 &(1U<<((NU)((*s0).kind)&31U)))!=0); return result0; }
/* isconstclosure: true when son[0] is a symbol node (node kind 3) whose
   symbol is a routine, AND son[1] has node kind 23 (presumably nkNilLit,
   i.e. a closure with no environment — verify against Tnodekind). */
static N_INLINE(NIM_BOOL, isconstclosure_555810_839829468)(Tnode290802* n0) { NIM_BOOL result0; NIM_BOOL LOC1; NIM_BOOL LOC2; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC2 = (NIM_BOOL)0; LOC2 = ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 3)); if (!(LOC2)) goto LA3; LOC2 = isroutine_295323_850551059((*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym); LA3: ; LOC1 = LOC2; if (!(LOC1)) goto LA4; LOC1 = ((*(*n0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind290020) 23)); LA4: ; result0 = LOC1; return result0; }
/* genclosure: constant-closure fast path — mints a fresh module label, emits
   a named constant (type desc + name + const expr, template T839829468_524)
   into module section 8, and points the destination at it with storage kind 1.
   Otherwise (next physical line) evaluates both closure parts into temps. */
N_NIMCALL(void, genclosure_555836_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0) { { NIM_BOOL LOC3; Ropeobj177006* tmp0; Ropeobj177006* LOC6; TY533238 LOC7; LOC3 = (NIM_BOOL)0; LOC3 = isconstclosure_555810_839829468(n0); if (!LOC3) goto LA4; (*(*p0).module).labels += ((NI) 1); LOC6 = (Ropeobj177006*)0; LOC6 = rope_177401_2381377266(((NI64) ((*(*p0).module).labels))); tmp0 = HEX26_177452_2381377266(((NimStringDesc*) &T839829468_566), LOC6); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = gettypedesc_533671_839829468((*p0).module, (*n0).typ); LOC7[1] = tmp0; LOC7[2] = genconstexpr_552849_839829468(p0, n0); addf_178205_2381377266(&(*(*p0).module).s[(((Tcfilesection527005) 8))- 0], ((NimStringDesc*) &T839829468_524), LOC7, 3); putintodest_548468_839829468(p0, d0, (*n0).typ, tmp0, ((Tstorageloc290812) 1)); } goto LA1; LA4: ; { Tloc290816 tmp0; Tloc290816 a0; Tloc290816 b0; TY533238 LOC14; memset((void*)(&tmp0), 0, sizeof(tmp0)); memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&b0), 0, sizeof(b0)); initlocexpr_537283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); initlocexpr_537283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 
1)], (&b0)); { Tnode290802* LOC11; LOC11 = (Tnode290802*)0; LOC11 = skipconv_326882_3876443242((*n0).kindU.S6.sons->data[((NI) 0)]); if (!((*LOC11).kind == ((Tnodekind290020) 155))) goto LA12; internalerror_194100_155036129((*n0).info, ((NimStringDesc*) &T839829468_567)); } LA12: ; gettemp_535032_839829468(p0, (*n0).typ, (&tmp0), NIM_FALSE); memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rdloc_536188_839829468(tmp0); LOC14[1] = rdloc_536188_839829468(a0); LOC14[2] = rdloc_536188_839829468(b0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_568), LOC14, 3); putlocintodest_537258_839829468(p0, d0, tmp0); } LA1: ; } static N_INLINE(Ropeobj177006*, assignlabel_542020_839829468)(Tblock527019* b0) { Ropeobj177006* result0; Ropeobj177006* LOC1; result0 = (Ropeobj177006*)0; LOC1 = (Ropeobj177006*)0; LOC1 = rope_177401_2381377266(((NI64) ((*b0).id))); unsureAsgnRef((void**) (&(*b0).label), HEX26_177452_2381377266(((NimStringDesc*) &T839829468_296), LOC1)); result0 = (*b0).label; return result0; } N_NIMCALL(void, gencomputedgoto_543744_839829468)(Tcproc527021* p0, Tnode290802* n0) { NI casepos0; NI arraysize0; NI id0; Ropeobj177006* tmp0; TY177507 LOC27; Ropeobj177006* gotoarray0; TY530811 LOC28; TY177507 LOC33; NI topblock0; Ropeobj177006* oldbody0; Ropeobj177006* tailb0; Ropeobj177006* taila0; Tnode290802* casestmt0; Tloc290816 a_543871_839829468; TY530811 LOC41; { casepos0 = ((NI) -1); arraysize0 = (NI)0; { NI i_543768_839829468; NI HEX3Atmp_543933_839829468; NI LOC2; NI res_543936_839829468; i_543768_839829468 = (NI)0; HEX3Atmp_543933_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = len_291081_850551059(n0); HEX3Atmp_543933_839829468 = (LOC2 - 1); res_543936_839829468 = ((NI) 0); { while (1) { Tnode290802* it0; if (!(res_543936_839829468 <= HEX3Atmp_543933_839829468)) goto LA4; i_543768_839829468 = res_543936_839829468; it0 = (*n0).kindU.S6.sons->data[i_543768_839829468]; { NI64 asize0; if (!((*it0).kind == ((Tnodekind290020) 97))) goto 
LA7; { Tnode290802* LOC11; LOC11 = (Tnode290802*)0; LOC11 = lastson_293364_850551059(it0); if (!!(((*LOC11).kind == ((Tnodekind290020) 85)))) goto LA12; localerror_194085_155036129((*it0).info, ((NimStringDesc*) &T839829468_570)); goto BeforeRet; } LA12: ; casepos0 = i_543768_839829468; asize0 = lengthord_318007_3876443242((*(*it0).kindU.S6.sons->data[((NI) 0)]).typ); { if (!(IL64(10000) < asize0)) goto LA16; localerror_194085_155036129((*it0).info, ((NimStringDesc*) &T839829468_571)); goto BeforeRet; } LA16: ; arraysize0 = ((NI) (asize0)); { NI64 LOC20; LOC20 = (NI64)0; LOC20 = firstord_318001_3876443242((*(*it0).kindU.S6.sons->data[((NI) 0)]).typ); if (!!((LOC20 == IL64(0)))) goto LA21; localerror_194085_155036129((*it0).info, ((NimStringDesc*) &T839829468_572)); goto BeforeRet; } LA21: ; } LA7: ; res_543936_839829468 += ((NI) 1); } LA4: ; } } { if (!(casepos0 < ((NI) 0))) goto LA25; localerror_194085_155036129((*n0).info, ((NimStringDesc*) &T839829468_573)); goto BeforeRet; } LA25: ; id0 = (NI)(((NI) ((*p0).labels)) + ((NI) 1)); (*p0).labels += (NI)(arraysize0 + ((NI) 1)); memset((void*)LOC27, 0, sizeof(LOC27)); LOC27[0] = rope_177401_2381377266(((NI64) (id0))); tmp0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_574), LOC27, 1); memset((void*)LOC28, 0, sizeof(LOC28)); LOC28[0] = tmp0; LOC28[1] = rope_177401_2381377266(((NI64) (arraysize0))); gotoarray0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_575), LOC28, 2); { NI i_543819_839829468; NI HEX3Atmp_543941_839829468; NI res_543944_839829468; i_543819_839829468 = (NI)0; HEX3Atmp_543941_839829468 = (NI)0; HEX3Atmp_543941_839829468 = (NI)(arraysize0 - ((NI) 1)); res_543944_839829468 = ((NI) 1); { while (1) { TY177507 LOC32; if (!(res_543944_839829468 <= HEX3Atmp_543941_839829468)) goto LA31; i_543819_839829468 = res_543944_839829468; memset((void*)LOC32, 0, sizeof(LOC32)); LOC32[0] = rope_177401_2381377266(((NI64) ((NI)(((NI) (id0)) + i_543819_839829468)))); addf_178205_2381377266(&gotoarray0, 
((NimStringDesc*) &T839829468_576), LOC32, 1); res_543944_839829468 += ((NI) 1); } LA31: ; } } memset((void*)LOC33, 0, sizeof(LOC33)); LOC33[0] = rope_177401_2381377266(((NI64) ((NI)(((NI) (id0)) + arraysize0)))); addf_178205_2381377266(&gotoarray0, ((NimStringDesc*) &T839829468_577), LOC33, 1); line_530690_839829468(p0, ((Tcprocsection527011) 0), gotoarray0); topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1)); oldbody0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection527011) 2))- 0]; asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection527011) 2))- 0]), NIM_NIL); { NI j_543854_839829468; NI HEX3Atmp_543949_839829468; NI HEX3Atmp_543950_839829468; NI LOC35; NI res_543953_839829468; j_543854_839829468 = (NI)0; HEX3Atmp_543949_839829468 = (NI)0; HEX3Atmp_543950_839829468 = (NI)0; HEX3Atmp_543949_839829468 = (NI)(casepos0 + ((NI) 1)); LOC35 = (NI)0; LOC35 = len_291081_850551059(n0); HEX3Atmp_543950_839829468 = (LOC35 - 1); res_543953_839829468 = HEX3Atmp_543949_839829468; { while (1) { if (!(res_543953_839829468 <= HEX3Atmp_543950_839829468)) goto LA37; j_543854_839829468 = res_543953_839829468; genstmts_537244_839829468(p0, (*n0).kindU.S6.sons->data[j_543854_839829468]); res_543953_839829468 += ((NI) 1); } LA37: ; } } tailb0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection527011) 2))- 0]; asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection527011) 2))- 0]), NIM_NIL); { NI j_543866_839829468; NI HEX3Atmp_543958_839829468; NI res_543961_839829468; j_543866_839829468 = (NI)0; HEX3Atmp_543958_839829468 = (NI)0; HEX3Atmp_543958_839829468 = (NI)(casepos0 - ((NI) 1)); res_543961_839829468 = ((NI) 0); { while (1) { if (!(res_543961_839829468 <= HEX3Atmp_543958_839829468)) goto LA40; j_543866_839829468 = res_543961_839829468; genstmts_537244_839829468(p0, (*n0).kindU.S6.sons->data[j_543866_839829468]); res_543961_839829468 += ((NI) 1); } LA40: ; } } taila0 = 
(*p0).blocks->data[topblock0].sections[(((Tcprocsection527011) 2))- 0]; asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection527011) 2))- 0]), HEX26_177418_2381377266(oldbody0, taila0)); casestmt0 = (*n0).kindU.S6.sons->data[casepos0]; memset((void*)(&a_543871_839829468), 0, sizeof(a_543871_839829468)); initlocexpr_537283_839829468(p0, (*casestmt0).kindU.S6.sons->data[((NI) 0)], (&a_543871_839829468)); memset((void*)LOC41, 0, sizeof(LOC41)); LOC41[0] = tmp0; LOC41[1] = rdloc_536188_839829468(a_543871_839829468); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_578), LOC41, 2); { NI i_543894_839829468; NI HEX3Atmp_543977_839829468; NI LOC43; NI res_543980_839829468; i_543894_839829468 = (NI)0; HEX3Atmp_543977_839829468 = (NI)0; LOC43 = (NI)0; LOC43 = len_291081_850551059(casestmt0); HEX3Atmp_543977_839829468 = (LOC43 - 1); res_543980_839829468 = ((NI) 1); { while (1) { TY531289 LOC46; NI LOC47; Tnode290802* it0; Tnode290802* LOC57; Ropeobj177006** LOC58; Ropeobj177006** LOC59; Tloc290816 a0; TY530811 LOC60; if (!(res_543980_839829468 <= HEX3Atmp_543977_839829468)) goto LA45; i_543894_839829468 = res_543980_839829468; memset((void*)LOC46, 0, sizeof(LOC46)); LOC47 = (NI)0; LOC47 = startblock_541978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC46, 0); it0 = (*casestmt0).kindU.S6.sons->data[i_543894_839829468]; { NI j_543910_839829468; NI HEX3Atmp_543969_839829468; NI LOC49; NI res_543972_839829468; j_543910_839829468 = (NI)0; HEX3Atmp_543969_839829468 = (NI)0; LOC49 = (NI)0; LOC49 = len_291081_850551059(it0); HEX3Atmp_543969_839829468 = (NI)(LOC49 - ((NI) 2)); res_543972_839829468 = ((NI) 0); { while (1) { NI64 val0; TY177507 LOC56; if (!(res_543972_839829468 <= HEX3Atmp_543969_839829468)) goto LA51; j_543910_839829468 = res_543972_839829468; { if (!((*(*it0).kindU.S6.sons->data[j_543910_839829468]).kind == ((Tnodekind290020) 44))) goto LA54; localerror_194085_155036129((*it0).info, 
((NimStringDesc*) &T839829468_579)); goto BeforeRet; } LA54: ; val0 = getordvalue_318129_3876443242((*it0).kindU.S6.sons->data[j_543910_839829468]); memset((void*)LOC56, 0, sizeof(LOC56)); LOC56[0] = intliteral_537270_839829468((NI64)((NI64)(val0 + ((NI64) (id0))) + IL64(1))); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_580), LOC56, 1); res_543972_839829468 += ((NI) 1); } LA51: ; } } LOC57 = (Tnode290802*)0; LOC57 = lastson_293364_850551059(it0); genstmts_537244_839829468(p0, LOC57); LOC58 = (Ropeobj177006**)0; LOC58 = s_527179_3723162438(p0, ((Tcprocsection527011) 2)); add_177482_2381377266(LOC58, tailb0); LOC59 = (Ropeobj177006**)0; LOC59 = s_527179_3723162438(p0, ((Tcprocsection527011) 2)); add_177482_2381377266(LOC59, taila0); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*casestmt0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC60, 0, sizeof(LOC60)); LOC60[0] = tmp0; LOC60[1] = rdloc_536188_839829468(a0); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_578), LOC60, 2); endblock_542060_839829468(p0); res_543980_839829468 += ((NI) 1); } LA45: ; } } }BeforeRet: ; } N_NIMCALL(void, genwhilestmt_543984_839829468)(Tcproc527021* p0, Tnode290802* t0) { Tloc290816 a0; NI oldbreakidx_544011_839829468; TY531289 LOC1; Tnode290802* loopbody0; memset((void*)(&a0), 0, sizeof(a0)); (*p0).withinloop += ((NI) 1); genlinedir_530823_839829468(p0, t0); oldbreakidx_544011_839829468 = (*p0).breakidx; memset((void*)LOC1, 0, sizeof(LOC1)); (*p0).breakidx = startblock_541978_839829468(p0, ((NimStringDesc*) &T839829468_569), LOC1, 0); (*p0).blocks->data[(*p0).breakidx].isloop = NIM_TRUE; initlocexpr_537283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0)); { NIM_BOOL LOC4; Ropeobj177006* label0; TY530811 LOC8; LOC4 = (NIM_BOOL)0; LOC4 = !(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 6))); if (LOC4) goto LA5; LOC4 = 
/* NOTE(review): machine-generated C from the Nim compiler's C backend
 * (nimcache output). Identifiers are mangled Nim symbols; control flow is
 * goto/label based. Do NOT hand-edit logic here — regenerate from the Nim
 * sources. Comments below are navigational only; all code tokens unchanged.
 * This span holds: the tail of genwhilestmt, gengotovar, varindynamiclib,
 * and the head of assignglobalvar (which continues on the following line). */
((*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval == IL64(0)); LA5: ; if (!LOC4) goto LA6; label0 = assignlabel_542020_839829468((&(*p0).blocks->data[(*p0).breakidx])); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_536188_839829468(a0); LOC8[1] = label0; linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_555), LOC8, 2); } LA6: ; loopbody0 = (*t0).kindU.S6.sons->data[((NI) 1)]; { NIM_BOOL LOC11; LOC11 = (NIM_BOOL)0; LOC11 = stmtscontainpragma_526083_2036603609(loopbody0, ((Tspecialword273003) 182)); if (!(LOC11)) goto LA12; LOC11 = ((Cc_271413_2528170400[(ccompiler_271431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop271004) 1))&7U)))!=0); LA12: ; if (!LOC11) goto LA13; { NIM_BOOL LOC17; NI LOC18; LOC17 = (NIM_BOOL)0; LOC18 = (NI)0; LOC18 = len_291081_850551059(loopbody0); LOC17 = (LOC18 == ((NI) 2)); if (!(LOC17)) goto LA19; LOC17 = ((*(*loopbody0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 1)); LA19: ; if (!LOC17) goto LA20; loopbody0 = (*loopbody0).kindU.S6.sons->data[((NI) 1)]; } LA20: ; gencomputedgoto_543744_839829468(p0, loopbody0); } goto LA9; LA13: ; { genstmts_537244_839829468(p0, loopbody0); } LA9: ; { TY531289 LOC27; if (!(((*p0).options &(1U<<((NU)(((Toption168009) 19))&31U)))!=0)) goto LA25; memset((void*)LOC27, 0, sizeof(LOC27)); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_581), LOC27, 0); } LA25: ; endblock_542060_839829468(p0); (*p0).breakidx = oldbreakidx_544011_839829468; (*p0).withinloop -= ((NI) 1); }
/* gengotovar: emits C for a goto on a variable value. If the value node's
 * kind is outside [5, 15] (presumably the integer-literal kinds — TODO
 * confirm against Tnodekind290020) it reports a local error (T..._582);
 * otherwise it renders the node's intval as a rope and emits it via
 * format string T..._583 into proc section 2 (the statements section —
 * assumption, verify against Tcprocsection527011). */
N_NIMCALL(void, gengotovar_542258_839829468)(Tcproc527021* p0, Tnode290802* value0) { { if (!!(((*value0).kind >= ((Tnodekind290020) 5) && (*value0).kind <= ((Tnodekind290020) 15)))) goto LA3; localerror_194085_155036129((*value0).info, ((NimStringDesc*) &T839829468_582)); } goto LA1; LA3: ; { TY177507 LOC6; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = rope_177401_2381377266((*value0).kindU.S1.intval); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_583), LOC6, 1); } LA1: ; }
/* varindynamiclib: declares a global that lives in a dynamic library.
 * Loads the lib (loaddynamiclib), sets bit 0 of the symbol's loc.flags,
 * replaces loc.r with a mangled dynlib proc name, bumps the module label
 * counter by 2, then appends: (a) format T..._584 with {tmp, typedesc,
 * lib name, extname-as-cstring} to module file section 16, and (b) a
 * declaration via format T..._585 with {loc.r, typedesc} to section 9.
 * Section numbers' meanings come from Tcfilesection527005 — not visible
 * here; confirm before relying on them. */
N_NIMCALL(void, varindynamiclib_536812_839829468)(Tcgen527027* m0, Tsym290834* sym0) { Tlib290820* lib0; Ropeobj177006* extname0; Ropeobj177006* tmp0; TY533235 LOC1; NimStringDesc* LOC2; TY530811 LOC3; lib0 = (*sym0).annex; extname0 = (*sym0).loc.r; loaddynamiclib_557480_839829468(m0, lib0); (*sym0).loc.flags |= ((NU16)1)<<((((Tlocflag290810) 0))%(sizeof(NU16)*8)); tmp0 = mangledynlibproc_536816_839829468(sym0); asgnRefNoCycle((void**) (&(*sym0).loc.r), tmp0); (*m0).labels += ((NI) 2); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = tmp0; LOC1[1] = gettypedesc_533671_839829468(m0, (*sym0).typ); LOC1[2] = (*lib0).name; LOC2 = (NimStringDesc*)0; LOC2 = HEX24_177856_2381377266(extname0); LOC1[3] = makecstring_189638_155036129(LOC2); appcg_530632_839829468(m0, &(*m0).s[(((Tcfilesection527005) 16))- 0], ((NimStringDesc*) &T839829468_584), LOC1, 4); memset((void*)LOC3, 0, sizeof(LOC3)); LOC3[0] = (*sym0).loc.r; LOC3[1] = gettypedesc_533671_839829468(m0, (*sym0).loc.t); addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 9))- 0], ((NimStringDesc*) &T839829468_585), LOC3, 2); }
/* assignglobalvar (body continues on the next original line): fills the
 * symbol's loc if unset, then routes dynlib-flagged globals through
 * varindynamiclib / mangledynlibproc. */
N_NIMCALL(void, assignglobalvar_536819_839829468)(Tcproc527021* p0, Tsym290834* s0) { { { Ropeobj177006* LOC5; if (!((*s0).loc.k == ((Tlockind290808) 0))) goto LA3; LOC5 = (Ropeobj177006*)0; LOC5 = manglename_531205_839829468(s0); fillloc_530282_839829468((&(*s0).loc), ((Tlockind290808) 3), (*s0).typ, LOC5, ((Tstorageloc290812) 3)); } LA3: ; { Tcgen527027* q0; if (!(((*s0).loc.flags &(1U<<((NU)(((Tlocflag290810) 4))&15U)))!=0)) goto LA8; q0 = findpendingmodule_530241_839829468((*p0).module, s0); { NIM_BOOL LOC12; NIM_BOOL LOC14; LOC12 = (NIM_BOOL)0; LOC12 = !((q0 == NIM_NIL)); if (!(LOC12)) goto LA13; LOC14 = (NIM_BOOL)0; LOC14 = containsorincl_266862_2627731572((&(*q0).declaredthings), (*s0).Sup.id); LOC12 = !(LOC14); LA13: ; if (!LOC12) goto LA15; varindynamiclib_536812_839829468(q0, s0); } goto LA10; LA15: ; { 
asgnRefNoCycle((void**) (&(*s0).loc.r), mangledynlibproc_536816_839829468(s0)); } LA10: ; goto BeforeRet; } LA8: ; useheader_530369_839829468((*p0).module, s0); { if (!(((*s0).loc.flags &(1U<<((NU)(((Tlocflag290810) 3))&15U)))!=0)) goto LA20; goto BeforeRet; } LA20: ; { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag290184) 22))&31U)))!=0)) goto LA24; declarethreadvar_536676_839829468((*p0).module, s0, (((*s0).flags &(1U<<((NU)(((Tsymflag290184) 5))&31U)))!=0)); } goto LA22; LA24: ; { Ropeobj177006* decl0; Ropeobj177006* td0; decl0 = NIM_NIL; td0 = gettypedesc_533671_839829468((*p0).module, (*s0).loc.t); { TY177507 LOC43; if (!(*s0).constraint == 0) goto LA29; { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag290184) 5))&31U)))!=0)) goto LA33; add_177487_2381377266(&decl0, ((NimStringDesc*) &T839829468_240)); } LA33: ; add_177482_2381377266(&decl0, td0); { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag290184) 8))&31U)))!=0)) goto LA37; add_177487_2381377266(&decl0, ((NimStringDesc*) &T839829468_121)); } LA37: ; { if (!(((*s0).flags &(1U<<((NU)(((Tsymflag290184) 7))&31U)))!=0)) goto LA41; add_177487_2381377266(&decl0, ((NimStringDesc*) &T839829468_122)); } LA41: ; memset((void*)LOC43, 0, sizeof(LOC43)); LOC43[0] = (*s0).loc.r; addf_178205_2381377266(&decl0, ((NimStringDesc*) &T839829468_242), LOC43, 1); } goto LA27; LA29: ; { NimStringDesc* LOC45; TY530811 LOC46; LOC45 = (NimStringDesc*)0; LOC45 = rawNewString((*(*s0).constraint).kindU.S3.strval->Sup.len + 3); appendString(LOC45, (*(*s0).constraint).kindU.S3.strval); appendString(LOC45, ((NimStringDesc*) &T839829468_497)); memset((void*)LOC46, 0, sizeof(LOC46)); LOC46[0] = td0; LOC46[1] = (*s0).loc.r; decl0 = HEX25_177905_2381377266(LOC45, LOC46, 2); } LA27: ; add_177482_2381377266(&(*(*p0).module).s[(((Tcfilesection527005) 9))- 0], decl0); } LA22: ; { if (!(((NI) 0) < (*p0).withinloop)) goto LA49; resetloc_536350_839829468(p0, (&(*s0).loc)); } LA49: ; { TY533238 LOC55; NimStringDesc* LOC56; NimStringDesc* LOC57; if 
(!(((*(*(*p0).module).module).options & 163840) == 163840)) goto LA53; memset((void*)LOC55, 0, sizeof(LOC55)); LOC56 = (NimStringDesc*)0; LOC56 = rawNewString((*(*(*s0).owner).name).s->Sup.len + (*(*s0).name).s->Sup.len + 1); appendString(LOC56, (*(*(*s0).owner).name).s); appendChar(LOC56, 46); appendString(LOC56, (*(*s0).name).s); LOC57 = (NimStringDesc*)0; LOC57 = nsuNormalize(LOC56); LOC55[0] = makecstring_189638_155036129(LOC57); LOC55[1] = (*s0).loc.r; LOC55[2] = gentypeinfo_533941_839829468((*p0).module, (*s0).typ); appcg_530632_839829468((*p0).module, &(*(*p0).module).s[(((Tcfilesection527005) 15))- 0], ((NimStringDesc*) &T839829468_586), LOC55, 3); } LA53: ; }BeforeRet: ; } N_NIMCALL(Ropeobj177006*, gentraverseprocforglobal_536032_839829468)(Tcgen527027* m0, Tsym290834* s0) { Ropeobj177006* result0; Ropeobj177006* LOC1; Ttraversalclosure535019 c0; Tcproc527021* p0; Ropeobj177006* sloc0; Ropeobj177006* header0; TY177507 LOC8; Ropeobj177006* generatedproc0; TY533235 LOC9; Ropeobj177006** LOC10; Ropeobj177006** LOC11; Ropeobj177006** LOC12; TY177507 LOC13; result0 = (Ropeobj177006*)0; LOC1 = (Ropeobj177006*)0; LOC1 = gentypeinfo_533941_839829468(m0, (*s0).loc.t); memset((void*)(&c0), 0, sizeof(c0)); p0 = newproc_527206_3723162438(NIM_NIL, m0); sloc0 = (*s0).loc.r; result0 = gettempname_531596_839829468(m0); { NIM_BOOL LOC4; LOC4 = (NIM_BOOL)0; LOC4 = (((*s0).flags &(1U<<((NU)(((Tsymflag290184) 22))&31U)))!=0); if (!(LOC4)) goto LA5; LOC4 = emulatedthreadvars_530949_839829468(); LA5: ; if (!LOC4) goto LA6; accessthreadlocalvar_530945_839829468(p0, s0); sloc0 = HEX26_177452_2381377266(((NimStringDesc*) &T839829468_288), sloc0); } LA6: ; c0.visitorfrmt = copyString(((NimStringDesc*) &T839829468_587)); c0.p = p0; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = result0; header0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_588), LOC8, 1); gentraverseproc_535022_839829468((&c0), sloc0, (*s0).loc.t); memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = header0; 
/* NOTE(review): machine-generated Nim C backend output — do not hand-edit.
 * This line holds: the tail of gentraverseprocforglobal (assembling the
 * generated GC-traversal proc from its three proc sections via format
 * T..._190, emitting its prototype (T..._191) into module section 7 and
 * its body into section 10, and returning the temp proc name), then
 * registergcroot and isassignedimmediately in full, then the head of
 * genasgncall (continued on the next original line). */
LOC10 = (Ropeobj177006**)0; LOC10 = s_527179_3723162438(p0, ((Tcprocsection527011) 0)); LOC9[1] = (*LOC10); LOC11 = (Ropeobj177006**)0; LOC11 = s_527179_3723162438(p0, ((Tcprocsection527011) 1)); LOC9[2] = (*LOC11); LOC12 = (Ropeobj177006**)0; LOC12 = s_527179_3723162438(p0, ((Tcprocsection527011) 2)); LOC9[3] = (*LOC12); generatedproc0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_190), LOC9, 4); memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = header0; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 7))- 0], ((NimStringDesc*) &T839829468_191), LOC13, 1); add_177482_2381377266(&(*m0).s[(((Tcfilesection527005) 10))- 0], generatedproc0); return result0; }
/* registergcroot: when the selected GC is in bit mask 0xF0 (240) — i.e.
 * one of the GC modes whose bit index gselectedgc sets there; which modes
 * those are is not visible here — and the variable's type contains
 * GC-managed references, it generates a traversal proc for the global and
 * appends a registration call (format T..._589) to section 1 of the
 * module's init proc. Otherwise it is a no-op. */
N_NIMCALL(void, registergcroot_541762_839829468)(Tcproc527021* p0, Tsym290834* v0) { { NIM_BOOL LOC3; Ropeobj177006* prc0; Ropeobj177006** LOC7; TY177507 LOC8; LOC3 = (NIM_BOOL)0; LOC3 = ((240 &(1U<<((NU)(gselectedgc_168133_2607990831)&7U)))!=0); if (!(LOC3)) goto LA4; LOC3 = containsgarbagecollectedref_318117_3876443242((*v0).loc.t); LA4: ; if (!LOC3) goto LA5; prc0 = gentraverseprocforglobal_536032_839829468((*p0).module, v0); LOC7 = (Ropeobj177006**)0; LOC7 = procsec_527194_3723162438((*(*p0).module).initproc, ((Tcprocsection527011) 1)); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = prc0; appcg_530632_839829468((*p0).module, LOC7, ((NimStringDesc*) &T839829468_589), LOC8, 1); } LA5: ; }
/* isassignedimmediately: returns false when the initializer node's kind is
 * 1 (presumably nkEmpty, i.e. no initializer — confirm against
 * Tnodekind290020) or when its type is an "invalid return type" per
 * isinvalidreturntype (must be returned via out-parameter); true otherwise. */
static N_INLINE(NIM_BOOL, isassignedimmediately_541781_839829468)(Tnode290802* n0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; { if (!((*n0).kind == ((Tnodekind290020) 1))) goto LA3; result0 = NIM_FALSE; goto BeforeRet; } LA3: ; { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = isinvalidreturntype_531548_839829468((*n0).typ); if (!LOC7) goto LA8; result0 = NIM_FALSE; goto BeforeRet; } LA8: ; result0 = NIM_TRUE; }BeforeRet: ; return result0; }
/* genasgncall (continues on the next original line): dispatches a call
 * assignment to closure / infix / named-param / prefix call generators
 * depending on callee convention and symbol flags. */
N_NIMCALL(void, genasgncall_541695_839829468)(Tcproc527021* p0, Tnode290802* le0, Tnode290802* ri0, Tloc290816* d0) { { Ttype290840* LOC3; LOC3 = (Ttype290840*)0; LOC3 = 
skiptypes_294099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, 2048); if (!((*LOC3).callconv == ((Tcallingconvention290002) 8))) goto LA4; genclosurecall_538452_839829468(p0, le0, ri0, d0); } goto LA1; LA4: ; { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = ((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 3)); if (!(LOC7)) goto LA8; LOC7 = (((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA8: ; if (!LOC7) goto LA9; geninfixcall_539929_839829468(p0, le0, ri0, d0); } goto LA1; LA9: ; { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = ((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 3)); if (!(LOC12)) goto LA13; LOC12 = (((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag290184) 28))&31U)))!=0); LA13: ; if (!LOC12) goto LA14; gennamedparamcall_540616_839829468(p0, ri0, d0); } goto LA1; LA14: ; { genprefixcall_537960_839829468(p0, le0, ri0, d0); } LA1: ; poststmtactions_530942_839829468(p0); } static N_INLINE(void, loadinto_541928_839829468)(Tcproc527021* p0, Tnode290802* le0, Tnode290802* ri0, Tloc290816* a0) { { NIM_BOOL LOC3; NIM_BOOL LOC5; LOC3 = (NIM_BOOL)0; LOC3 = ((*ri0).kind == ((Tnodekind290020) 27) || (*ri0).kind == ((Tnodekind290020) 29) || (*ri0).kind == ((Tnodekind290020) 30) || (*ri0).kind == ((Tnodekind290020) 31) || (*ri0).kind == ((Tnodekind290020) 26) || (*ri0).kind == ((Tnodekind290020) 28) || (*ri0).kind == ((Tnodekind290020) 32)); if (!(LOC3)) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = !(((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 3))); if (LOC5) goto LA6; LOC5 = ((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).magic == ((Tmagic290524) 0)); LA6: ; LOC3 = LOC5; LA4: ; if (!LOC3) goto LA7; genasgncall_541695_839829468(p0, le0, ri0, a0); } goto LA1; LA7: ; { if (!((*ri0).kind == ((Tnodekind290020) 47) || (*ri0).kind == ((Tnodekind290020) 65))) goto LA10; genderef_541921_839829468(p0, ri0, a0, 
NIM_TRUE); } goto LA1; LA10: ; { expr_537248_839829468(p0, ri0, a0); } LA1: ; } N_NIMCALL(void, gensinglevar_542276_839829468)(Tcproc527021* p0, Tnode290802* a0) { Tsym290834* v0; Tcproc527021* targetproc0; { v0 = (*(*a0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; { if (!!(((1082130432 & (*v0).flags) == 0))) goto LA3; { if (!(((*v0).flags &(1U<<((NU)(((Tsymflag290184) 30))&31U)))!=0)) goto LA7; gengotovar_542258_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 2)]); } LA7: ; goto BeforeRet; } LA3: ; targetproc0 = p0; { if (!(((*v0).flags &(1U<<((NU)(((Tsymflag290184) 3))&31U)))!=0)) goto LA11; { NIM_BOOL LOC15; NIM_BOOL LOC16; LOC15 = (NIM_BOOL)0; LOC16 = (NIM_BOOL)0; LOC16 = (((*v0).flags & 96) == 32); if (!(LOC16)) goto LA17; LOC16 = ((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind290020) 1)); LA17: ; LOC15 = LOC16; if (!(LOC15)) goto LA18; LOC15 = !((((*v0).loc.flags & 72) == 0)); LA18: ; if (!LOC15) goto LA19; goto BeforeRet; } LA19: ; { if (!(((*v0).flags &(1U<<((NU)(((Tsymflag290184) 9))&31U)))!=0)) goto LA23; targetproc0 = (*(*p0).module).preinitproc; } LA23: ; assignglobalvar_536819_839829468(targetproc0, v0); genobjectinit_536242_839829468((*(*p0).module).preinitproc, ((Tcprocsection527011) 1), (*v0).typ, (*v0).loc, NIM_TRUE); { NIM_BOOL LOC27; LOC27 = (NIM_BOOL)0; LOC27 = (((*v0).flags &(1U<<((NU)(((Tsymflag290184) 6))&31U)))!=0); if (!(LOC27)) goto LA28; LOC27 = !((generatedheader_530201_839829468 == NIM_NIL)); LA28: ; if (!LOC27) goto LA29; genvarprototypeaux_542254_839829468(generatedheader_530201_839829468, v0); } LA29: ; registergcroot_541762_839829468(p0, v0); } goto LA9; LA11: ; { Tnode290802* value0; NIM_BOOL imm0; value0 = (*a0).kindU.S6.sons->data[((NI) 2)]; imm0 = isassignedimmediately_541781_839829468(value0); { NIM_BOOL LOC34; NIM_BOOL LOC35; NIM_BOOL LOC36; NIM_BOOL LOC38; NIM_BOOL LOC42; Ropeobj177006* decl0; Tloc290816 tmp0; LOC34 = (NIM_BOOL)0; LOC35 = (NIM_BOOL)0; LOC36 = (NIM_BOOL)0; LOC36 = imm0; if (!(LOC36)) goto LA37; 
LOC38 = (NIM_BOOL)0; LOC38 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC38) goto LA39; LOC38 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA39: ; LOC36 = LOC38; LA37: ; LOC35 = LOC36; if (!(LOC35)) goto LA40; LOC35 = ((*p0).splitdecls == ((NI) 0)); LA40: ; LOC34 = LOC35; if (!(LOC34)) goto LA41; LOC42 = (NIM_BOOL)0; LOC42 = containshiddenpointer_318120_3876443242((*v0).typ); LOC34 = !(LOC42); LA41: ; if (!LOC34) goto LA43; genlinedir_530823_839829468(p0, a0); decl0 = localvardecl_536532_839829468(p0, v0); memset((void*)(&tmp0), 0, sizeof(tmp0)); { NIM_BOOL LOC47; NIM_BOOL LOC48; Tnode290802* LOC50; Tnode290802* LOC52; Ropeobj177006* params0; Ttype290840* typ0; TY530811 LOC66; LOC47 = (NIM_BOOL)0; LOC48 = (NIM_BOOL)0; LOC48 = ((*value0).kind == ((Tnodekind290020) 27) || (*value0).kind == ((Tnodekind290020) 29) || (*value0).kind == ((Tnodekind290020) 30) || (*value0).kind == ((Tnodekind290020) 31) || (*value0).kind == ((Tnodekind290020) 26) || (*value0).kind == ((Tnodekind290020) 28) || (*value0).kind == ((Tnodekind290020) 32)); if (!(LOC48)) goto LA49; LOC50 = (Tnode290802*)0; LOC50 = HEX5BHEX5D_291238_850551059(value0, ((NI) 0)); LOC48 = ((*LOC50).kind == ((Tnodekind290020) 3)); LA49: ; LOC47 = LOC48; if (!(LOC47)) goto LA51; LOC52 = (Tnode290802*)0; LOC52 = HEX5BHEX5D_291238_850551059(value0, ((NI) 0)); LOC47 = (((*(*LOC52).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag290184) 24))&31U)))!=0); LA51: ; if (!LOC47) goto LA53; params0 = (Ropeobj177006*)0; typ0 = skiptypes_294099_850551059((*(*value0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256)); { NI i_542619_839829468; NI HEX3Atmp_542825_839829468; NI LOC56; NI res_542828_839829468; i_542619_839829468 = (NI)0; HEX3Atmp_542825_839829468 = (NI)0; LOC56 = (NI)0; LOC56 = len_291081_850551059(value0); HEX3Atmp_542825_839829468 = (LOC56 - 1); res_542828_839829468 = ((NI) 1); { while (1) { Ropeobj177006* LOC65; if (!(res_542828_839829468 <= 
HEX3Atmp_542825_839829468)) goto LA58; i_542619_839829468 = res_542828_839829468; { TY531289 LOC63; Ropeobj177006* LOC64; if (!!((params0 == NIM_NIL))) goto LA61; memset((void*)LOC63, 0, sizeof(LOC63)); LOC64 = (Ropeobj177006*)0; LOC64 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_110), LOC63, 0); add_177482_2381377266(&params0, LOC64); } LA61: ; LOC65 = (Ropeobj177006*)0; LOC65 = genotherarg_537277_839829468(p0, value0, i_542619_839829468, typ0); add_177482_2381377266(&params0, LOC65); res_542828_839829468 += ((NI) 1); } LA58: ; } } memset((void*)LOC66, 0, sizeof(LOC66)); LOC66[0] = decl0; LOC66[1] = params0; linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_590), LOC66, 2); } goto LA45; LA53: ; { TY530811 LOC68; initlocexprsingleuse_537289_839829468(p0, value0, (&tmp0)); memset((void*)LOC68, 0, sizeof(LOC68)); LOC68[0] = decl0; LOC68[1] = rdloc_536188_839829468(tmp0); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_591), LOC68, 2); } LA45: ; goto BeforeRet; } LA43: ; assignlocalvar_536614_839829468(p0, v0); initlocalvar_536398_839829468(p0, v0, imm0); } LA9: ; { if (!!(((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind290020) 1)))) goto LA71; genlinedir_530823_839829468(targetproc0, a0); loadinto_541928_839829468(targetproc0, (*a0).kindU.S6.sons->data[((NI) 0)], (*a0).kindU.S6.sons->data[((NI) 2)], (&(*v0).loc)); } LA71: ; }BeforeRet: ; } N_NIMCALL(void, genclosurevar_542832_839829468)(Tcproc527021* p0, Tnode290802* a0) { NIM_BOOL immediateasgn0; immediateasgn0 = !(((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind290020) 1))); { Tloc290816 v0; if (!immediateasgn0) goto LA3; memset((void*)(&v0), 0, sizeof(v0)); initlocexpr_537283_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 0)], (&v0)); genlinedir_530823_839829468(p0, a0); loadinto_541928_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 0)], (*a0).kindU.S6.sons->data[((NI) 2)], (&v0)); } LA3: ; } N_NIMCALL(void, 
genvartuple_541794_839829468)(Tcproc527021* p0, Tnode290802* n0) { Tloc290816 tup0; Tloc290816 field0; NI L0; NIM_BOOL uselowering0; Ttype290840* t0; { memset((void*)(&tup0), 0, sizeof(tup0)); memset((void*)(&field0), 0, sizeof(field0)); { if (!!(((*n0).kind == ((Tnodekind290020) 36)))) goto LA3; internalerror_194100_155036129((*n0).info, ((NimStringDesc*) &T839829468_592)); } LA3: ; L0 = sonslen_293351_850551059(n0); uselowering0 = NIM_FALSE; { NI i_541822_839829468; NI HEX3Atmp_541905_839829468; NI res_541908_839829468; i_541822_839829468 = (NI)0; HEX3Atmp_541905_839829468 = (NI)0; HEX3Atmp_541905_839829468 = (NI)(L0 - ((NI) 3)); res_541908_839829468 = ((NI) 0); { while (1) { if (!(res_541908_839829468 <= HEX3Atmp_541905_839829468)) goto LA7; i_541822_839829468 = res_541908_839829468; { Tnode290802* LOC10; LOC10 = (Tnode290802*)0; LOC10 = HEX5BHEX5D_291238_850551059(n0, i_541822_839829468); if (!!(((*LOC10).kind == ((Tnodekind290020) 3)))) goto LA11; uselowering0 = NIM_TRUE; goto LA5; } LA11: ; res_541908_839829468 += ((NI) 1); } LA7: ; } } LA5: ; { Tnode290802* LOC17; if (!uselowering0) goto LA15; LOC17 = (Tnode290802*)0; LOC17 = lowertupleunpacking_431037_2218250499(n0, (*p0).prc); genstmts_537244_839829468(p0, LOC17); goto BeforeRet; } LA15: ; genlinedir_530823_839829468(p0, n0); initlocexpr_537283_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(L0 - ((NI) 1))], (&tup0)); t0 = getuniquetype_526640_2036603609(tup0.t); { NI i_541846_839829468; NI HEX3Atmp_541914_839829468; NI res_541917_839829468; i_541846_839829468 = (NI)0; HEX3Atmp_541914_839829468 = (NI)0; HEX3Atmp_541914_839829468 = (NI)(L0 - ((NI) 3)); res_541917_839829468 = ((NI) 0); { while (1) { if (!(res_541917_839829468 <= HEX3Atmp_541914_839829468)) goto LA20; i_541846_839829468 = res_541917_839829468; { Tsym290834* v0; v0 = (*(*n0).kindU.S6.sons->data[i_541846_839829468]).kindU.S4.sym; { if (!(((*v0).flags &(1U<<((NU)(((Tsymflag290184) 23))&31U)))!=0)) goto LA24; goto LA21; } LA24: ; { if 
(!(((*v0).flags &(1U<<((NU)(((Tsymflag290184) 3))&31U)))!=0)) goto LA28; assignglobalvar_536819_839829468(p0, v0); genobjectinit_536242_839829468(p0, ((Tcprocsection527011) 1), (*v0).typ, (*v0).loc, NIM_TRUE); registergcroot_541762_839829468(p0, v0); } goto LA26; LA28: ; { Tnode290802* LOC31; NIM_BOOL LOC32; assignlocalvar_536614_839829468(p0, v0); LOC31 = (Tnode290802*)0; LOC31 = HEX5BHEX5D_291238_850551059(n0, (NI)(L0 - ((NI) 1))); LOC32 = (NIM_BOOL)0; LOC32 = isassignedimmediately_541781_839829468(LOC31); initlocalvar_536398_839829468(p0, v0, LOC32); } LA26: ; initloc_530273_839829468((&field0), ((Tlockind290808) 6), (*t0).sons->data[i_541846_839829468], tup0.s); { TY530811 LOC37; if (!((*t0).kind == ((Ttypekind290244) 18))) goto LA35; memset((void*)LOC37, 0, sizeof(LOC37)); LOC37[0] = rdloc_536188_839829468(tup0); LOC37[1] = rope_177401_2381377266(((NI64) (i_541846_839829468))); field0.r = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_185), LOC37, 2); } goto LA33; LA35: ; { TY530811 LOC43; { if (!!(((*(*(*t0).n).kindU.S6.sons->data[i_541846_839829468]).kind == ((Tnodekind290020) 3)))) goto LA41; internalerror_194100_155036129((*n0).info, ((NimStringDesc*) &T839829468_592)); } LA41: ; memset((void*)LOC43, 0, sizeof(LOC43)); LOC43[0] = rdloc_536188_839829468(tup0); LOC43[1] = manglerecfieldname_532361_839829468((*(*(*t0).n).kindU.S6.sons->data[i_541846_839829468]).kindU.S4.sym, t0); field0.r = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_90), LOC43, 2); } LA33: ; putlocintodest_537258_839829468(p0, (&(*v0).loc), field0); } LA21: ; res_541917_839829468 += ((NI) 1); } LA20: ; } } }BeforeRet: ; } N_NIMCALL(void, genvarstmt_542854_839829468)(Tcproc527021* p0, Tnode290802* n0) { { NI i_542869_839829468; NI HEX3Atmp_542902_839829468; NI LOC2; NI res_542905_839829468; i_542869_839829468 = (NI)0; HEX3Atmp_542902_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_293351_850551059(n0); HEX3Atmp_542902_839829468 = (NI)(LOC2 - ((NI) 1)); res_542905_839829468 = 
/* NOTE(review): machine-generated Nim C backend output — do not hand-edit.
 * This line holds: the dispatch loop of genvarstmt (for each son: kind 125
 * is skipped; kind 35 routes to gensinglevar when its first son is a
 * symbol (kind 3), else genclosurevar; anything else goes to genvartuple
 * — the numeric kinds map to Tnodekind290020 values not visible here),
 * then emitlazily in full, then the head of genconststmt (continued on
 * the next original line). */
((NI) 0); { while (1) { if (!(res_542905_839829468 <= HEX3Atmp_542902_839829468)) goto LA4; i_542869_839829468 = res_542905_839829468; { Tnode290802* a0; a0 = (*n0).kindU.S6.sons->data[i_542869_839829468]; { if (!((*a0).kind == ((Tnodekind290020) 125))) goto LA8; goto LA5; } LA8: ; { if (!((*a0).kind == ((Tnodekind290020) 35))) goto LA12; { if (!((*(*a0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 3))) goto LA16; gensinglevar_542276_839829468(p0, a0); } goto LA14; LA16: ; { genclosurevar_542832_839829468(p0, a0); } LA14: ; } goto LA10; LA12: ; { genvartuple_541794_839829468(p0, a0); } LA10: ; } LA5: ; res_542905_839829468 += ((NI) 1); } LA4: ; } } }
/* emitlazily: true when global-option bit 2 is set in gglobaloptions, or
 * when the symbol's owning module has flag bit 25 of Tsymflag290184 set
 * (short-circuit OR rendered as goto). Which option/flag those bits name
 * is not visible in this chunk — confirm against the enum definitions. */
static N_INLINE(NIM_BOOL, emitlazily_530248_839829468)(Tsym290834* s0) { NIM_BOOL result0; NIM_BOOL LOC1; Tsym290834* LOC3; result0 = (NIM_BOOL)0; LOC1 = (NIM_BOOL)0; LOC1 = ((gglobaloptions_168130_2607990831 &((NU64)1<<((NU)(((Tglobaloption168013) 2))&63U)))!=0); if (LOC1) goto LA2; LOC3 = (Tsym290834*)0; LOC3 = getmodule_297123_2984716966(s0); LOC1 = (((*LOC3).flags &(1U<<((NU)(((Tsymflag290184) 25))&31U)))!=0); LA2: ; result0 = LOC1; return result0; }
/* genconststmt (continues on the next original line): iterates the const
 * section's sons and requests const implementations where needed. */
N_NIMCALL(void, genconststmt_542909_839829468)(Tcproc527021* p0, Tnode290802* t0) { { NI i_542924_839829468; NI HEX3Atmp_542975_839829468; NI LOC2; NI res_542978_839829468; i_542924_839829468 = (NI)0; HEX3Atmp_542975_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_293351_850551059(t0); HEX3Atmp_542975_839829468 = (NI)(LOC2 - ((NI) 1)); res_542978_839829468 = 
0)]).kindU.S4.sym; { NIM_BOOL LOC16; LOC16 = (NIM_BOOL)0; LOC16 = containscompiletimeonly_326721_3876443242((*c0).typ); if (!LOC16) goto LA17; goto LA5; } goto LA14; LA17: ; { NIM_BOOL LOC20; NIM_BOOL LOC21; NI LOC24; LOC20 = (NIM_BOOL)0; LOC21 = (NIM_BOOL)0; LOC21 = ((*(*c0).typ).kind == ((Ttypekind290244) 4) || (*(*c0).typ).kind == ((Ttypekind290244) 16) || (*(*c0).typ).kind == ((Ttypekind290244) 19) || (*(*c0).typ).kind == ((Ttypekind290244) 18) || (*(*c0).typ).kind == ((Ttypekind290244) 24)); if (!(LOC21)) goto LA22; LOC21 = !((((*c0).loc.flags &(1U<<((NU)(((Tlocflag290810) 3))&15U)))!=0)); LA22: ; LOC20 = LOC21; if (!(LOC20)) goto LA23; LOC24 = (NI)0; LOC24 = len_291081_850551059((*c0).ast); LOC20 = !((LOC24 == ((NI) 0))); LA23: ; if (!LOC20) goto LA25; { NIM_BOOL LOC29; LOC29 = (NIM_BOOL)0; LOC29 = emitlazily_530248_839829468(c0); if (!!(LOC29)) goto LA30; requestconstimpl_537240_839829468(p0, c0); } LA30: ; } goto LA14; LA25: ; LA14: ; } LA5: ; res_542978_839829468 += ((NI) 1); } LA4: ; } } } N_NIMCALL(void, gencasestringbranch_545100_839829468)(Tcproc527021* p0, Tnode290802* b0, Tloc290816 e0, Ropeobj177006* labl0, Ropeobj177006** branches0, NI branches0Len0) { Tloc290816 x0; NI length0; memset((void*)(&x0), 0, sizeof(x0)); length0 = sonslen_293351_850551059(b0); { NI i_545122_839829468; NI HEX3Atmp_545409_839829468; NI res_545412_839829468; i_545122_839829468 = (NI)0; HEX3Atmp_545409_839829468 = (NI)0; HEX3Atmp_545409_839829468 = (NI)(length0 - ((NI) 2)); res_545412_839829468 = ((NI) 0); { while (1) { NI j0; NI64 LOC4; TY533238 LOC5; if (!(res_545412_839829468 <= HEX3Atmp_545409_839829468)) goto LA3; i_545122_839829468 = res_545412_839829468; initlocexpr_537283_839829468(p0, (*b0).kindU.S6.sons->data[i_545122_839829468], (&x0)); LOC4 = (NI64)0; LOC4 = hashstring_526100_2036603609((*(*b0).kindU.S6.sons->data[i_545122_839829468]).kindU.S3.strval); j0 = ((NI) ((NI64)(LOC4 & ((NI64) ((branches0Len0-1)))))); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = 
rdloc_536188_839829468(e0); LOC5[1] = rdloc_536188_839829468(x0); LOC5[2] = labl0; appcg_530632_839829468((*p0).module, &branches0[j0], ((NimStringDesc*) &T839829468_595), LOC5, 3); res_545412_839829468 += ((NI) 1); } LA3: ; } } } N_NIMCALL(void, exprblock_542103_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0) { TY531289 LOC1; NI LOC2; memset((void*)LOC1, 0, sizeof(LOC1)); LOC2 = (NI)0; LOC2 = startblock_541978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC1, 0); expr_537248_839829468(p0, n0, d0); endblock_542060_839829468(p0); } N_NIMCALL(Ropeobj177006*, gencasesecondpass_544965_839829468)(Tcproc527021* p0, Tnode290802* t0, Tloc290816* d0, NI labid0, NI until0) { Ropeobj177006* result0; Ropeobj177006* lend0; result0 = (Ropeobj177006*)0; lend0 = getlabel_537217_839829468(p0); { NI i_544984_839829468; NI res_545017_839829468; i_544984_839829468 = (NI)0; res_545017_839829468 = ((NI) 1); { while (1) { TY177507 LOC10; if (!(res_545017_839829468 <= until0)) goto LA3; i_544984_839829468 = res_545017_839829468; { NIM_BOOL LOC6; LOC6 = (NIM_BOOL)0; LOC6 = ((*d0).k == ((Tlockind290808) 1)); if (!(LOC6)) goto LA7; LOC6 = isemptytype_295440_850551059((*t0).typ); LA7: ; if (!LOC6) goto LA8; (*d0).k = ((Tlockind290808) 0); } LA8: ; memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = rope_177401_2381377266(((NI64) ((NI)(labid0 + i_544984_839829468)))); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_599), LOC10, 1); { NI length0; TY177507 LOC15; if (!((*(*t0).kindU.S6.sons->data[i_544984_839829468]).kind == ((Tnodekind290020) 85))) goto LA13; length0 = sonslen_293351_850551059((*t0).kindU.S6.sons->data[i_544984_839829468]); exprblock_542103_839829468(p0, (*(*t0).kindU.S6.sons->data[i_544984_839829468]).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))], d0); memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = lend0; linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_556), LOC15, 1); } goto LA11; 
LA13: ; { exprblock_542103_839829468(p0, (*(*t0).kindU.S6.sons->data[i_544984_839829468]).kindU.S6.sons->data[((NI) 0)], d0); } LA11: ; res_545017_839829468 += ((NI) 1); } LA3: ; } } result0 = lend0; return result0; } N_NIMCALL(void, gencasegenericbranch_544910_839829468)(Tcproc527021* p0, Tnode290802* b0, Tloc290816 e0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, Ropeobj177006* labl0) { Tloc290816 x0; Tloc290816 y0; NI length0; memset((void*)(&x0), 0, sizeof(x0)); memset((void*)(&y0), 0, sizeof(y0)); length0 = sonslen_293351_850551059(b0); { NI i_544932_839829468; NI HEX3Atmp_544958_839829468; NI res_544961_839829468; i_544932_839829468 = (NI)0; HEX3Atmp_544958_839829468 = (NI)0; HEX3Atmp_544958_839829468 = (NI)(length0 - ((NI) 2)); res_544961_839829468 = ((NI) 0); { while (1) { if (!(res_544961_839829468 <= HEX3Atmp_544958_839829468)) goto LA3; i_544932_839829468 = res_544961_839829468; { TY533235 LOC8; if (!((*(*b0).kindU.S6.sons->data[i_544932_839829468]).kind == ((Tnodekind290020) 44))) goto LA6; initlocexpr_537283_839829468(p0, (*(*b0).kindU.S6.sons->data[i_544932_839829468]).kindU.S6.sons->data[((NI) 0)], (&x0)); initlocexpr_537283_839829468(p0, (*(*b0).kindU.S6.sons->data[i_544932_839829468]).kindU.S6.sons->data[((NI) 1)], (&y0)); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdcharloc_536227_839829468(e0); LOC8[1] = rdcharloc_536227_839829468(x0); LOC8[2] = rdcharloc_536227_839829468(y0); LOC8[3] = labl0; linecg_530707_839829468(p0, ((Tcprocsection527011) 2), rangeformat0, LOC8, 4); } goto LA4; LA6: ; { TY533238 LOC10; initlocexpr_537283_839829468(p0, (*b0).kindU.S6.sons->data[i_544932_839829468], (&x0)); memset((void*)LOC10, 0, sizeof(LOC10)); LOC10[0] = rdcharloc_536227_839829468(e0); LOC10[1] = rdcharloc_536227_839829468(x0); LOC10[2] = labl0; linecg_530707_839829468(p0, ((Tcprocsection527011) 2), eqformat0, LOC10, 3); } LA4: ; res_544961_839829468 += ((NI) 1); } LA3: ; } } } N_NIMCALL(Ropeobj177006*, 
genifforcaseuntil_545021_839829468)(Tcproc527021* p0, Tnode290802* t0, Tloc290816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, NI until0, Tloc290816 a0) { Ropeobj177006* result0; NI labid0; result0 = (Ropeobj177006*)0; labid0 = (*p0).labels; { NI i_545042_839829468; NI res_545083_839829468; i_545042_839829468 = (NI)0; res_545083_839829468 = ((NI) 1); { while (1) { if (!(res_545083_839829468 <= until0)) goto LA3; i_545042_839829468 = res_545083_839829468; (*p0).labels += ((NI) 1); { Ropeobj177006* LOC8; Ropeobj177006* LOC9; if (!((*(*t0).kindU.S6.sons->data[i_545042_839829468]).kind == ((Tnodekind290020) 85))) goto LA6; LOC8 = (Ropeobj177006*)0; LOC8 = rope_177401_2381377266(((NI64) ((*p0).labels))); LOC9 = (Ropeobj177006*)0; LOC9 = HEX26_177452_2381377266(((NimStringDesc*) &T839829468_296), LOC8); gencasegenericbranch_544910_839829468(p0, (*t0).kindU.S6.sons->data[i_545042_839829468], a0, rangeformat0, eqformat0, LOC9); } goto LA4; LA6: ; { TY177507 LOC11; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = rope_177401_2381377266(((NI64) ((*p0).labels))); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_598), LOC11, 1); } LA4: ; res_545083_839829468 += ((NI) 1); } LA3: ; } } { NI LOC14; NI gototarget0; TY177507 LOC17; TY177507 LOC18; LOC14 = (NI)0; LOC14 = len_291081_850551059(t0); if (!(until0 < (NI)(LOC14 - ((NI) 1)))) goto LA15; (*p0).labels += ((NI) 1); gototarget0 = (*p0).labels; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = rope_177401_2381377266(((NI64) (gototarget0))); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_598), LOC17, 1); result0 = gencasesecondpass_544965_839829468(p0, t0, d0, ((NI) (labid0)), until0); memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = rope_177401_2381377266(((NI64) (gototarget0))); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_599), LOC18, 1); } goto LA12; LA15: ; { result0 = 
gencasesecondpass_544965_839829468(p0, t0, d0, ((NI) (labid0)), until0); } LA12: ; return result0; } N_NIMCALL(void, gencasegeneric_545087_839829468)(Tcproc527021* p0, Tnode290802* t0, Tloc290816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0) { Tloc290816 a0; Ropeobj177006* lend0; NI LOC1; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0)); LOC1 = (NI)0; LOC1 = sonslen_293351_850551059(t0); lend0 = genifforcaseuntil_545021_839829468(p0, t0, d0, rangeformat0, eqformat0, (NI)(LOC1 - ((NI) 1)), a0); fixlabel_537230_839829468(p0, lend0); } N_NIMCALL(void, genstringcase_545416_839829468)(Tcproc527021* p0, Tnode290802* t0, Tloc290816* d0) { NI strings0; strings0 = ((NI) 0); { NI i_545434_839829468; NI HEX3Atmp_545549_839829468; NI LOC2; NI res_545552_839829468; i_545434_839829468 = (NI)0; HEX3Atmp_545549_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_293351_850551059(t0); HEX3Atmp_545549_839829468 = (NI)(LOC2 - ((NI) 1)); res_545552_839829468 = ((NI) 1); { while (1) { if (!(res_545552_839829468 <= HEX3Atmp_545549_839829468)) goto LA4; i_545434_839829468 = res_545552_839829468; { NI LOC9; if (!((*(*t0).kindU.S6.sons->data[i_545434_839829468]).kind == ((Tnodekind290020) 85))) goto LA7; LOC9 = (NI)0; LOC9 = sonslen_293351_850551059((*t0).kindU.S6.sons->data[i_545434_839829468]); strings0 += (NI)(LOC9 - ((NI) 1)); } LA7: ; res_545552_839829468 += ((NI) 1); } LA4: ; } } { NI bitmask0; NI LOC14; TY189350* branches0; Tloc290816 a0; NI labid0; TY530811 LOC26; TY531289 LOC35; Ropeobj177006* lend0; NI LOC42; if (!(((NI) 8) < strings0)) goto LA12; LOC14 = (NI)0; LOC14 = nextpoweroftwo_100629_1009420244(strings0); bitmask0 = (NI)(LOC14 - ((NI) 1)); branches0 = (TY189350*)0; branches0 = (TY189350*) newSeq((&NTI189350), ((NI) ((NI)(bitmask0 + ((NI) 1))))); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0)); labid0 = (*p0).labels; { NI 
i_545483_839829468; NI HEX3Atmp_545559_839829468; NI LOC16; NI res_545562_839829468; i_545483_839829468 = (NI)0; HEX3Atmp_545559_839829468 = (NI)0; LOC16 = (NI)0; LOC16 = sonslen_293351_850551059(t0); HEX3Atmp_545559_839829468 = (NI)(LOC16 - ((NI) 1)); res_545562_839829468 = ((NI) 1); { while (1) { if (!(res_545562_839829468 <= HEX3Atmp_545559_839829468)) goto LA18; i_545483_839829468 = res_545562_839829468; (*p0).labels += ((NI) 1); { Ropeobj177006* LOC23; Ropeobj177006* LOC24; if (!((*(*t0).kindU.S6.sons->data[i_545483_839829468]).kind == ((Tnodekind290020) 85))) goto LA21; LOC23 = (Ropeobj177006*)0; LOC23 = rope_177401_2381377266(((NI64) ((*p0).labels))); LOC24 = (Ropeobj177006*)0; LOC24 = HEX26_177452_2381377266(((NimStringDesc*) &T839829468_296), LOC23); gencasestringbranch_545100_839829468(p0, (*t0).kindU.S6.sons->data[i_545483_839829468], a0, LOC24, branches0->data, branches0->Sup.len); } goto LA19; LA21: ; { } LA19: ; res_545562_839829468 += ((NI) 1); } LA18: ; } } memset((void*)LOC26, 0, sizeof(LOC26)); LOC26[0] = rdloc_536188_839829468(a0); LOC26[1] = rope_177401_2381377266(((NI64) (bitmask0))); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_596), LOC26, 2); { NI j_545517_839829468; NI HEX3Atmp_545567_839829468; NI res_545570_839829468; j_545517_839829468 = (NI)0; HEX3Atmp_545567_839829468 = (NI)0; HEX3Atmp_545567_839829468 = (branches0 ? 
(branches0->Sup.len-1) : -1); res_545570_839829468 = ((NI) 0); { while (1) { if (!(res_545570_839829468 <= HEX3Atmp_545567_839829468)) goto LA29; j_545517_839829468 = res_545570_839829468; { TY530811 LOC34; if (!!((branches0->data[j_545517_839829468] == NIM_NIL))) goto LA32; memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = intliteral_537270_839829468(((NI64) (j_545517_839829468))); LOC34[1] = branches0->data[j_545517_839829468]; linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_597), LOC34, 2); } LA32: ; res_545570_839829468 += ((NI) 1); } LA29: ; } } memset((void*)LOC35, 0, sizeof(LOC35)); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_160), LOC35, 0); { NI LOC38; TY177507 LOC41; LOC38 = (NI)0; LOC38 = sonslen_293351_850551059(t0); if (!!(((*(*t0).kindU.S6.sons->data[(NI)(LOC38 - ((NI) 1))]).kind == ((Tnodekind290020) 85)))) goto LA39; memset((void*)LOC41, 0, sizeof(LOC41)); LOC41[0] = rope_177401_2381377266(((NI64) ((*p0).labels))); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_598), LOC41, 1); } LA39: ; LOC42 = (NI)0; LOC42 = sonslen_293351_850551059(t0); lend0 = gencasesecondpass_544965_839829468(p0, t0, d0, ((NI) (labid0)), (NI)(LOC42 - ((NI) 1))); fixlabel_537230_839829468(p0, lend0); } goto LA10; LA12: ; { gencasegeneric_545087_839829468(p0, t0, d0, ((NimStringDesc*) &T839829468_490), ((NimStringDesc*) &T839829468_595)); } LA10: ; } N_NIMCALL(void, gengotoforcase_543673_839829468)(Tcproc527021* p0, Tnode290802* casestmt0) { { { NI i_543695_839829468; NI HEX3Atmp_543737_839829468; NI LOC2; NI res_543740_839829468; i_543695_839829468 = (NI)0; HEX3Atmp_543737_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = len_291081_850551059(casestmt0); HEX3Atmp_543737_839829468 = (LOC2 - 1); res_543740_839829468 = ((NI) 1); { while (1) { TY531289 LOC5; NI LOC6; Tnode290802* it0; Tnode290802* LOC16; if (!(res_543740_839829468 <= HEX3Atmp_543737_839829468)) goto LA4; 
i_543695_839829468 = res_543740_839829468; memset((void*)LOC5, 0, sizeof(LOC5)); LOC6 = (NI)0; LOC6 = startblock_541978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC5, 0); it0 = (*casestmt0).kindU.S6.sons->data[i_543695_839829468]; { NI j_543711_839829468; NI HEX3Atmp_543730_839829468; NI LOC8; NI res_543733_839829468; j_543711_839829468 = (NI)0; HEX3Atmp_543730_839829468 = (NI)0; LOC8 = (NI)0; LOC8 = len_291081_850551059(it0); HEX3Atmp_543730_839829468 = (NI)(LOC8 - ((NI) 2)); res_543733_839829468 = ((NI) 0); { while (1) { NI64 val0; TY177507 LOC15; if (!(res_543733_839829468 <= HEX3Atmp_543730_839829468)) goto LA10; j_543711_839829468 = res_543733_839829468; { if (!((*(*it0).kindU.S6.sons->data[j_543711_839829468]).kind == ((Tnodekind290020) 44))) goto LA13; localerror_194085_155036129((*it0).info, ((NimStringDesc*) &T839829468_579)); goto BeforeRet; } LA13: ; val0 = getordvalue_318129_3876443242((*it0).kindU.S6.sons->data[j_543711_839829468]); memset((void*)LOC15, 0, sizeof(LOC15)); LOC15[0] = rope_177401_2381377266(val0); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_602), LOC15, 1); res_543733_839829468 += ((NI) 1); } LA10: ; } } LOC16 = (Tnode290802*)0; LOC16 = lastson_293364_850551059(it0); genstmts_537244_839829468(p0, LOC16); endblock_542060_839829468(p0); res_543740_839829468 += ((NI) 1); } LA4: ; } } }BeforeRet: ; } N_NIMCALL(NIM_BOOL, branchhastoobigrange_545575_839829468)(Tnode290802* b0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; { NI i_545590_839829468; NI HEX3Atmp_545608_839829468; NI LOC2; NI res_545611_839829468; i_545590_839829468 = (NI)0; HEX3Atmp_545608_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_293351_850551059(b0); HEX3Atmp_545608_839829468 = (NI)(LOC2 - ((NI) 2)); res_545611_839829468 = ((NI) 0); { while (1) { if (!(res_545611_839829468 <= HEX3Atmp_545608_839829468)) goto LA4; i_545590_839829468 = res_545611_839829468; { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = 
((*(*b0).kindU.S6.sons->data[i_545590_839829468]).kind == ((Tnodekind290020) 44)); if (!(LOC7)) goto LA8; LOC7 = (IL64(256) < (NI64)((*(*(*b0).kindU.S6.sons->data[i_545590_839829468]).kindU.S6.sons->data[((NI) 1)]).kindU.S1.intval - (*(*(*b0).kindU.S6.sons->data[i_545590_839829468]).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval)); LA8: ; if (!LOC7) goto LA9; result0 = NIM_TRUE; goto BeforeRet; } LA9: ; res_545611_839829468 += ((NI) 1); } LA4: ; } } }BeforeRet: ; return result0; } N_NIMCALL(NI, ifswitchsplitpoint_545615_839829468)(Tcproc527021* p0, Tnode290802* n0) { NI result0; result0 = (NI)0; { NI i_545630_839829468; NI HEX3Atmp_545654_839829468; NI LOC2; NI res_545657_839829468; i_545630_839829468 = (NI)0; HEX3Atmp_545654_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = len_291081_850551059(n0); HEX3Atmp_545654_839829468 = (NI)(LOC2 - ((NI) 1)); res_545657_839829468 = ((NI) 1); { while (1) { Tnode290802* branch0; Tnode290802* stmtblock0; if (!(res_545657_839829468 <= HEX3Atmp_545654_839829468)) goto LA4; i_545630_839829468 = res_545657_839829468; branch0 = HEX5BHEX5D_291238_850551059(n0, i_545630_839829468); stmtblock0 = lastson_293364_850551059(branch0); { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = stmtscontainpragma_526083_2036603609(stmtblock0, ((Tspecialword273003) 181)); if (!LOC7) goto LA8; result0 = i_545630_839829468; } goto LA5; LA8: ; { if (!!(((Cc_271413_2528170400[(ccompiler_271431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop271004) 0))&7U)))!=0))) goto LA11; { NIM_BOOL LOC15; LOC15 = (NIM_BOOL)0; LOC15 = ((*branch0).kind == ((Tnodekind290020) 85)); if (!(LOC15)) goto LA16; LOC15 = branchhastoobigrange_545575_839829468(branch0); LA16: ; if (!LOC15) goto LA17; result0 = i_545630_839829468; } LA17: ; } goto LA5; LA11: ; LA5: ; res_545657_839829468 += ((NI) 1); } LA4: ; } } return result0; } N_NIMCALL(void, genordinalcase_545724_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0) { NI splitpoint0; Tloc290816 a0; Ropeobj177006* lend0; splitpoint0 = 
ifswitchsplitpoint_545615_839829468(p0, n0); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); { if (!(((NI) 0) < splitpoint0)) goto LA3; lend0 = genifforcaseuntil_545021_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_600), ((NimStringDesc*) &T839829468_601), splitpoint0, a0); } goto LA1; LA3: ; { lend0 = NIM_NIL; } LA1: ; { NI LOC8; TY177507 LOC11; NIM_BOOL hasdefault0; TY531289 LOC37; LOC8 = (NI)0; LOC8 = len_291081_850551059(n0); if (!((NI)(splitpoint0 + ((NI) 1)) < LOC8)) goto LA9; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = rdcharloc_536227_839829468(a0); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_603), LOC11, 1); hasdefault0 = NIM_FALSE; { NI i_545757_839829468; NI HEX3Atmp_545816_839829468; NI HEX3Atmp_545817_839829468; NI LOC13; NI res_545820_839829468; i_545757_839829468 = (NI)0; HEX3Atmp_545816_839829468 = (NI)0; HEX3Atmp_545817_839829468 = (NI)0; HEX3Atmp_545816_839829468 = (NI)(splitpoint0 + ((NI) 1)); LOC13 = (NI)0; LOC13 = len_291081_850551059(n0); HEX3Atmp_545817_839829468 = (LOC13 - 1); res_545820_839829468 = HEX3Atmp_545816_839829468; { while (1) { Tnode290802* branch0; Tnode290802* LOC28; TY531289 LOC29; if (!(res_545820_839829468 <= HEX3Atmp_545817_839829468)) goto LA15; i_545757_839829468 = res_545820_839829468; { NIM_BOOL LOC18; LOC18 = (NIM_BOOL)0; LOC18 = ((*d0).k == ((Tlockind290808) 1)); if (!(LOC18)) goto LA19; LOC18 = isemptytype_295440_850551059((*n0).typ); LA19: ; if (!LOC18) goto LA20; (*d0).k = ((Tlockind290808) 0); } LA20: ; branch0 = HEX5BHEX5D_291238_850551059(n0, i_545757_839829468); { if (!((*branch0).kind == ((Tnodekind290020) 85))) goto LA24; gencaserange_535028_839829468(p0, branch0); } goto LA22; LA24: ; { TY531289 LOC27; memset((void*)LOC27, 0, sizeof(LOC27)); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_181), LOC27, 0); hasdefault0 = NIM_TRUE; } LA22: ; LOC28 = 
(Tnode290802*)0; LOC28 = lastson_293364_850551059(branch0); exprblock_542103_839829468(p0, LOC28, d0); memset((void*)LOC29, 0, sizeof(LOC29)); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_182), LOC29, 0); res_545820_839829468 += ((NI) 1); } LA15: ; } } { NIM_BOOL LOC32; TY531289 LOC36; LOC32 = (NIM_BOOL)0; LOC32 = ((Cc_271413_2528170400[(ccompiler_271431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop271004) 3))&7U)))!=0); if (!(LOC32)) goto LA33; LOC32 = !(hasdefault0); LA33: ; if (!LOC32) goto LA34; memset((void*)LOC36, 0, sizeof(LOC36)); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_604), LOC36, 0); } LA34: ; memset((void*)LOC37, 0, sizeof(LOC37)); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_160), LOC37, 0); } LA9: ; { if (!!((lend0 == NIM_NIL))) goto LA40; fixlabel_537230_839829468(p0, lend0); } LA40: ; } N_NIMCALL(void, gencase_545826_839829468)(Tcproc527021* p0, Tnode290802* t0, Tloc290816* d0) { Ttype290840* LOC8; genlinedir_530823_839829468(p0, t0); { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = isemptytype_295440_850551059((*t0).typ); LOC3 = !(LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*d0).k == ((Tlockind290808) 0)); LA5: ; if (!LOC3) goto LA6; gettemp_535032_839829468(p0, (*t0).typ, d0, NIM_FALSE); } LA6: ; LOC8 = (Ttype290840*)0; LOC8 = skiptypes_294099_850551059((*(*t0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106242013440)); switch ((*LOC8).kind) { case ((Ttypekind290244) 28): { genstringcase_545416_839829468(p0, t0, d0); } break; case ((Ttypekind290244) 36) ... 
((Ttypekind290244) 39): { gencasegeneric_545087_839829468(p0, t0, d0, ((NimStringDesc*) &T839829468_600), ((NimStringDesc*) &T839829468_601)); } break; default: { { NIM_BOOL LOC14; LOC14 = (NIM_BOOL)0; LOC14 = ((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 3)); if (!(LOC14)) goto LA15; LOC14 = (((*(*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag290184) 30))&31U)))!=0); LA15: ; if (!LOC14) goto LA16; gengotoforcase_543673_839829468(p0, t0); } goto LA12; LA16: ; { genordinalcase_545724_839829468(p0, t0, d0); } LA12: ; } break; } } static N_INLINE(Tnode290802*, pop_316246_1689653243)(Tnodeseq290796** s0) { Tnode290802* result0; NI L0; result0 = (Tnode290802*)0; L0 = (NI)(((*s0) ? (*s0)->Sup.len : 0) - ((NI) 1)); result0 = (*s0)->data[L0]; (*s0) = (Tnodeseq290796*) setLengthSeq(&((*s0))->Sup, sizeof(Tnode290802*), ((NI) (L0))); return result0; } N_NIMCALL(void, blockleaveactions_543442_839829468)(Tcproc527021* p0, NI howmanytrys0, NI howmanyexcepts0) { Tnodeseq290796* stack0; NI alreadypoppedcnt0; stack0 = (Tnodeseq290796*)0; stack0 = (Tnodeseq290796*) newSeq((&NTI290796), ((NI) 0)); alreadypoppedcnt0 = (*p0).inexceptblock; { NI i_543471_839829468; NI res_543596_839829468; i_543471_839829468 = (NI)0; res_543596_839829468 = ((NI) 1); { while (1) { Tnode290802* trystmt0; Tnode290802* finallystmt0; if (!(res_543596_839829468 <= howmanytrys0)) goto LA3; i_543471_839829468 = res_543596_839829468; { NIM_BOOL LOC6; LOC6 = (NIM_BOOL)0; LOC6 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC6) goto LA7; LOC6 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA7: ; if (!!(LOC6)) goto LA8; { if (!(((NI) 0) < alreadypoppedcnt0)) goto LA12; alreadypoppedcnt0 -= ((NI) 1); } goto LA10; LA12: ; { TY531289 LOC15; memset((void*)LOC15, 0, sizeof(LOC15)); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_605), LOC15, 0); } LA10: ; } LA8: ; trystmt0 = 
pop_316246_1689653243((&(*p0).nestedtrystmts)); stack0 = (Tnodeseq290796*) incrSeqV2(&(stack0)->Sup, sizeof(Tnode290802*)); asgnRefNoCycle((void**) (&stack0->data[stack0->Sup.len]), trystmt0); ++stack0->Sup.len; finallystmt0 = lastson_293364_850551059(trystmt0); { if (!((*finallystmt0).kind == ((Tnodekind290020) 107))) goto LA18; genstmts_537244_839829468(p0, (*finallystmt0).kindU.S6.sons->data[((NI) 0)]); } LA18: ; res_543596_839829468 += ((NI) 1); } LA3: ; } } { NI i_543546_839829468; NI HEX3Atmp_543601_839829468; NI res_543604_839829468; i_543546_839829468 = (NI)0; HEX3Atmp_543601_839829468 = (NI)0; HEX3Atmp_543601_839829468 = (NI)(howmanytrys0 - ((NI) 1)); res_543604_839829468 = HEX3Atmp_543601_839829468; { while (1) { if (!(((NI) 0) <= res_543604_839829468)) goto LA22; i_543546_839829468 = res_543604_839829468; (*p0).nestedtrystmts = (Tnodeseq290796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode290802*)); asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), stack0->data[i_543546_839829468]); ++(*p0).nestedtrystmts->Sup.len; res_543604_839829468 -= ((NI) 1); } LA22: ; } } { NIM_BOOL LOC25; LOC25 = (NIM_BOOL)0; LOC25 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC25) goto LA26; LOC25 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA26: ; if (!!(LOC25)) goto LA27; { NI i_543587_839829468; NI HEX3Atmp_543610_839829468; NI res_543613_839829468; i_543587_839829468 = (NI)0; HEX3Atmp_543610_839829468 = (NI)0; HEX3Atmp_543610_839829468 = (NI)(howmanyexcepts0 - ((NI) 1)); res_543613_839829468 = HEX3Atmp_543610_839829468; { while (1) { TY531289 LOC32; if (!(((NI) 0) <= res_543613_839829468)) goto LA31; i_543587_839829468 = res_543613_839829468; memset((void*)LOC32, 0, sizeof(LOC32)); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_606), LOC32, 0); res_543613_839829468 -= ((NI) 1); } LA31: ; } } } LA27: ; } N_NIMCALL(void, 
genreturnstmt_543617_839829468)(Tcproc527021* p0, Tnode290802* t0) { TY531289 LOC14; { { if (!(((*t0).flags &(1U<<((NU)(((Tnodeflag290427) 14))&15U)))!=0)) goto LA3; goto BeforeRet; } LA3: ; (*p0).beforeretneeded = NIM_TRUE; genlinedir_530823_839829468(p0, t0); { if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 1)))) goto LA7; genstmts_537244_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)]); } LA7: ; blockleaveactions_543442_839829468(p0, ((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0), (*p0).inexceptblock); { Ropeobj177006* safepoint0; TY177507 LOC13; if (!(((NI) 0) < ((*p0).finallysafepoints ? (*p0).finallysafepoints->Sup.len : 0))) goto LA11; safepoint0 = (*p0).finallysafepoints->data[(NI)(((*p0).finallysafepoints ? (*p0).finallysafepoints->Sup.len : 0) - ((NI) 1))]; memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = safepoint0; linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_607), LOC13, 1); } LA11: ; memset((void*)LOC14, 0, sizeof(LOC14)); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_608), LOC14, 0); }BeforeRet: ; } N_NIMCALL(void, genbreakstmt_544444_839829468)(Tcproc527021* p0, Tnode290802* t0) { NI idx0; Ropeobj177006* label0; TY177507 LOC16; idx0 = (*p0).breakidx; { Tsym290834* sym0; if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 1)))) goto LA3; sym0 = (*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; idx0 = (NI)((*sym0).position - ((NI) 1)); } goto LA1; LA3: ; { { while (1) { NIM_BOOL LOC8; LOC8 = (NIM_BOOL)0; LOC8 = (((NI) 0) <= idx0); if (!(LOC8)) goto LA9; LOC8 = !((*p0).blocks->data[idx0].isloop); LA9: ; if (!LOC8) goto LA7; idx0 -= ((NI) 1); } LA7: ; } { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = (idx0 < ((NI) 0)); if (LOC12) goto LA13; LOC12 = !((*p0).blocks->data[idx0].isloop); LA13: ; if (!LOC12) goto LA14; internalerror_194100_155036129((*t0).info, ((NimStringDesc*) &T839829468_609)); } LA14: ; } LA1: 
; label0 = assignlabel_542020_839829468((&(*p0).blocks->data[idx0])); blockleaveactions_543442_839829468(p0, (NI)(((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0) - ((NI) ((*p0).blocks->data[idx0].nestedtrystmts))), (NI)((*p0).inexceptblock - ((NI) ((*p0).blocks->data[idx0].nestedexceptstmts)))); genlinedir_530823_839829468(p0, t0); memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = label0; linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_556), LOC16, 1); } N_NIMCALL(NIM_BOOL, fielddiscriminantcheckneeded_547080_839829468)(Tcproc527021* p0, Tnode290802* asgn0) { NIM_BOOL result0; result0 = (NIM_BOOL)0; { Tnode290802* le0; if (!(((*p0).options &(1U<<((NU)(((Toption168009) 2))&31U)))!=0)) goto LA3; le0 = (*asgn0).kindU.S6.sons->data[((NI) 0)]; { Tsym290834* field0; if (!((*le0).kind == ((Tnodekind290020) 46))) goto LA7; field0 = (*(*(*le0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym; result0 = (((*field0).flags &(1U<<((NU)(((Tsymflag290184) 18))&31U)))!=0); } goto LA5; LA7: ; { Tsym290834* field0; if (!((*le0).kind == ((Tnodekind290020) 45))) goto LA10; field0 = (*(*le0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym; result0 = (((*field0).flags &(1U<<((NU)(((Tsymflag290184) 18))&31U)))!=0); } goto LA5; LA10: ; LA5: ; } LA3: ; return result0; } N_NIMCALL(Ropeobj177006*, discriminatortabledecl_534094_839829468)(Tcgen527027* m0, Ttype290840* objtype0, Tsym290834* d0) { Ropeobj177006* result0; Ropeobj177006* LOC1; Ropeobj177006* tmp0; TY530811 LOC2; NI64 LOC3; result0 = (Ropeobj177006*)0; LOC1 = (Ropeobj177006*)0; LOC1 = cgsym_530403_839829468(m0, ((NimStringDesc*) &T839829468_130)); tmp0 = discriminatortablename_534057_839829468(m0, objtype0, d0); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = tmp0; LOC3 = (NI64)0; LOC3 = lengthord_318007_3876443242((*d0).typ); LOC2[1] = rope_177401_2381377266((NI64)(LOC3 + IL64(1))); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_203), LOC2, 2); 
return result0; } N_NIMCALL(void, gendiscriminantcheck_547144_839829468)(Tcproc527021* p0, Tloc290816 a0, Tloc290816 tmp0, Ttype290840* objtype0, Tsym290834* field0) { Ttype290840* t0; Ropeobj177006* LOC1; NI64 L0; TY533235 LOC8; t0 = skiptypes_294099_850551059(objtype0, IL64(211106240964864)); LOC1 = (Ropeobj177006*)0; LOC1 = gentypeinfo_533941_839829468((*p0).module, t0); L0 = lengthord_318007_3876443242((*field0).typ); { NIM_BOOL LOC4; TY177507 LOC7; LOC4 = (NIM_BOOL)0; LOC4 = containsorincl_266862_2627731572((&(*(*p0).module).declaredthings), (*field0).Sup.id); if (!!(LOC4)) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = discriminatortabledecl_534094_839829468((*p0).module, t0, field0); appcg_530640_839829468((*p0).module, ((Tcfilesection527005) 9), ((NimStringDesc*) &T839829468_610), LOC7, 1); } LA5: ; memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = rdloc_536188_839829468(a0); LOC8[1] = rdloc_536188_839829468(tmp0); LOC8[2] = discriminatortablename_534057_839829468((*p0).module, t0, field0); LOC8[3] = intliteral_537270_839829468((NI64)(L0 + IL64(1))); linecg_530707_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_611), LOC8, 4); } N_NIMCALL(void, asgnfielddiscriminant_547209_839829468)(Tcproc527021* p0, Tnode290802* e0) { Tloc290816 a0; Tloc290816 tmp0; Tnode290802* dotexpr0; memset((void*)(&a0), 0, sizeof(a0)); memset((void*)(&tmp0), 0, sizeof(tmp0)); dotexpr0 = (*e0).kindU.S6.sons->data[((NI) 0)]; { if (!((*dotexpr0).kind == ((Tnodekind290020) 46))) goto LA3; dotexpr0 = (*dotexpr0).kindU.S6.sons->data[((NI) 0)]; } LA3: ; initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); gettemp_535032_839829468(p0, a0.t, (&tmp0), NIM_FALSE); expr_537248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0)); gendiscriminantcheck_547144_839829468(p0, a0, tmp0, (*(*dotexpr0).kindU.S6.sons->data[((NI) 0)]).typ, (*(*dotexpr0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym); genassignment_537264_839829468(p0, a0, 
tmp0, 0); } N_NIMCALL(void, genasgn_547239_839829468)(Tcproc527021* p0, Tnode290802* e0, NIM_BOOL fastasgn0) { genlinedir_530823_839829468(p0, e0); { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 3)); if (!(LOC3)) goto LA4; LOC3 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag290184) 30))&31U)))!=0); LA4: ; if (!LOC3) goto LA5; gengotovar_542258_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)]); } goto LA1; LA5: ; { NIM_BOOL LOC8; Tloc290816 a0; LOC8 = (NIM_BOOL)0; LOC8 = fielddiscriminantcheckneeded_547080_839829468(p0, e0); if (!!(LOC8)) goto LA9; memset((void*)(&a0), 0, sizeof(a0)); { Tnode290802* LOC13; Tnode290802* LOC16; LOC13 = (Tnode290802*)0; LOC13 = HEX5BHEX5D_291238_850551059(e0, ((NI) 0)); if (!((*LOC13).kind == ((Tnodekind290020) 47) || (*LOC13).kind == ((Tnodekind290020) 65))) goto LA14; LOC16 = (Tnode290802*)0; LOC16 = HEX5BHEX5D_291238_850551059(e0, ((NI) 0)); genderef_541921_839829468(p0, LOC16, (&a0), NIM_TRUE); } goto LA11; LA14: ; { initlocexpr_537283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0)); } LA11: ; { if (!fastasgn0) goto LA20; a0.flags |= ((NU16)1)<<((((Tlocflag290810) 2))%(sizeof(NU16)*8)); } LA20: ; loadinto_541928_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (*e0).kindU.S6.sons->data[((NI) 1)], (&a0)); } goto LA1; LA9: ; { asgnfielddiscriminant_547209_839829468(p0, e0); } LA1: ; } N_NIMCALL(Ropeobj177006*, genasmoremitstmt_546529_839829468)(Tcproc527021* p0, Tnode290802* t0, NIM_BOOL isasmstmt0) { Ropeobj177006* result0; NimStringDesc* res0; result0 = (Ropeobj177006*)0; res0 = copyString(((NimStringDesc*) &T839829468_490)); { NI i_546547_839829468; NI HEX3Atmp_546644_839829468; NI LOC2; NI res_546647_839829468; i_546547_839829468 = (NI)0; HEX3Atmp_546644_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_293351_850551059(t0); HEX3Atmp_546644_839829468 = (NI)(LOC2 - ((NI) 1)); res_546647_839829468 = ((NI) 0); { while 
(1) { if (!(res_546647_839829468 <= HEX3Atmp_546644_839829468)) goto LA4; i_546547_839829468 = res_546647_839829468; switch ((*(*t0).kindU.S6.sons->data[i_546547_839829468]).kind) { case ((Tnodekind290020) 20) ... ((Tnodekind290020) 22): { res0 = resizeString(res0, (*(*t0).kindU.S6.sons->data[i_546547_839829468]).kindU.S3.strval->Sup.len + 0); appendString(res0, (*(*t0).kindU.S6.sons->data[i_546547_839829468]).kindU.S3.strval); } break; case ((Tnodekind290020) 3): { Tsym290834* sym0; sym0 = (*(*t0).kindU.S6.sons->data[i_546547_839829468]).kindU.S4.sym; { Tloc290816 a0; Ropeobj177006* LOC11; NimStringDesc* LOC12; if (!((28672 &(1U<<((NU)((*sym0).kind)&31U)))!=0)) goto LA9; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*t0).kindU.S6.sons->data[i_546547_839829468], (&a0)); LOC11 = (Ropeobj177006*)0; LOC11 = rdloc_536188_839829468(a0); LOC12 = (NimStringDesc*)0; LOC12 = HEX24_177856_2381377266(LOC11); res0 = resizeString(res0, LOC12->Sup.len + 0); appendString(res0, LOC12); } goto LA7; LA9: ; { Ropeobj177006* LOC16; NimStringDesc* LOC17; if (!((*sym0).kind == ((Tsymkind290435) 7))) goto LA14; LOC16 = (Ropeobj177006*)0; LOC16 = gettypedesc_533671_839829468((*p0).module, (*sym0).typ); LOC17 = (NimStringDesc*)0; LOC17 = HEX24_177856_2381377266(LOC16); res0 = resizeString(res0, LOC17->Sup.len + 0); appendString(res0, LOC17); } goto LA7; LA14: ; { Ropeobj177006* r0; NimStringDesc* LOC23; r0 = (*sym0).loc.r; { if (!(r0 == NIM_NIL)) goto LA21; r0 = manglename_531205_839829468(sym0); asgnRefNoCycle((void**) (&(*sym0).loc.r), r0); } LA21: ; LOC23 = (NimStringDesc*)0; LOC23 = HEX24_177856_2381377266(r0); res0 = resizeString(res0, LOC23->Sup.len + 0); appendString(res0, LOC23); } LA7: ; } break; default: { internalerror_194100_155036129((*(*t0).kindU.S6.sons->data[i_546547_839829468]).info, ((NimStringDesc*) &T839829468_612)); } break; } res_546647_839829468 += ((NI) 1); } LA4: ; } } { NIM_BOOL LOC27; LOC27 = (NIM_BOOL)0; LOC27 = isasmstmt0; if (!(LOC27)) 
goto LA28; LOC27 = ((Cc_271413_2528170400[(ccompiler_271431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop271004) 5))&7U)))!=0); LA28: ; if (!LOC27) goto LA29; { NimStringDesc* x_546604_839829468; NI first_546656_839829468; NI last_546658_839829468; x_546604_839829468 = (NimStringDesc*)0; first_546656_839829468 = ((NI) 0); last_546658_839829468 = ((NI) 0); { while (1) { NI j0; { while (1) { if (!!((((NU8)(res0->data[last_546658_839829468])) == ((NU8)(0)) || ((NU8)(res0->data[last_546658_839829468])) == ((NU8)(13)) || ((NU8)(res0->data[last_546658_839829468])) == ((NU8)(10))))) goto LA35; last_546658_839829468 += ((NI) 1); } LA35: ; } x_546604_839829468 = copyStrLast(res0, first_546656_839829468, (NI)(last_546658_839829468 - ((NI) 1))); j0 = ((NI) 0); { while (1) { if (!(((NU8)(x_546604_839829468->data[j0])) == ((NU8)(32)) || ((NU8)(x_546604_839829468->data[j0])) == ((NU8)(9)))) goto LA37; j0 += ((NI) 1); } LA37: ; } { if (!(((NU8)(x_546604_839829468->data[j0])) == ((NU8)(34)) || ((NU8)(x_546604_839829468->data[j0])) == ((NU8)(58)))) goto LA40; add_177487_2381377266(&result0, x_546604_839829468); add_177487_2381377266(&result0, tnl_175644_4151366050); } goto LA38; LA40: ; { if (!!(((NU8)(x_546604_839829468->data[j0]) == (NU8)(0)))) goto LA43; add_177487_2381377266(&result0, ((NimStringDesc*) &T839829468_613)); add_177487_2381377266(&result0, x_546604_839829468); add_177487_2381377266(&result0, ((NimStringDesc*) &T839829468_614)); } goto LA38; LA43: ; LA38: ; { if (!((NU8)(res0->data[last_546658_839829468]) == (NU8)(10))) goto LA47; last_546658_839829468 += ((NI) 1); } goto LA45; LA47: ; { if (!((NU8)(res0->data[last_546658_839829468]) == (NU8)(13))) goto LA50; last_546658_839829468 += ((NI) 1); { if (!((NU8)(res0->data[last_546658_839829468]) == (NU8)(10))) goto LA54; last_546658_839829468 += ((NI) 1); } LA54: ; } goto LA45; LA50: ; { goto LA32; } LA45: ; first_546656_839829468 = last_546658_839829468; } } LA32: ; } } goto LA25; LA29: ; { res0 = 
resizeString(res0, tnl_175644_4151366050->Sup.len + 0); appendString(res0, tnl_175644_4151366050); result0 = rope_177277_2381377266(res0); } LA25: ; return result0; } N_NIMCALL(void, genasmstmt_546659_839829468)(Tcproc527021* p0, Tnode290802* t0) { Ropeobj177006* s0; genlinedir_530823_839829468(p0, t0); s0 = genasmoremitstmt_546529_839829468(p0, t0, NIM_TRUE); { TY177507 LOC5; if (!((*p0).prc == NIM_NIL)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = s0; addf_178205_2381377266(&(*(*p0).module).s[(((Tcfilesection527005) 7))- 0], Cc_271413_2528170400[(ccompiler_271431_2528170400)- 1].Field17, LOC5, 1); } goto LA1; LA3: ; { TY177507 LOC7; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = s0; linef_530700_839829468(p0, ((Tcprocsection527011) 2), Cc_271413_2528170400[(ccompiler_271431_2528170400)- 1].Field17, LOC7, 1); } LA1: ; } static N_INLINE(void, gensimpleblock_542095_839829468)(Tcproc527021* p0, Tnode290802* stmts0) { TY531289 LOC1; NI LOC2; memset((void*)LOC1, 0, sizeof(LOC1)); LOC2 = (NI)0; LOC2 = startblock_541978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC1, 0); genstmts_537244_839829468(p0, stmts0); endblock_542060_839829468(p0); } N_NIMCALL(void, gentrycpp_545865_839829468)(Tcproc527021* p0, Tnode290802* t0, Tloc290816* d0) { Ropeobj177006* exc0; TY531289 LOC16; NI LOC17; NI length0; TY177507 LOC18; Ropeobj177006* LOC19; NI i0; NIM_BOOL catchallpresent0; TY531289 LOC78; Tnode290802* LOC79; { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = isemptytype_295440_850551059((*t0).typ); LOC3 = !(LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*d0).k == ((Tlockind290808) 0)); LA5: ; if (!LOC3) goto LA6; gettemp_535032_839829468(p0, (*t0).typ, d0, NIM_FALSE); } LA6: ; genlinedir_530823_839829468(p0, t0); exc0 = gettempname_531596_839829468((*p0).module); { Tsym290834* LOC10; Ropeobj177006* LOC13; LOC10 = (Tsym290834*)0; LOC10 = getcompilerproc_336746_3937434831(((NimStringDesc*) &T839829468_615)); if (!!((LOC10 == NIM_NIL))) 
goto LA11; LOC13 = (Ropeobj177006*)0; LOC13 = cgsym_530403_839829468((*p0).module, ((NimStringDesc*) &T839829468_615)); } goto LA8; LA11: ; { Ropeobj177006* LOC15; LOC15 = (Ropeobj177006*)0; LOC15 = cgsym_530403_839829468((*p0).module, ((NimStringDesc*) &T839829468_616)); } LA8: ; (*p0).nestedtrystmts = (Tnodeseq290796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode290802*)); asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), t0); ++(*p0).nestedtrystmts->Sup.len; memset((void*)LOC16, 0, sizeof(LOC16)); LOC17 = (NI)0; LOC17 = startblock_541978_839829468(p0, ((NimStringDesc*) &T839829468_617), LOC16, 0); expr_537248_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], d0); length0 = sonslen_293351_850551059(t0); memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = exc0; LOC19 = (Ropeobj177006*)0; LOC19 = ropecg_530407_839829468((*p0).module, ((NimStringDesc*) &T839829468_618), LOC18, 1); endblock_542035_839829468(p0, LOC19); { TY531289 LOC24; if (!(((*p0).options &(1U<<((NU)(((Toption168009) 15))&31U)))!=0)) goto LA22; memset((void*)LOC24, 0, sizeof(LOC24)); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_619), LOC24, 0); } LA22: ; (*p0).inexceptblock += ((NI) 1); i0 = ((NI) 1); catchallpresent0 = NIM_FALSE; { while (1) { NIM_BOOL LOC27; NI blen0; LOC27 = (NIM_BOOL)0; LOC27 = (i0 < length0); if (!(LOC27)) goto LA28; LOC27 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind290020) 87)); LA28: ; if (!LOC27) goto LA26; { NIM_BOOL LOC31; LOC31 = (NIM_BOOL)0; LOC31 = ((*d0).k == ((Tlockind290808) 1)); if (!(LOC31)) goto LA32; LOC31 = isemptytype_295440_850551059((*t0).typ); LA32: ; if (!LOC31) goto LA33; (*d0).k = ((Tlockind290808) 0); } LA33: ; blen0 = sonslen_293351_850551059((*t0).kindU.S6.sons->data[i0]); { Ropeobj177006** LOC39; TY531289 LOC40; if (!(((NI) 1) < i0)) goto LA37; LOC39 = (Ropeobj177006**)0; LOC39 = s_527179_3723162438(p0, ((Tcprocsection527011) 2)); memset((void*)LOC40, 0, 
sizeof(LOC40)); addf_178205_2381377266(LOC39, ((NimStringDesc*) &T839829468_620), LOC40, 0); } LA37: ; { TY531289 LOC45; NI LOC46; TY531289 LOC47; if (!(blen0 == ((NI) 1))) goto LA43; catchallpresent0 = NIM_TRUE; memset((void*)LOC45, 0, sizeof(LOC45)); LOC46 = (NI)0; LOC46 = startblock_541978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC45, 0); expr_537248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)], d0); memset((void*)LOC47, 0, sizeof(LOC47)); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_606), LOC47, 0); endblock_542060_839829468(p0); } goto LA41; LA43: ; { Ropeobj177006* orexpr0; TY177507 LOC57; TY531289 LOC58; NI LOC59; TY531289 LOC60; orexpr0 = NIM_NIL; { NI j_545978_839829468; NI HEX3Atmp_546101_839829468; NI res_546104_839829468; j_545978_839829468 = (NI)0; HEX3Atmp_546101_839829468 = (NI)0; HEX3Atmp_546101_839829468 = (NI)(blen0 - ((NI) 2)); res_546104_839829468 = ((NI) 0); { while (1) { TY530811 LOC56; if (!(res_546104_839829468 <= HEX3Atmp_546101_839829468)) goto LA51; j_545978_839829468 = res_546104_839829468; { if (!!((orexpr0 == NIM_NIL))) goto LA54; add_177487_2381377266(&orexpr0, ((NimStringDesc*) &T839829468_229)); } LA54: ; memset((void*)LOC56, 0, sizeof(LOC56)); LOC56[0] = exc0; LOC56[1] = gentypeinfo_533941_839829468((*p0).module, (*(*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[j_545978_839829468]).typ); appcg_530632_839829468((*p0).module, &orexpr0, ((NimStringDesc*) &T839829468_621), LOC56, 2); res_546104_839829468 += ((NI) 1); } LA51: ; } } memset((void*)LOC57, 0, sizeof(LOC57)); LOC57[0] = orexpr0; linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_622), LOC57, 1); memset((void*)LOC58, 0, sizeof(LOC58)); LOC59 = (NI)0; LOC59 = startblock_541978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC58, 0); expr_537248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[(NI)(blen0 - ((NI) 1))], d0); 
memset((void*)LOC60, 0, sizeof(LOC60)); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_606), LOC60, 0); endblock_542060_839829468(p0); } LA41: ; i0 += ((NI) 1); } LA26: ; } { TY531289 LOC70; NI LOC71; Tnode290802* finallyblock0; TY531289 LOC76; Ropeobj177006* LOC77; if (!!(catchallpresent0)) goto LA63; { TY531289 LOC69; if (!(((NI) 1) < i0)) goto LA67; memset((void*)LOC69, 0, sizeof(LOC69)); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_620), LOC69, 0); } LA67: ; memset((void*)LOC70, 0, sizeof(LOC70)); LOC71 = (NI)0; LOC71 = startblock_541978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC70, 0); finallyblock0 = lastson_293364_850551059(t0); { if (!((*finallyblock0).kind == ((Tnodekind290020) 107))) goto LA74; genstmts_537244_839829468(p0, (*finallyblock0).kindU.S6.sons->data[((NI) 0)]); } LA74: ; memset((void*)LOC76, 0, sizeof(LOC76)); LOC77 = (Ropeobj177006*)0; LOC77 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_623), LOC76, 0); line_530690_839829468(p0, ((Tcprocsection527011) 2), LOC77); endblock_542060_839829468(p0); } LA63: ; memset((void*)LOC78, 0, sizeof(LOC78)); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_160), LOC78, 0); (*p0).inexceptblock -= ((NI) 1); LOC79 = (Tnode290802*)0; LOC79 = pop_316246_1689653243((&(*p0).nestedtrystmts)); { NIM_BOOL LOC82; LOC82 = (NIM_BOOL)0; LOC82 = (i0 < length0); if (!(LOC82)) goto LA83; LOC82 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind290020) 107)); LA83: ; if (!LOC82) goto LA84; gensimpleblock_542095_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)]); } LA84: ; } N_NIMCALL(void, line_530695_839829468)(Tcproc527021* p0, Tcprocsection527011 s0, NimStringDesc* r0) { Ropeobj177006** LOC1; Ropeobj177006* LOC2; Ropeobj177006* LOC3; LOC1 = (Ropeobj177006**)0; LOC1 = s_527179_3723162438(p0, s0); LOC2 = (Ropeobj177006*)0; LOC2 = rope_177277_2381377266(r0); LOC3 = 
(Ropeobj177006*)0; LOC3 = indentline_530656_839829468(p0, LOC2); add_177482_2381377266(LOC1, LOC3); } static N_INLINE(Ropeobj177006*, pop_177530_1689653243)(TY189350** s0) { Ropeobj177006* result0; NI L0; result0 = (Ropeobj177006*)0; L0 = (NI)(((*s0) ? (*s0)->Sup.len : 0) - ((NI) 1)); result0 = (*s0)->data[L0]; (*s0) = (TY189350*) setLengthSeq(&((*s0))->Sup, sizeof(Ropeobj177006*), ((NI) (L0))); return result0; } N_NIMCALL(void, gentry_546114_839829468)(Tcproc527021* p0, Tnode290802* t0, Tloc290816* d0) { NIM_BOOL LOC8; Ropeobj177006* safepoint0; TY177507 LOC17; TY177507 LOC18; TY177507 LOC37; NI LOC38; NI length0; TY531289 LOC39; TY531289 LOC40; NI LOC41; TY531289 LOC42; NI i0; Tnode290802* LOC95; TY177507 LOC103; { NIM_BOOL LOC3; NIM_BOOL LOC4; LOC3 = (NIM_BOOL)0; LOC4 = (NIM_BOOL)0; LOC4 = isemptytype_295440_850551059((*t0).typ); LOC3 = !(LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*d0).k == ((Tlockind290808) 0)); LA5: ; if (!LOC3) goto LA6; gettemp_535032_839829468(p0, (*t0).typ, d0, NIM_FALSE); } LA6: ; LOC8 = (NIM_BOOL)0; LOC8 = includestr_147249_3771138726((&(*(*p0).module).headerfiles), ((NimStringDesc*) &T839829468_624)); genlinedir_530823_839829468(p0, t0); safepoint0 = gettempname_531596_839829468((*p0).module); { Tsym290834* LOC11; Ropeobj177006* LOC14; LOC11 = (Tsym290834*)0; LOC11 = getcompilerproc_336746_3937434831(((NimStringDesc*) &T839829468_615)); if (!!((LOC11 == NIM_NIL))) goto LA12; LOC14 = (Ropeobj177006*)0; LOC14 = cgsym_530403_839829468((*p0).module, ((NimStringDesc*) &T839829468_615)); } goto LA9; LA12: ; { Ropeobj177006* LOC16; LOC16 = (Ropeobj177006*)0; LOC16 = cgsym_530403_839829468((*p0).module, ((NimStringDesc*) &T839829468_616)); } LA9: ; memset((void*)LOC17, 0, sizeof(LOC17)); LOC17[0] = safepoint0; linefmt_530714_839829468(p0, ((Tcprocsection527011) 0), ((NimStringDesc*) &T839829468_625), LOC17, 1); memset((void*)LOC18, 0, sizeof(LOC18)); LOC18[0] = safepoint0; linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) 
&T839829468_626), LOC18, 1); { NIM_BOOL LOC21; TY177507 LOC24; LOC21 = (NIM_BOOL)0; LOC21 = isdefined_198011_1967573533(((NimStringDesc*) &T839829468_627)); if (!LOC21) goto LA22; memset((void*)LOC24, 0, sizeof(LOC24)); LOC24[0] = safepoint0; linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_628), LOC24, 1); } goto LA19; LA22: ; { NIM_BOOL LOC26; TY177507 LOC29; LOC26 = (NIM_BOOL)0; LOC26 = isdefined_198011_1967573533(((NimStringDesc*) &T839829468_629)); if (!LOC26) goto LA27; memset((void*)LOC29, 0, sizeof(LOC29)); LOC29[0] = safepoint0; linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_630), LOC29, 1); } goto LA19; LA27: ; { NIM_BOOL LOC31; TY177507 LOC34; LOC31 = (NIM_BOOL)0; LOC31 = isdefined_198011_1967573533(((NimStringDesc*) &T839829468_631)); if (!LOC31) goto LA32; memset((void*)LOC34, 0, sizeof(LOC34)); LOC34[0] = safepoint0; linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_632), LOC34, 1); } goto LA19; LA32: ; { TY177507 LOC36; memset((void*)LOC36, 0, sizeof(LOC36)); LOC36[0] = safepoint0; linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_628), LOC36, 1); } LA19: ; memset((void*)LOC37, 0, sizeof(LOC37)); LOC37[0] = safepoint0; LOC38 = (NI)0; LOC38 = startblock_541978_839829468(p0, ((NimStringDesc*) &T839829468_633), LOC37, 1); length0 = sonslen_293351_850551059(t0); (*p0).nestedtrystmts = (Tnodeseq290796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode290802*)); asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), t0); ++(*p0).nestedtrystmts->Sup.len; expr_537248_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], d0); memset((void*)LOC39, 0, sizeof(LOC39)); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_605), LOC39, 0); endblock_542060_839829468(p0); memset((void*)LOC40, 0, sizeof(LOC40)); LOC41 = (NI)0; LOC41 = 
startblock_541978_839829468(p0, ((NimStringDesc*) &T839829468_634), LOC40, 0); memset((void*)LOC42, 0, sizeof(LOC42)); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_605), LOC42, 0); { TY531289 LOC47; if (!(((*p0).options &(1U<<((NU)(((Toption168009) 15))&31U)))!=0)) goto LA45; memset((void*)LOC47, 0, sizeof(LOC47)); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_619), LOC47, 0); } LA45: ; (*p0).inexceptblock += ((NI) 1); i0 = ((NI) 1); { while (1) { NIM_BOOL LOC50; NI blen0; LOC50 = (NIM_BOOL)0; LOC50 = (i0 < length0); if (!(LOC50)) goto LA51; LOC50 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind290020) 87)); LA51: ; if (!LOC50) goto LA49; { NIM_BOOL LOC54; LOC54 = (NIM_BOOL)0; LOC54 = ((*d0).k == ((Tlockind290808) 1)); if (!(LOC54)) goto LA55; LOC54 = isemptytype_295440_850551059((*t0).typ); LA55: ; if (!LOC54) goto LA56; (*d0).k = ((Tlockind290808) 0); } LA56: ; blen0 = sonslen_293351_850551059((*t0).kindU.S6.sons->data[i0]); { TY531289 LOC67; NI LOC68; TY177507 LOC69; TY531289 LOC70; if (!(blen0 == ((NI) 1))) goto LA60; { TY531289 LOC66; if (!(((NI) 1) < i0)) goto LA64; memset((void*)LOC66, 0, sizeof(LOC66)); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_635), LOC66, 0); } LA64: ; memset((void*)LOC67, 0, sizeof(LOC67)); LOC68 = (NI)0; LOC68 = startblock_541978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC67, 0); memset((void*)LOC69, 0, sizeof(LOC69)); LOC69[0] = safepoint0; linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_636), LOC69, 1); expr_537248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)], d0); memset((void*)LOC70, 0, sizeof(LOC70)); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_606), LOC70, 0); endblock_542060_839829468(p0); } goto LA58; LA60: ; { Ropeobj177006* orexpr0; TY177507 LOC91; NI LOC92; TY177507 LOC93; 
TY531289 LOC94; orexpr0 = NIM_NIL; { NI j_546247_839829468; NI HEX3Atmp_546521_839829468; NI res_546524_839829468; j_546247_839829468 = (NI)0; HEX3Atmp_546521_839829468 = (NI)0; HEX3Atmp_546521_839829468 = (NI)(blen0 - ((NI) 2)); res_546524_839829468 = ((NI) 0); { while (1) { NimStringDesc* isobjformat0; TY177507 LOC86; if (!(res_546524_839829468 <= HEX3Atmp_546521_839829468)) goto LA74; j_546247_839829468 = res_546524_839829468; { if (!!((orexpr0 == NIM_NIL))) goto LA77; add_177487_2381377266(&orexpr0, ((NimStringDesc*) &T839829468_229)); } LA77: ; { NIM_BOOL LOC81; LOC81 = (NIM_BOOL)0; LOC81 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC81) goto LA82; LOC81 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA82: ; if (!!(LOC81)) goto LA83; isobjformat0 = copyString(((NimStringDesc*) &T839829468_637)); } goto LA79; LA83: ; { isobjformat0 = copyString(((NimStringDesc*) &T839829468_638)); } LA79: ; memset((void*)LOC86, 0, sizeof(LOC86)); LOC86[0] = gentypeinfo_533941_839829468((*p0).module, (*(*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[j_546247_839829468]).typ); appcg_530632_839829468((*p0).module, &orexpr0, isobjformat0, LOC86, 1); res_546524_839829468 += ((NI) 1); } LA74: ; } } { if (!(((NI) 1) < i0)) goto LA89; line_530695_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_620)); } LA89: ; memset((void*)LOC91, 0, sizeof(LOC91)); LOC91[0] = orexpr0; LOC92 = (NI)0; LOC92 = startblock_541978_839829468(p0, ((NimStringDesc*) &T839829468_639), LOC91, 1); memset((void*)LOC93, 0, sizeof(LOC93)); LOC93[0] = safepoint0; linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_636), LOC93, 1); expr_537248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[(NI)(blen0 - ((NI) 1))], d0); memset((void*)LOC94, 0, sizeof(LOC94)); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_606), LOC94, 0); endblock_542060_839829468(p0); 
} LA58: ; i0 += ((NI) 1); } LA49: ; } (*p0).inexceptblock -= ((NI) 1); LOC95 = (Tnode290802*)0; LOC95 = pop_316246_1689653243((&(*p0).nestedtrystmts)); endblock_542060_839829468(p0); { NIM_BOOL LOC98; Ropeobj177006* LOC102; LOC98 = (NIM_BOOL)0; LOC98 = (i0 < length0); if (!(LOC98)) goto LA99; LOC98 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind290020) 107)); LA99: ; if (!LOC98) goto LA100; (*p0).finallysafepoints = (TY189350*) incrSeqV2(&((*p0).finallysafepoints)->Sup, sizeof(Ropeobj177006*)); asgnRefNoCycle((void**) (&(*p0).finallysafepoints->data[(*p0).finallysafepoints->Sup.len]), safepoint0); ++(*p0).finallysafepoints->Sup.len; gensimpleblock_542095_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)]); LOC102 = (Ropeobj177006*)0; LOC102 = pop_177530_1689653243((&(*p0).finallysafepoints)); } LA100: ; memset((void*)LOC103, 0, sizeof(LOC103)); LOC103[0] = safepoint0; linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_640), LOC103, 1); } N_NIMCALL(NimStringDesc*, getraisefrmt_544824_839829468)(Tcproc527021* p0) { NimStringDesc* result0; result0 = (NimStringDesc*)0; result0 = copyString(((NimStringDesc*) &T839829468_641)); return result0; } N_NIMCALL(void, genraisestmt_544828_839829468)(Tcproc527021* p0, Tnode290802* t0) { { Tnode290802* finallyblock0; if (!(((NI) 0) < (*p0).inexceptblock)) goto LA3; finallyblock0 = lastson_293364_850551059((*p0).nestedtrystmts->data[(NI)(((*p0).nestedtrystmts ? 
(*p0).nestedtrystmts->Sup.len : 0) - ((NI) 1))]); { if (!((*finallyblock0).kind == ((Tnodekind290020) 107))) goto LA7; gensimpleblock_542095_839829468(p0, (*finallyblock0).kindU.S6.sons->data[((NI) 0)]); } LA7: ; } LA3: ; { Tloc290816 a0; Ropeobj177006* e0; Ttype290840* typ0; NimStringDesc* LOC13; TY530811 LOC14; if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 1)))) goto LA11; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0)); e0 = rdloc_536188_839829468(a0); typ0 = skiptypes_294099_850551059((*(*t0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106247256320)); genlinedir_530823_839829468(p0, t0); LOC13 = (NimStringDesc*)0; LOC13 = getraisefrmt_544824_839829468(p0); memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = e0; LOC14[1] = makecstring_189638_155036129((*(*(*typ0).sym).name).s); linecg_530707_839829468(p0, ((Tcprocsection527011) 2), LOC13, LOC14, 2); } goto LA9; LA11: ; { genlinedir_530823_839829468(p0, t0); { NIM_BOOL LOC18; NIM_BOOL LOC19; TY531289 LOC24; Ropeobj177006* LOC25; LOC18 = (NIM_BOOL)0; LOC19 = (NIM_BOOL)0; LOC19 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC19) goto LA20; LOC19 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA20: ; LOC18 = LOC19; if (!(LOC18)) goto LA21; LOC18 = !(((gglobaloptions_168130_2607990831 &((NU64)1<<((NU)(((Tglobaloption168013) 31))&63U)))!=0)); LA21: ; if (!LOC18) goto LA22; memset((void*)LOC24, 0, sizeof(LOC24)); LOC25 = (Ropeobj177006*)0; LOC25 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_623), LOC24, 0); line_530690_839829468(p0, ((Tcprocsection527011) 2), LOC25); } goto LA16; LA22: ; { TY531289 LOC27; memset((void*)LOC27, 0, sizeof(LOC27)); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_642), LOC27, 0); } LA16: ; } LA9: ; } N_NIMCALL(void, gentypesection_536184_839829468)(Tcgen527027* m0, Tnode290802* n0) { } 
/* NOTE(review): this file is machine-generated C produced by the Nim
 * compiler's C backend (nimcache output); hand edits are lost on
 * regeneration. Comments below are a reading aid only. */
/* determinesection: chooses which C file section (Tcfilesection527005)
 * generated text should be appended to. Defaults to section 7. If n has
 * at least one son and that son's kind is in 20..22 (presumably the
 * string-literal node kinds -- TODO confirm against Nim's TNodeKind),
 * its strval is prefix-tested with nsuStartsWith against three interned
 * marker strings (T839829468_643/644/645), selecting section 3, 9 or 1
 * respectively; any other string keeps the default. */
N_NIMCALL(Tcfilesection527005, determinesection_546819_839829468)(Tnode290802* n0) { Tcfilesection527005 result0; result0 = (Tcfilesection527005)0; result0 = ((Tcfilesection527005) 7); { NIM_BOOL LOC3; NI LOC4; NimStringDesc* sec0; LOC3 = (NIM_BOOL)0; LOC4 = (NI)0; LOC4 = len_291081_850551059(n0); LOC3 = (((NI) 1) <= LOC4); if (!(LOC3)) goto LA5; LOC3 = ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind >= ((Tnodekind290020) 20) && (*(*n0).kindU.S6.sons->data[((NI) 0)]).kind <= ((Tnodekind290020) 22)); LA5: ; if (!LOC3) goto LA6; sec0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S3.strval; { NIM_BOOL LOC10; LOC10 = (NIM_BOOL)0; LOC10 = nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_643)); if (!LOC10) goto LA11; result0 = ((Tcfilesection527005) 3); } goto LA8; LA11: ; { NIM_BOOL LOC14; LOC14 = (NIM_BOOL)0; LOC14 = nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_644)); if (!LOC14) goto LA15; result0 = ((Tcfilesection527005) 9); } goto LA8; LA15: ; { NIM_BOOL LOC18; LOC18 = (NIM_BOOL)0; LOC18 = nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_645)); if (!LOC18) goto LA19; result0 = ((Tcfilesection527005) 1); } goto LA8; LA19: ; LA8: ; } LA6: ; return result0; }
/* genemit: code generation for an emit-style node. Renders t's son 1
 * through genasmoremitstmt (asm flag NIM_FALSE). When (*p0).prc is
 * NIM_NIL (no enclosing proc is being generated -- presumably top-level
 * emit; verify against callers), the son-1 node picks a module-level
 * section via determinesection above, genclinedir is emitted into that
 * section, and the rendered rope is appended to it. Otherwise a line
 * directive is generated and the rope goes to proc section 2. */
N_NIMCALL(void, genemit_546839_839829468)(Tcproc527021* p0, Tnode290802* t0) { Ropeobj177006* s0; s0 = genasmoremitstmt_546529_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], NIM_FALSE); { Tcfilesection527005 section0; Tnode290802* LOC5; if (!((*p0).prc == NIM_NIL)) goto LA3; LOC5 = (Tnode290802*)0; LOC5 = HEX5BHEX5D_291238_850551059(t0, ((NI) 1)); section0 = determinesection_546819_839829468(LOC5); genclinedir_530813_839829468(&(*(*p0).module).s[(section0)- 0], (*t0).info); add_177482_2381377266(&(*(*p0).module).s[(section0)- 0], s0); } goto LA1; LA3: ; { genlinedir_530823_839829468(p0, t0); line_530690_839829468(p0, ((Tcprocsection527011) 2), s0); } LA1: ; }
/* genbreakpoint: opening of breakpoint codegen; the definition's body
 * continues on the next physical source line of this generated file. */
N_NIMCALL(void, genbreakpoint_546862_839829468)(Tcproc527021* p0, Tnode290802* t0) { NimStringDesc* name0; name0 = (NimStringDesc*)0; { TY533238 
LOC12; NI LOC13; NimStringDesc* LOC14; if (!(((*p0).options &(1U<<((NU)(((Toption168009) 17))&31U)))!=0)) goto LA3; { if (!((*t0).kind == ((Tnodekind290020) 34))) goto LA7; name0 = nsuNormalize((*(*t0).kindU.S6.sons->data[((NI) 1)]).kindU.S3.strval); } goto LA5; LA7: ; { NimStringDesc* LOC10; NimStringDesc* LOC11; breakpointid_546860_839829468 += ((NI) 1); LOC10 = (NimStringDesc*)0; LOC11 = (NimStringDesc*)0; LOC11 = nimIntToStr(breakpointid_546860_839829468); LOC10 = rawNewString(LOC11->Sup.len + 2); appendString(LOC10, ((NimStringDesc*) &T839829468_646)); appendString(LOC10, LOC11); name0 = LOC10; } LA5: ; genlinedir_530823_839829468(p0, t0); memset((void*)LOC12, 0, sizeof(LOC12)); LOC13 = (NI)0; LOC13 = tolinenumber_190415_155036129((*t0).info); LOC12[0] = rope_177401_2381377266(((NI64) (LOC13))); LOC14 = (NimStringDesc*)0; LOC14 = tofilename_190260_155036129((*t0).info.fileindex); LOC12[1] = makecstring_189638_155036129(LOC14); LOC12[2] = makecstring_189638_155036129(name0); appcg_530632_839829468((*p0).module, &gbreakpoints_546861_839829468, ((NimStringDesc*) &T839829468_647), LOC12, 3); } LA3: ; } N_NIMCALL(void, genwatchpoint_547016_839829468)(Tcproc527021* p0, Tnode290802* n0) { Tloc290816 a0; Ttype290840* typ0; TY533238 LOC5; NimStringDesc* LOC6; { { if (!!((((*p0).options &(1U<<((NU)(((Toption168009) 17))&31U)))!=0))) goto LA3; goto BeforeRet; } LA3: ; memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0)); typ0 = skiptypes_294099_850551059((*(*n0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440)); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = addrloc_536204_839829468(a0); LOC6 = (NimStringDesc*)0; LOC6 = rendertree_309044_382274130((*n0).kindU.S6.sons->data[((NI) 1)], 0); LOC5[1] = makecstring_189638_155036129(LOC6); LOC5[2] = gentypeinfo_533941_839829468((*p0).module, typ0); linecg_530707_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_648), LOC5, 3); 
}BeforeRet: ; } N_NIMCALL(void, genpragma_547039_839829468)(Tcproc527021* p_547041_839829468, Tnode290802* n0) { { NI i_547054_839829468; NI HEX3Atmp_547073_839829468; NI LOC2; NI res_547076_839829468; i_547054_839829468 = (NI)0; HEX3Atmp_547073_839829468 = (NI)0; LOC2 = (NI)0; LOC2 = sonslen_293351_850551059(n0); HEX3Atmp_547073_839829468 = (NI)(LOC2 - ((NI) 1)); res_547076_839829468 = ((NI) 0); { while (1) { Tnode290802* it0; Tspecialword273003 LOC5; if (!(res_547076_839829468 <= HEX3Atmp_547073_839829468)) goto LA4; i_547054_839829468 = res_547076_839829468; it0 = (*n0).kindU.S6.sons->data[i_547054_839829468]; LOC5 = (Tspecialword273003)0; LOC5 = whichpragma_316911_2616423590(it0); switch (LOC5) { case ((Tspecialword273003) 191): { genemit_546839_839829468(p_547041_839829468, it0); } break; case ((Tspecialword273003) 131): { genbreakpoint_546862_839829468(p_547041_839829468, it0); } break; case ((Tspecialword273003) 176): { genwatchpoint_547016_839829468(p_547041_839829468, it0); } break; case ((Tspecialword273003) 183): { Tcproc527021* p0; Ropeobj177006** LOC10; p0 = newproc_527206_3723162438(NIM_NIL, (*p_547041_839829468).module); (*p0).options = ((*p0).options & ~ 98304); genstmts_537244_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)]); LOC10 = (Ropeobj177006**)0; LOC10 = s_527179_3723162438(p0, ((Tcprocsection527011) 2)); asgnRefNoCycle((void**) (&(*(*p0).module).injectstmt), (*LOC10)); } break; default: { } break; } res_547076_839829468 += ((NI) 1); } LA4: ; } } } N_NIMCALL(void, genparforstmt_544208_839829468)(Tcproc527021* p0, Tnode290802* t0) { NI oldbreakidx_544411_839829468; Tsym290834* forloopvar0; Tloc290816 rangea0; Tloc290816 rangeb0; Tnode290802* call0; TY533235 LOC1; NimStringDesc* LOC2; TY531289 LOC3; (*p0).withinloop += ((NI) 1); genlinedir_530823_839829468(p0, t0); oldbreakidx_544411_839829468 = (*p0).breakidx; forloopvar0 = (*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; memset((void*)(&rangea0), 0, sizeof(rangea0)); 
memset((void*)(&rangeb0), 0, sizeof(rangeb0)); assignlocalvar_536614_839829468(p0, forloopvar0); call0 = (*t0).kindU.S6.sons->data[((NI) 1)]; initlocexpr_537283_839829468(p0, (*call0).kindU.S6.sons->data[((NI) 1)], (&rangea0)); initlocexpr_537283_839829468(p0, (*call0).kindU.S6.sons->data[((NI) 2)], (&rangeb0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_536188_839829468((*forloopvar0).loc); LOC1[1] = rdloc_536188_839829468(rangea0); LOC1[2] = rdloc_536188_839829468(rangeb0); LOC2 = (NimStringDesc*)0; LOC2 = getstr_295230_850551059((*call0).kindU.S6.sons->data[((NI) 3)]); LOC1[3] = rope_177277_2381377266(LOC2); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_649), LOC1, 4); memset((void*)LOC3, 0, sizeof(LOC3)); (*p0).breakidx = startblock_541978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC3, 0); (*p0).blocks->data[(*p0).breakidx].isloop = NIM_TRUE; genstmts_537244_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 2)]); endblock_542060_839829468(p0); (*p0).breakidx = oldbreakidx_544411_839829468; (*p0).withinloop -= ((NI) 1); } N_NIMCALL(void, genstate_542117_839829468)(Tcproc527021* p0, Tnode290802* n0) { NI64 idx0; TY177507 LOC9; { NIM_BOOL LOC3; NI LOC4; NimStringDesc* LOC8; LOC3 = (NIM_BOOL)0; LOC4 = (NI)0; LOC4 = len_291081_850551059(n0); LOC3 = (LOC4 == ((NI) 1)); if (!(LOC3)) goto LA5; LOC3 = ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 6)); LA5: ; if (!!(LOC3)) goto LA6; LOC8 = (NimStringDesc*)0; LOC8 = HEX24_194185_1689653243(T839829468_650); internalerror_194113_155036129(LOC8); } LA6: ; idx0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval; memset((void*)LOC9, 0, sizeof(LOC9)); LOC9[0] = rope_177401_2381377266(idx0); linefmt_530714_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_652), LOC9, 1); } N_NIMCALL(void, gengotostate_542144_839829468)(Tcproc527021* p0, Tnode290802* n0) { Tloc290816 a0; TY177507 LOC1; TY531289 LOC2; TY531289 LOC7; 
memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = rdloc_536188_839829468(a0); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_603), LOC1, 1); (*p0).beforeretneeded = NIM_TRUE; memset((void*)LOC2, 0, sizeof(LOC2)); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_653), LOC2, 0); { NI64 i_542214_839829468; NI64 HEX3Atmp_542223_839829468; NI64 res_542226_839829468; i_542214_839829468 = (NI64)0; HEX3Atmp_542223_839829468 = (NI64)0; HEX3Atmp_542223_839829468 = lastord_318004_3876443242((*(*n0).kindU.S6.sons->data[((NI) 0)]).typ); res_542226_839829468 = IL64(0); { while (1) { TY177507 LOC6; if (!(res_542226_839829468 <= HEX3Atmp_542223_839829468)) goto LA5; i_542214_839829468 = res_542226_839829468; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = rope_177401_2381377266(i_542214_839829468); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_654), LOC6, 1); res_542226_839829468 += ((NI) 1); } LA5: ; } } memset((void*)LOC7, 0, sizeof(LOC7)); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_160), LOC7, 0); } N_NIMCALL(void, genbreakstate_542229_839829468)(Tcproc527021* p0, Tnode290802* n0) { Tloc290816 a0; memset((void*)(&a0), 0, sizeof(a0)); { TY177507 LOC5; if (!((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 155))) goto LA3; initlocexpr_537283_839829468(p0, (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)], (&a0)); memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rdloc_536188_839829468(a0); linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_655), LOC5, 1); } goto LA1; LA3: ; { TY177507 LOC7; initlocexpr_537283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = rdloc_536188_839829468(a0); 
linef_530700_839829468(p0, ((Tcprocsection527011) 2), ((NimStringDesc*) &T839829468_656), LOC7, 1); } LA1: ; } N_NIMCALL(void, expr_537248_839829468)(Tcproc527021* p0, Tnode290802* n0, Tloc290816* d0) { switch ((*n0).kind) { case ((Tnodekind290020) 3): { Tsym290834* sym0; sym0 = (*n0).kindU.S4.sym; switch ((*sym0).kind) { case ((Tsymkind290435) 13): { { if (!!(((33554448 & (*sym0).flags) == 0))) goto LA5; fillprocloc_537201_839829468(sym0); genprocprototype_537254_839829468((*p0).module, sym0); } goto LA3; LA5: ; { genproc_530951_839829468((*p0).module, sym0); } LA3: ; putlocintodest_537258_839829468(p0, d0, (*sym0).loc); } break; case ((Tsymkind290435) 12): case ((Tsymkind290435) 15): case ((Tsymkind290435) 14): { { NimStringDesc* LOC13; if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag290184) 23))&31U)))!=0)) goto LA11; LOC13 = (NimStringDesc*)0; LOC13 = rawNewString((*(*sym0).name).s->Sup.len + 48); appendString(LOC13, ((NimStringDesc*) &T839829468_270)); appendString(LOC13, (*(*sym0).name).s); localerror_194085_155036129((*n0).info, LOC13); } LA11: ; genproc_530951_839829468((*p0).module, sym0); { NIM_BOOL LOC16; NimStringDesc* LOC20; LOC16 = (NIM_BOOL)0; LOC16 = ((*sym0).loc.r == NIM_NIL); if (LOC16) goto LA17; LOC16 = ((*sym0).loc.t == NIM_NIL); LA17: ; if (!LOC16) goto LA18; LOC20 = (NimStringDesc*)0; LOC20 = rawNewString((*(*sym0).name).s->Sup.len + 20); appendString(LOC20, ((NimStringDesc*) &T839829468_271)); appendString(LOC20, (*(*sym0).name).s); internalerror_194100_155036129((*n0).info, LOC20); } LA18: ; putlocintodest_537258_839829468(p0, d0, (*sym0).loc); } break; case ((Tsymkind290435) 10): { { NIM_BOOL LOC24; Ropeobj177006* LOC27; LOC24 = (NIM_BOOL)0; LOC24 = issimpleconst_530311_839829468((*sym0).typ); if (!LOC24) goto LA25; LOC27 = (Ropeobj177006*)0; LOC27 = genliteral_547476_839829468(p0, (*sym0).ast, (*sym0).typ); putintodest_548468_839829468(p0, d0, (*n0).typ, LOC27, ((Tstorageloc290812) 1)); } goto LA22; LA25: ; { 
gencomplexconst_556249_839829468(p0, sym0, d0); } LA22: ; } break; case ((Tsymkind290435) 19): { Ropeobj177006* LOC30; LOC30 = (Ropeobj177006*)0; LOC30 = rope_177401_2381377266(((NI64) ((*sym0).position))); putintodest_548468_839829468(p0, d0, (*n0).typ, LOC30, ((Tstorageloc290812) 0)); } break; case ((Tsymkind290435) 8): case ((Tsymkind290435) 20): case ((Tsymkind290435) 11): case ((Tsymkind290435) 9): { { if (!!(((4194312 & (*sym0).flags) == 0))) goto LA34; genvarprototype_537236_839829468((*p0).module, sym0); } LA34: ; { NIM_BOOL LOC38; NimStringDesc* LOC42; NimStringDesc* LOC43; LOC38 = (NIM_BOOL)0; LOC38 = ((*sym0).loc.r == NIM_NIL); if (LOC38) goto LA39; LOC38 = ((*sym0).loc.t == NIM_NIL); LA39: ; if (!LOC38) goto LA40; LOC42 = (NimStringDesc*)0; LOC43 = (NimStringDesc*)0; LOC43 = nimIntToStr((*sym0).Sup.id); LOC42 = rawNewString((*(*sym0).name).s->Sup.len + LOC43->Sup.len + 20); appendString(LOC42, ((NimStringDesc*) &T839829468_285)); appendString(LOC42, (*(*sym0).name).s); appendString(LOC42, ((NimStringDesc*) &T839829468_12)); appendString(LOC42, LOC43); internalerror_194100_155036129((*n0).info, LOC42); } LA40: ; { if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag290184) 22))&31U)))!=0)) goto LA46; accessthreadlocalvar_530945_839829468(p0, sym0); { NIM_BOOL LOC50; Ropeobj177006* LOC53; LOC50 = (NIM_BOOL)0; LOC50 = emulatedthreadvars_530949_839829468(); if (!LOC50) goto LA51; LOC53 = (Ropeobj177006*)0; LOC53 = HEX26_177452_2381377266(((NimStringDesc*) &T839829468_288), (*sym0).loc.r); putintodest_548468_839829468(p0, d0, (*sym0).loc.t, LOC53, ((Tstorageloc290812) 0)); } goto LA48; LA51: ; { putlocintodest_537258_839829468(p0, d0, (*sym0).loc); } LA48: ; } goto LA44; LA46: ; { putlocintodest_537258_839829468(p0, d0, (*sym0).loc); } LA44: ; } break; case ((Tsymkind290435) 5): { { NIM_BOOL LOC59; NimStringDesc* LOC63; NimStringDesc* LOC64; LOC59 = (NIM_BOOL)0; LOC59 = ((*sym0).loc.r == NIM_NIL); if (LOC59) goto LA60; LOC59 = ((*sym0).loc.t == NIM_NIL); LA60: ; if 
(!LOC59) goto LA61; LOC63 = (NimStringDesc*)0; LOC64 = (NimStringDesc*)0; LOC64 = nimIntToStr((*sym0).Sup.id); LOC63 = rawNewString((*(*sym0).name).s->Sup.len + LOC64->Sup.len + 21); appendString(LOC63, ((NimStringDesc*) &T839829468_289)); appendString(LOC63, (*(*sym0).name).s); appendString(LOC63, ((NimStringDesc*) &T839829468_12)); appendString(LOC63, LOC64); internalerror_194100_155036129((*n0).info, LOC63); } LA61: ; putlocintodest_537258_839829468(p0, d0, (*sym0).loc); } break; case ((Tsymkind290435) 3): { { NIM_BOOL LOC68; NimStringDesc* LOC72; NimStringDesc* LOC73; LOC68 = (NIM_BOOL)0; LOC68 = ((*sym0).loc.r == NIM_NIL); if (LOC68) goto LA69; LOC68 = ((*sym0).loc.t == NIM_NIL); LA69: ; if (!LOC68) goto LA70; LOC72 = (NimStringDesc*)0; LOC73 = (NimStringDesc*)0; LOC73 = nimIntToStr((*sym0).Sup.id); LOC72 = rawNewString((*(*sym0).name).s->Sup.len + LOC73->Sup.len + 22); appendString(LOC72, ((NimStringDesc*) &T839829468_290)); appendString(LOC72, (*(*sym0).name).s); appendString(LOC72, ((NimStringDesc*) &T839829468_12)); appendString(LOC72, LOC73); internalerror_194100_155036129((*n0).info, LOC72); } LA70: ; putlocintodest_537258_839829468(p0, d0, (*sym0).loc); } break; default: { NimStringDesc* LOC75; LOC75 = (NimStringDesc*)0; LOC75 = rawNewString(reprEnum((NI)(*sym0).kind, (&NTI290435))->Sup.len + 22); appendString(LOC75, ((NimStringDesc*) &T839829468_291)); appendString(LOC75, reprEnum((NI)(*sym0).kind, (&NTI290435))); appendString(LOC75, ((NimStringDesc*) &T839829468_292)); internalerror_194100_155036129((*n0).info, LOC75); } break; } } break; case ((Tnodekind290020) 23): { { NIM_BOOL LOC79; Ropeobj177006* LOC82; LOC79 = (NIM_BOOL)0; LOC79 = isemptytype_295440_850551059((*n0).typ); if (!!(LOC79)) goto LA80; LOC82 = (Ropeobj177006*)0; LOC82 = genliteral_537273_839829468(p0, n0); putintodest_548468_839829468(p0, d0, (*n0).typ, LOC82, ((Tstorageloc290812) 0)); } LA80: ; } break; case ((Tnodekind290020) 20) ... 
((Tnodekind290020) 22): { Ropeobj177006* LOC84; LOC84 = (Ropeobj177006*)0; LOC84 = genliteral_537273_839829468(p0, n0); putdataintodest_548436_839829468(p0, d0, (*n0).typ, LOC84); } break; case ((Tnodekind290020) 6) ... ((Tnodekind290020) 15): case ((Tnodekind290020) 16) ... ((Tnodekind290020) 19): case ((Tnodekind290020) 5): { Ropeobj177006* LOC86; LOC86 = (Ropeobj177006*)0; LOC86 = genliteral_537273_839829468(p0, n0); putintodest_548468_839829468(p0, d0, (*n0).typ, LOC86, ((Tstorageloc290812) 0)); } break; case ((Tnodekind290020) 27): case ((Tnodekind290020) 32): case ((Tnodekind290020) 29): case ((Tnodekind290020) 30): case ((Tnodekind290020) 31): case ((Tnodekind290020) 26): case ((Tnodekind290020) 28): { Tnode290802* op0; genlinedir_530823_839829468(p0, n0); op0 = (*n0).kindU.S6.sons->data[((NI) 0)]; { Tloc290816 a0; if (!(*n0).typ == 0) goto LA90; memset((void*)(&a0), 0, sizeof(a0)); { NIM_BOOL LOC94; LOC94 = (NIM_BOOL)0; LOC94 = ((*op0).kind == ((Tnodekind290020) 3)); if (!(LOC94)) goto LA95; LOC94 = !(((*(*op0).kindU.S4.sym).magic == ((Tmagic290524) 0))); LA95: ; if (!LOC94) goto LA96; genmagicexpr_555033_839829468(p0, n0, (&a0), (*(*op0).kindU.S4.sym).magic); } goto LA92; LA96: ; { gencall_541632_839829468(p0, n0, (&a0)); } LA92: ; } goto LA88; LA90: ; { { NIM_BOOL LOC102; LOC102 = (NIM_BOOL)0; LOC102 = ((*op0).kind == ((Tnodekind290020) 3)); if (!(LOC102)) goto LA103; LOC102 = !(((*(*op0).kindU.S4.sym).magic == ((Tmagic290524) 0))); LA103: ; if (!LOC102) goto LA104; genmagicexpr_555033_839829468(p0, n0, d0, (*(*op0).kindU.S4.sym).magic); } goto LA100; LA104: ; { gencall_541632_839829468(p0, n0, d0); } LA100: ; } LA88: ; } break; case ((Tnodekind290020) 39): { { NIM_BOOL LOC110; NI LOC112; Ropeobj177006* LOC115; LOC110 = (NIM_BOOL)0; LOC110 = isdeepconstexpr_316566_2616423590(n0); if (!(LOC110)) goto LA111; LOC112 = (NI)0; LOC112 = len_291081_850551059(n0); LOC110 = !((LOC112 == ((NI) 0))); LA111: ; if (!LOC110) goto LA113; LOC115 = (Ropeobj177006*)0; 
LOC115 = gensetnode_547664_839829468(p0, n0); putintodest_548468_839829468(p0, d0, (*n0).typ, LOC115, ((Tstorageloc290812) 0)); } goto LA108; LA113: ; { gensetconstr_555496_839829468(p0, n0, d0); } LA108: ; } break; case ((Tnodekind290020) 41): { { NIM_BOOL LOC120; NI LOC122; LOC120 = (NIM_BOOL)0; LOC120 = isdeepconstexpr_316566_2616423590(n0); if (!(LOC120)) goto LA121; LOC122 = (NI)0; LOC122 = len_291081_850551059(n0); LOC120 = !((LOC122 == ((NI) 0))); LA121: ; if (!LOC120) goto LA123; exprcomplexconst_556684_839829468(p0, n0, d0); } goto LA118; LA123: ; { Ttype290840* LOC126; LOC126 = (Ttype290840*)0; LOC126 = skiptypes_294099_850551059((*n0).typ, IL64(211106242013440)); if (!((*LOC126).kind == ((Ttypekind290244) 24))) goto LA127; genseqconstr_553004_839829468(p0, n0, d0); } goto LA118; LA127: ; { genarrayconstr_556207_839829468(p0, n0, d0); } LA118: ; } break; case ((Tnodekind290020) 37): { { NIM_BOOL LOC133; NI LOC135; LOC133 = (NIM_BOOL)0; LOC133 = isdeepconstexpr_316566_2616423590(n0); if (!(LOC133)) goto LA134; LOC135 = (NI)0; LOC135 = len_291081_850551059(n0); LOC133 = !((LOC135 == ((NI) 0))); LA134: ; if (!LOC133) goto LA136; exprcomplexconst_556684_839829468(p0, n0, d0); } goto LA131; LA136: ; { gentupleconstr_555618_839829468(p0, n0, d0); } LA131: ; } break; case ((Tnodekind290020) 38): { genobjconstr_552903_839829468(p0, n0, d0); } break; case ((Tnodekind290020) 61): { gencast_554537_839829468(p0, n0, d0); } break; case ((Tnodekind290020) 58): case ((Tnodekind290020) 59): case ((Tnodekind290020) 60): { genconv_554632_839829468(p0, n0, d0); } break; case ((Tnodekind290020) 64): case ((Tnodekind290020) 63): { genaddr_551051_839829468(p0, n0, d0); } break; case ((Tnodekind290020) 42): { genbracketexpr_552277_839829468(p0, n0, d0); } break; case ((Tnodekind290020) 47): case ((Tnodekind290020) 65): { genderef_541921_839829468(p0, n0, d0, NIM_FALSE); } break; case ((Tnodekind290020) 45): { genrecordfield_551448_839829468(p0, n0, d0); } break; case 
((Tnodekind290020) 46): { gencheckedrecordfield_552046_839829468(p0, n0, d0); } break; case ((Tnodekind290020) 127): case ((Tnodekind290020) 112): { genblock_544083_839829468(p0, n0, d0); } break; case ((Tnodekind290020) 126): { genstmtlistexpr_556402_839829468(p0, n0, d0); } break; case ((Tnodekind290020) 115): { { NI i_557023_839829468; NI HEX3Atmp_557276_839829468; NI LOC151; NI res_557279_839829468; i_557023_839829468 = (NI)0; HEX3Atmp_557276_839829468 = (NI)0; LOC151 = (NI)0; LOC151 = sonslen_293351_850551059(n0); HEX3Atmp_557276_839829468 = (NI)(LOC151 - ((NI) 1)); res_557279_839829468 = ((NI) 0); { while (1) { if (!(res_557279_839829468 <= HEX3Atmp_557276_839829468)) goto LA153; i_557023_839829468 = res_557279_839829468; genstmts_537244_839829468(p0, (*n0).kindU.S6.sons->data[i_557023_839829468]); res_557279_839829468 += ((NI) 1); } LA153: ; } } } break; case ((Tnodekind290020) 48): case ((Tnodekind290020) 92): { genif_542982_839829468(p0, n0, d0); } break; case ((Tnodekind290020) 93): { expr_537248_839829468(p0, (*(*n0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[((NI) 0)], d0); } break; case ((Tnodekind290020) 66): { downconv_556581_839829468(p0, n0, d0); } break; case ((Tnodekind290020) 67): { upconv_556431_839829468(p0, n0, d0); } break; case ((Tnodekind290020) 68): { genrangechck_554590_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_563)); } break; case ((Tnodekind290020) 69): { genrangechck_554590_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_564)); } break; case ((Tnodekind290020) 70): { genrangechck_554590_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_565)); } break; case ((Tnodekind290020) 71): { convstrtocstr_554642_839829468(p0, n0, d0); } break; case ((Tnodekind290020) 72): { convcstrtostr_554654_839829468(p0, n0, d0); } break; case ((Tnodekind290020) 51): case ((Tnodekind290020) 52): { Tsym290834* sym0; sym0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; genproc_530951_839829468((*p0).module, sym0); { NIM_BOOL 
LOC166; NimStringDesc* LOC170; LOC166 = (NIM_BOOL)0; LOC166 = ((*sym0).loc.r == NIM_NIL); if (LOC166) goto LA167; LOC166 = ((*sym0).loc.t == NIM_NIL); LA167: ; if (!LOC166) goto LA168; LOC170 = (NimStringDesc*)0; LOC170 = rawNewString((*(*sym0).name).s->Sup.len + 20); appendString(LOC170, ((NimStringDesc*) &T839829468_271)); appendString(LOC170, (*(*sym0).name).s); internalerror_194100_155036129((*n0).info, LOC170); } LA168: ; putlocintodest_537258_839829468(p0, d0, (*sym0).loc); } break; case ((Tnodekind290020) 155): { genclosure_555836_839829468(p0, n0, d0); } break; case ((Tnodekind290020) 1): { } break; case ((Tnodekind290020) 96): { genwhilestmt_543984_839829468(p0, n0); } break; case ((Tnodekind290020) 99): case ((Tnodekind290020) 100): { genvarstmt_542854_839829468(p0, n0); } break; case ((Tnodekind290020) 101): { genconststmt_542909_839829468(p0, n0); } break; case ((Tnodekind290020) 94): { internalerror_194100_155036129((*n0).info, ((NimStringDesc*) &T839829468_594)); } break; case ((Tnodekind290020) 97): { gencase_545826_839829468(p0, n0, d0); } break; case ((Tnodekind290020) 109): { genreturnstmt_543617_839829468(p0, n0); } break; case ((Tnodekind290020) 110): { genbreakstmt_544444_839829468(p0, n0); } break; case ((Tnodekind290020) 73): { { if (!!((((*n0).flags &(1U<<((NU)(((Tnodeflag290427) 14))&15U)))!=0))) goto LA183; genasgn_547239_839829468(p0, n0, NIM_FALSE); } LA183: ; } break; case ((Tnodekind290020) 74): { { if (!!((((*n0).flags &(1U<<((NU)(((Tnodeflag290427) 14))&15U)))!=0))) goto LA188; genasgn_547239_839829468(p0, n0, !(((*p0).prc == NIM_NIL))); } LA188: ; } break; case ((Tnodekind290020) 114): { { Tloc290816 a0; if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind290020) 1)))) goto LA193; genlinedir_530823_839829468(p0, n0); memset((void*)(&a0), 0, sizeof(a0)); initlocexpr_537283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0)); } LA193: ; } break; case ((Tnodekind290020) 89): { genasmstmt_546659_839829468(p0, n0); } 
break; case ((Tnodekind290020) 106): { { NIM_BOOL LOC199; NIM_BOOL LOC200; LOC199 = (NIM_BOOL)0; LOC200 = (NIM_BOOL)0; LOC200 = (gcmd_168132_2607990831 == ((Tcommands168076) 2)); if (LOC200) goto LA201; LOC200 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA201: ; LOC199 = LOC200; if (!(LOC199)) goto LA202; LOC199 = !(((gglobaloptions_168130_2607990831 &((NU64)1<<((NU)(((Tglobaloption168013) 31))&63U)))!=0)); LA202: ; if (!LOC199) goto LA203; gentrycpp_545865_839829468(p0, n0, d0); } goto LA197; LA203: ; { gentry_546114_839829468(p0, n0, d0); } LA197: ; } break; case ((Tnodekind290020) 108): { genraisestmt_544828_839829468(p0, n0); } break; case ((Tnodekind290020) 98): { gentypesection_536184_839829468((*p0).module, n0); } break; case ((Tnodekind290020) 125): case ((Tnodekind290020) 84): case ((Tnodekind290020) 121): case ((Tnodekind290020) 116): case ((Tnodekind290020) 117): case ((Tnodekind290020) 118): case ((Tnodekind290020) 119): case ((Tnodekind290020) 120): case ((Tnodekind290020) 83): case ((Tnodekind290020) 82): { } break; case ((Tnodekind290020) 90): { genpragma_547039_839829468(p0, n0); } break; case ((Tnodekind290020) 91): { Tnode290802* LOC211; LOC211 = (Tnode290802*)0; LOC211 = lastson_293364_850551059(n0); expr_537248_839829468(p0, LOC211, d0); } break; case ((Tnodekind290020) 79): case ((Tnodekind290020) 80): case ((Tnodekind290020) 81): { { Tsym290834* prc0; if (!((*(*n0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind290020) 1))) goto LA215; prc0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym; { NIM_BOOL LOC219; Tsym290834* LOC220; LOC219 = (NIM_BOOL)0; LOC220 = (Tsym290834*)0; LOC220 = skipgenericowner_295279_850551059(prc0); LOC219 = ((*LOC220).kind == ((Tsymkind290435) 6)); if (!(LOC219)) goto LA221; LOC219 = !((((*prc0).flags &(1U<<((NU)(((Tsymflag290184) 23))&31U)))!=0)); LA221: ; if (!LOC219) goto LA222; { NIM_BOOL LOC226; NIM_BOOL LOC227; NIM_BOOL LOC228; NIM_BOOL LOC229; Tsym290834* LOC231; 
NIM_BOOL LOC234; LOC226 = (NIM_BOOL)0; LOC227 = (NIM_BOOL)0; LOC228 = (NIM_BOOL)0; LOC229 = (NIM_BOOL)0; LOC229 = !(((gglobaloptions_168130_2607990831 &((NU64)1<<((NU)(((Tglobaloption168013) 2))&63U)))!=0)); if (!(LOC229)) goto LA230; LOC231 = (Tsym290834*)0; LOC231 = getmodule_297123_2984716966(prc0); LOC229 = !((((*LOC231).flags &(1U<<((NU)(((Tsymflag290184) 25))&31U)))!=0)); LA230: ; LOC228 = LOC229; if (LOC228) goto LA232; LOC228 = ((65600 & (*prc0).flags) == 64); LA232: ; LOC227 = LOC228; if (LOC227) goto LA233; LOC234 = (NIM_BOOL)0; LOC234 = (((*prc0).flags &(1U<<((NU)(((Tsymflag290184) 6))&31U)))!=0); if (!(LOC234)) goto LA235; LOC234 = (((*prc0).loc.flags &(1U<<((NU)(((Tlocflag290810) 5))&15U)))!=0); LA235: ; LOC227 = LOC234; LA233: ; LOC226 = LOC227; if (LOC226) goto LA236; LOC226 = ((*prc0).kind == ((Tsymkind290435) 13)); LA236: ; if (!LOC226) goto LA237; { NIM_BOOL LOC241; Tnode290802* LOC242; LOC241 = (NIM_BOOL)0; LOC242 = (Tnode290802*)0; LOC242 = getbody_333227_1724185294(prc0); LOC241 = !(((*LOC242).kind == ((Tnodekind290020) 1))); if (LOC241) goto LA243; LOC241 = (((*prc0).loc.flags &(1U<<((NU)(((Tlocflag290810) 4))&15U)))!=0); LA243: ; if (!LOC241) goto LA244; genproc_530951_839829468((*p0).module, prc0); } LA244: ; } LA237: ; } LA222: ; } LA215: ; } break; case ((Tnodekind290020) 95): { genparforstmt_544208_839829468(p0, n0); } break; case ((Tnodekind290020) 157): { genstate_542117_839829468(p0, n0); } break; case ((Tnodekind290020) 156): { gengotostate_542144_839829468(p0, n0); } break; case ((Tnodekind290020) 158): { genbreakstate_542229_839829468(p0, n0); } break; default: { NimStringDesc* LOC251; LOC251 = (NimStringDesc*)0; LOC251 = rawNewString(reprEnum((NI)(*n0).kind, (&NTI290020))->Sup.len + 25); appendString(LOC251, ((NimStringDesc*) &T839829468_291)); appendString(LOC251, reprEnum((NI)(*n0).kind, (&NTI290020))); appendString(LOC251, ((NimStringDesc*) &T839829468_657)); internalerror_194100_155036129((*n0).info, LOC251); } break; } } 
N_NIMCALL(void, genstmts_537244_839829468)(Tcproc527021* p0, Tnode290802* t0) { Tloc290816 a0; memset((void*)(&a0), 0, sizeof(a0)); expr_537248_839829468(p0, t0, (&a0)); { NimStringDesc* LOC5; if (!!(((7 &(1U<<((NU)(a0.k)&15U)))!=0))) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = HEX24_194185_1689653243(T839829468_658); internalerror_194113_155036129(LOC5); } LA3: ; } N_NIMCALL(Tnode290802*, myprocess_561402_839829468)(Tpasscontext339002* b0, Tnode290802* n0) { Tnode290802* result0; Tcgen527027* m0; { result0 = (Tnode290802*)0; result0 = n0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (b0 == NIM_NIL); if (LOC3) goto LA4; LOC3 = skipcodegen_339085_2355241294(n0); LA4: ; if (!LOC3) goto LA5; goto BeforeRet; } LA5: ; m0 = ((Tcgen527027*) (b0)); (*(*m0).initproc).options = initprocoptions_560635_839829468(m0); genstmts_537244_839829468((*m0).initproc, n0); }BeforeRet: ; return result0; } N_NIMCALL(Ropeobj177006*, getsomeinitname_559904_839829468)(Tsym290834* m0, NimStringDesc* suffix0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; { NimStringDesc* LOC5; if (!((12288 & (*m0).flags) == 0)) goto LA3; LOC5 = (NimStringDesc*)0; LOC5 = mangle_526847_2036603609((*(*(*m0).owner).name).s); result0 = rope_177277_2381377266(LOC5); add_177487_2381377266(&result0, ((NimStringDesc*) &T839829468_12)); } LA3: ; add_177487_2381377266(&result0, (*(*m0).name).s); add_177487_2381377266(&result0, suffix0); return result0; } N_NIMCALL(Ropeobj177006*, getinitname_560235_839829468)(Tsym290834* m0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; result0 = getsomeinitname_559904_839829468(m0, ((NimStringDesc*) &T839829468_659)); return result0; } N_NIMCALL(Ropeobj177006*, getdatinitname_560239_839829468)(Tsym290834* m0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; result0 = getsomeinitname_559904_839829468(m0, ((NimStringDesc*) &T839829468_660)); return result0; } N_NIMCALL(void, registermoduletomain_560243_839829468)(Tsym290834* m0) { Ropeobj177006* init0; Ropeobj177006* 
datinit0; TY177507 LOC1; TY177507 LOC2; init0 = getinitname_560235_839829468(m0); datinit0 = getdatinitname_560239_839829468(m0); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = init0; addf_178205_2381377266(&mainmodprocs_527148_3723162438, ((NimStringDesc*) &T839829468_661), LOC1, 1); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = datinit0; addf_178205_2381377266(&mainmodprocs_527148_3723162438, ((NimStringDesc*) &T839829468_661), LOC2, 1); { TY177507 LOC7; Ropeobj177006* initcall0; TY177507 LOC8; if (!!((((*m0).flags &(1U<<((NU)(((Tsymflag290184) 13))&31U)))!=0))) goto LA5; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = datinit0; addf_178205_2381377266(&maindatinit_527151_3723162438, ((NimStringDesc*) &T839829468_662), LOC7, 1); memset((void*)LOC8, 0, sizeof(LOC8)); LOC8[0] = init0; initcall0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_662), LOC8, 1); { if (!(((*m0).flags &(1U<<((NU)(((Tsymflag290184) 12))&31U)))!=0)) goto LA11; add_177482_2381377266(&mainmodinit_527149_3723162438, initcall0); } goto LA9; LA11: ; { add_177482_2381377266(&othermodsinit_527150_3723162438, initcall0); } LA9: ; } LA5: ; } N_NIMCALL(Ropeobj177006*, genfilenames_559688_839829468)(Tcgen527027* m0) { Ropeobj177006* result0; Ropeobj177006* LOC1; result0 = (Ropeobj177006*)0; LOC1 = (Ropeobj177006*)0; LOC1 = cgsym_530403_839829468(m0, ((NimStringDesc*) &T839829468_673)); result0 = NIM_NIL; { NI i_559717_839829468; NI HEX3Atmp_559722_839829468; NI res_559725_839829468; i_559717_839829468 = (NI)0; HEX3Atmp_559722_839829468 = (NI)0; HEX3Atmp_559722_839829468 = ((fileinfos_189629_155036129 ? 
fileinfos_189629_155036129->Sup.len : 0) - 1); res_559725_839829468 = ((NI) 0); { while (1) { TY177507 LOC5; if (!(res_559725_839829468 <= HEX3Atmp_559722_839829468)) goto LA4; i_559717_839829468 = res_559725_839829468; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = makecstring_189638_155036129(fileinfos_189629_155036129->data[i_559717_839829468].projpath); addf_178205_2381377266(&result0, ((NimStringDesc*) &T839829468_674), LOC5, 1); res_559725_839829468 += ((NI) 1); } LA4: ; } } return result0; } N_NIMCALL(void, genmainproc_559729_839829468)(Tcgen527027* m0) { NimStringDesc* nimmain0; NimStringDesc* othermain0; Ropeobj177006* initstackbottomcall0; TY534475 LOC38; TY533238 LOC47; nimmain0 = (NimStringDesc*)0; othermain0 = (NimStringDesc*)0; { NIM_BOOL LOC3; NIM_BOOL LOC12; LOC3 = (NIM_BOOL)0; LOC3 = (targetos_175629_4151366050 == ((Tsystemos175004) 2)); if (!(LOC3)) goto LA4; LOC3 = !(((gglobaloptions_168130_2607990831 & 1280) == 0)); LA4: ; if (!LOC3) goto LA5; { if (!((gglobaloptions_168130_2607990831 &((NU64)1<<((NU)(((Tglobaloption168013) 10))&63U)))!=0)) goto LA9; nimmain0 = copyString(((NimStringDesc*) &T839829468_663)); othermain0 = copyString(((NimStringDesc*) &T839829468_664)); } goto LA7; LA9: ; { nimmain0 = copyString(((NimStringDesc*) &T839829468_665)); othermain0 = copyString(((NimStringDesc*) &T839829468_666)); } LA7: ; LOC12 = (NIM_BOOL)0; LOC12 = includestr_147249_3771138726((&(*m0).headerfiles), ((NimStringDesc*) &T839829468_667)); } goto LA1; LA5: ; { if (!((gglobaloptions_168130_2607990831 &((NU64)1<<((NU)(((Tglobaloption168013) 8))&63U)))!=0)) goto LA14; nimmain0 = copyString(((NimStringDesc*) &T839829468_665)); othermain0 = copyString(((NimStringDesc*) &T839829468_668)); } goto LA1; LA14: ; { if (!(targetos_175629_4151366050 == ((Tsystemos175004) 24))) goto LA17; nimmain0 = copyString(((NimStringDesc*) &T839829468_669)); othermain0 = copyString(((NimStringDesc*) &T839829468_670)); } goto LA1; LA17: ; { nimmain0 = copyString(((NimStringDesc*) 
&T839829468_669)); othermain0 = copyString(((NimStringDesc*) &T839829468_671)); } LA1: ; { Ropeobj177006* LOC24; if (!!((gbreakpoints_546861_839829468 == NIM_NIL))) goto LA22; LOC24 = (Ropeobj177006*)0; LOC24 = cgsym_530403_839829468(m0, ((NimStringDesc*) &T839829468_672)); } LA22: ; { Ropeobj177006* LOC29; if (!((goptions_168128_2607990831 &(1U<<((NU)(((Toption168009) 17))&31U)))!=0)) goto LA27; LOC29 = (Ropeobj177006*)0; LOC29 = genfilenames_559688_839829468(m0); add_177482_2381377266(&gbreakpoints_546861_839829468, LOC29); } LA27: ; { NIM_BOOL LOC32; LOC32 = (NIM_BOOL)0; LOC32 = (targetos_175629_4151366050 == ((Tsystemos175004) 24)); if (LOC32) goto LA33; LOC32 = (gselectedgc_168133_2607990831 == ((Tgcmode168080) 0)); LA33: ; if (!LOC32) goto LA34; initstackbottomcall0 = rope_177277_2381377266(((NimStringDesc*) &T839829468_490)); } goto LA30; LA34: ; { TY531289 LOC37; memset((void*)LOC37, 0, sizeof(LOC37)); initstackbottomcall0 = ropecg_530407_839829468(m0, ((NimStringDesc*) &T839829468_675), LOC37, 0); } LA30: ; (*m0).labels += ((NI) 1); memset((void*)LOC38, 0, sizeof(LOC38)); LOC38[0] = maindatinit_527151_3723162438; LOC38[1] = gbreakpoints_546861_839829468; LOC38[2] = othermodsinit_527150_3723162438; { NIM_BOOL LOC41; TY531289 LOC45; LOC41 = (NIM_BOOL)0; LOC41 = emulatedthreadvars_530949_839829468(); if (!(LOC41)) goto LA42; LOC41 = !((targetos_175629_4151366050 == ((Tsystemos175004) 24))); LA42: ; if (!LOC41) goto LA43; memset((void*)LOC45, 0, sizeof(LOC45)); LOC38[3] = ropecg_530407_839829468(m0, ((NimStringDesc*) &T839829468_677), LOC45, 0); } goto LA39; LA43: ; { LOC38[3] = rope_177277_2381377266(((NimStringDesc*) &T839829468_490)); } LA39: ; LOC38[4] = initstackbottomcall0; appcg_530632_839829468(m0, &(*m0).s[(((Tcfilesection527005) 10))- 0], ((NimStringDesc*) &T839829468_676), LOC38, 5); memset((void*)LOC47, 0, sizeof(LOC47)); LOC47[0] = mainmodinit_527149_3723162438; LOC47[1] = initstackbottomcall0; LOC47[2] = rope_177401_2381377266(((NI64) 
((*m0).labels))); appcg_530632_839829468(m0, &(*m0).s[(((Tcfilesection527005) 10))- 0], nimmain0, LOC47, 3); { TY531289 LOC52; if (!!(((gglobaloptions_168130_2607990831 &((NU64)1<<((NU)(((Tglobaloption168013) 20))&63U)))!=0))) goto LA50; memset((void*)LOC52, 0, sizeof(LOC52)); appcg_530632_839829468(m0, &(*m0).s[(((Tcfilesection527005) 10))- 0], othermain0, LOC52, 0); } LA50: ; } N_NIMCALL(Tnode290802*, myclose_561830_839829468)(Tpasscontext339002* b0, Tnode290802* n0) { Tnode290802* result0; Tcgen527027* m0; { result0 = (Tnode290802*)0; result0 = n0; { NIM_BOOL LOC3; LOC3 = (NIM_BOOL)0; LOC3 = (b0 == NIM_NIL); if (LOC3) goto LA4; LOC3 = skipcodegen_339085_2355241294(n0); LA4: ; if (!LOC3) goto LA5; goto BeforeRet; } LA5: ; m0 = ((Tcgen527027*) (b0)); { if (!!((n0 == NIM_NIL))) goto LA9; (*(*m0).initproc).options = initprocoptions_560635_839829468(m0); genstmts_537244_839829468((*m0).initproc, n0); } LA9: ; registermoduletomain_560243_839829468((*m0).module); { Tnode290802* disp0; if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag290184) 12))&31U)))!=0)) goto LA13; (*m0).flags |= ((NU8)1)<<((((Codegenflag527025) 5))%(sizeof(NU8)*8)); disp0 = generatemethoddispatchers_430151_3853300031(); { NI i_561891_839829468; NI HEX3Atmp_561895_839829468; NI LOC16; NI res_561898_839829468; i_561891_839829468 = (NI)0; HEX3Atmp_561895_839829468 = (NI)0; LOC16 = (NI)0; LOC16 = sonslen_293351_850551059(disp0); HEX3Atmp_561895_839829468 = (NI)(LOC16 - ((NI) 1)); res_561898_839829468 = ((NI) 0); { while (1) { if (!(res_561898_839829468 <= HEX3Atmp_561895_839829468)) goto LA18; i_561891_839829468 = res_561898_839829468; genprocaux_558284_839829468(m0, (*(*disp0).kindU.S6.sons->data[i_561891_839829468]).kindU.S4.sym); res_561898_839829468 += ((NI) 1); } LA18: ; } } genmainproc_559729_839829468(m0); } LA13: ; }BeforeRet: ; return result0; } N_NIMCALL(void, finishmodule_561420_839829468)(Tcgen527027* m0) { NI i0; i0 = ((NI) 0); { while (1) { Tsym290834* prc0; if (!(i0 <= 
((*m0).forwardedprocs ? ((*m0).forwardedprocs->Sup.len-1) : -1))) goto LA2; prc0 = (*m0).forwardedprocs->data[i0]; { NimStringDesc* LOC7; if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag290184) 4))&31U)))!=0)) goto LA5; LOC7 = (NimStringDesc*)0; LOC7 = rawNewString((*(*prc0).name).s->Sup.len + 17); appendString(LOC7, ((NimStringDesc*) &T839829468_678)); appendString(LOC7, (*(*prc0).name).s); internalerror_194100_155036129((*prc0).info, LOC7); } LA5: ; genprocnoforward_558906_839829468(m0, prc0); i0 += ((NI) 1); } LA2: ; } gforwardedprocscounter_527171_3723162438 -= i0; (*m0).forwardedprocs = (Tsymseq290804*) setLengthSeq(&((*m0).forwardedprocs)->Sup, sizeof(Tsym290834*), ((NI) 0)); } N_NIMCALL(void, geninitcode_560286_839829468)(Tcgen527027* m0) { Ropeobj177006* initname0; Ropeobj177006* prc0; TY177507 LOC1; Ropeobj177006* LOC12; Ropeobj177006* LOC13; Ropeobj177006** LOC14; Ropeobj177006** LOC15; Ropeobj177006** LOC16; Ropeobj177006* LOC17; Ropeobj177006* LOC33; Ropeobj177006** LOC34; Ropeobj177006** LOC35; Ropeobj177006** LOC36; Ropeobj177006* LOC37; Ropeobj177006* LOC38; Ropeobj177006** LOC39; Ropeobj177006** LOC40; Ropeobj177006** LOC41; Ropeobj177006* LOC42; Ropeobj177006* LOC50; TY531289 LOC51; TY177507 LOC52; TY531289 LOC58; initname0 = getinitname_560235_839829468((*m0).module); memset((void*)LOC1, 0, sizeof(LOC1)); LOC1[0] = initname0; prc0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_679), LOC1, 1); { TY530811 LOC6; if (!(((NI) 0) < (*m0).typenodes)) goto LA4; memset((void*)LOC6, 0, sizeof(LOC6)); LOC6[0] = (*m0).typenodesname; LOC6[1] = rope_177401_2381377266(((NI64) ((*m0).typenodes))); appcg_530632_839829468(m0, &(*m0).s[(((Tcfilesection527005) 12))- 0], ((NimStringDesc*) &T839829468_680), LOC6, 2); } LA4: ; { TY530811 LOC11; if (!(((NI) 0) < (*m0).nimtypes)) goto LA9; memset((void*)LOC11, 0, sizeof(LOC11)); LOC11[0] = (*m0).nimtypesname; LOC11[1] = rope_177401_2381377266(((NI64) ((*m0).nimtypes))); appcg_530632_839829468(m0, 
&(*m0).s[(((Tcfilesection527005) 12))- 0], ((NimStringDesc*) &T839829468_681), LOC11, 2); } LA9: ; LOC12 = (Ropeobj177006*)0; LOC12 = initgcframe_536435_839829468((*m0).initproc); add_177482_2381377266(&prc0, LOC12); LOC13 = (Ropeobj177006*)0; LOC13 = gensectionstart_528081_2760143328(((Tcprocsection527011) 0)); add_177482_2381377266(&prc0, LOC13); LOC14 = (Ropeobj177006**)0; LOC14 = s_527179_3723162438((*m0).preinitproc, ((Tcprocsection527011) 0)); add_177482_2381377266(&prc0, (*LOC14)); LOC15 = (Ropeobj177006**)0; LOC15 = s_527179_3723162438((*m0).initproc, ((Tcprocsection527011) 0)); add_177482_2381377266(&prc0, (*LOC15)); LOC16 = (Ropeobj177006**)0; LOC16 = s_527179_3723162438((*m0).postinitproc, ((Tcprocsection527011) 0)); add_177482_2381377266(&prc0, (*LOC16)); LOC17 = (Ropeobj177006*)0; LOC17 = gensectionend_528116_2760143328(((Tcprocsection527011) 0)); add_177482_2381377266(&prc0, LOC17); { NIM_BOOL LOC20; LOC20 = (NIM_BOOL)0; LOC20 = (((*(*m0).initproc).options &(1U<<((NU)(((Toption168009) 15))&31U)))!=0); if (!(LOC20)) goto LA21; LOC20 = !((((*m0).flags &(1U<<((NU)(((Codegenflag527025) 2))&7U)))!=0)); LA21: ; if (!LOC20) goto LA22; (*m0).flags |= ((NU8)1)<<((((Codegenflag527025) 2))%(sizeof(NU8)*8)); { Ropeobj177006* procname0; Ropeobj177006* LOC28; Ropeobj177006* LOC29; if (!!((((*m0).flags &(1U<<((NU)(((Codegenflag527025) 0))&7U)))!=0))) goto LA26; procname0 = makecstring_189638_155036129((*(*(*m0).module).name).s); LOC28 = (Ropeobj177006*)0; LOC28 = quotedfilename_194818_155036129((*(*m0).module).info); LOC29 = (Ropeobj177006*)0; LOC29 = initframe_558140_839829468((*m0).initproc, procname0, LOC28); add_177482_2381377266(&prc0, LOC29); } goto LA24; LA26: ; { TY531289 LOC31; Ropeobj177006* LOC32; memset((void*)LOC31, 0, sizeof(LOC31)); LOC32 = (Ropeobj177006*)0; LOC32 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_682), LOC31, 0); add_177482_2381377266(&prc0, LOC32); } LA24: ; } LA22: ; LOC33 = (Ropeobj177006*)0; LOC33 = 
gensectionstart_528081_2760143328(((Tcprocsection527011) 1)); add_177482_2381377266(&prc0, LOC33); LOC34 = (Ropeobj177006**)0; LOC34 = s_527179_3723162438((*m0).preinitproc, ((Tcprocsection527011) 1)); add_177482_2381377266(&prc0, (*LOC34)); LOC35 = (Ropeobj177006**)0; LOC35 = s_527179_3723162438((*m0).initproc, ((Tcprocsection527011) 1)); add_177482_2381377266(&prc0, (*LOC35)); LOC36 = (Ropeobj177006**)0; LOC36 = s_527179_3723162438((*m0).postinitproc, ((Tcprocsection527011) 1)); add_177482_2381377266(&prc0, (*LOC36)); LOC37 = (Ropeobj177006*)0; LOC37 = gensectionend_528116_2760143328(((Tcprocsection527011) 1)); add_177482_2381377266(&prc0, LOC37); LOC38 = (Ropeobj177006*)0; LOC38 = gensectionstart_528081_2760143328(((Tcprocsection527011) 2)); add_177482_2381377266(&prc0, LOC38); LOC39 = (Ropeobj177006**)0; LOC39 = s_527179_3723162438((*m0).preinitproc, ((Tcprocsection527011) 2)); add_177482_2381377266(&prc0, (*LOC39)); LOC40 = (Ropeobj177006**)0; LOC40 = s_527179_3723162438((*m0).initproc, ((Tcprocsection527011) 2)); add_177482_2381377266(&prc0, (*LOC40)); LOC41 = (Ropeobj177006**)0; LOC41 = s_527179_3723162438((*m0).postinitproc, ((Tcprocsection527011) 2)); add_177482_2381377266(&prc0, (*LOC41)); LOC42 = (Ropeobj177006*)0; LOC42 = gensectionend_528116_2760143328(((Tcprocsection527011) 2)); add_177482_2381377266(&prc0, LOC42); { NIM_BOOL LOC45; Ropeobj177006* LOC49; LOC45 = (NIM_BOOL)0; LOC45 = (((*(*m0).initproc).options &(1U<<((NU)(((Toption168009) 15))&31U)))!=0); if (!(LOC45)) goto LA46; LOC45 = !((((*m0).flags &(1U<<((NU)(((Codegenflag527025) 0))&7U)))!=0)); LA46: ; if (!LOC45) goto LA47; LOC49 = (Ropeobj177006*)0; LOC49 = deinitframe_558150_839829468((*m0).initproc); add_177482_2381377266(&prc0, LOC49); } LA47: ; LOC50 = (Ropeobj177006*)0; LOC50 = deinitgcframe_536441_839829468((*m0).initproc); add_177482_2381377266(&prc0, LOC50); memset((void*)LOC51, 0, sizeof(LOC51)); addf_178205_2381377266(&prc0, ((NimStringDesc*) &T839829468_683), LOC51, 0); 
memset((void*)LOC52, 0, sizeof(LOC52)); LOC52[0] = getdatinitname_560239_839829468((*m0).module); addf_178205_2381377266(&prc0, ((NimStringDesc*) &T839829468_679), LOC52, 1); { Tcfilesection527005 i_560401_839829468; NI res_560482_839829468; i_560401_839829468 = (Tcfilesection527005)0; res_560482_839829468 = ((NI) 12); { while (1) { Ropeobj177006* LOC56; Ropeobj177006* LOC57; if (!(res_560482_839829468 <= ((NI) 16))) goto LA55; i_560401_839829468 = ((Tcfilesection527005) (res_560482_839829468)); LOC56 = (Ropeobj177006*)0; LOC56 = gensectionstart_528015_2760143328(i_560401_839829468); add_177482_2381377266(&prc0, LOC56); add_177482_2381377266(&prc0, (*m0).s[(i_560401_839829468)- 0]); LOC57 = (Ropeobj177006*)0; LOC57 = gensectionend_528050_2760143328(i_560401_839829468); add_177482_2381377266(&prc0, LOC57); res_560482_839829468 += ((NI) 1); } LA55: ; } } memset((void*)LOC58, 0, sizeof(LOC58)); addf_178205_2381377266(&prc0, ((NimStringDesc*) &T839829468_683), LOC58, 0); add_177482_2381377266(&(*m0).s[(((Tcfilesection527005) 11))- 0], prc0); { NIM_CHAR i_560442_839829468; Ropeobj177006* el_560443_839829468; TY527136 HEX3Atmp_560487_839829468; NIM_CHAR i_560490_839829468; i_560442_839829468 = (NIM_CHAR)0; el_560443_839829468 = (Ropeobj177006*)0; memset((void*)HEX3Atmp_560487_839829468, 0, sizeof(HEX3Atmp_560487_839829468)); memcpy((void*)HEX3Atmp_560487_839829468, (NIM_CONST void*)(*m0).extensionloaders, sizeof(HEX3Atmp_560487_839829468)); i_560490_839829468 = 48; { if (!((NU8)(((NIM_CHAR) (((NU8)(i_560490_839829468))))) <= (NU8)(57))) goto LA62; { while (1) { i_560442_839829468 = i_560490_839829468; el_560443_839829468 = HEX3Atmp_560487_839829468[(((NU8)(i_560490_839829468)))- 48]; { Ropeobj177006* ex0; TY530811 LOC70; if (!!((el_560443_839829468 == NIM_NIL))) goto LA68; memset((void*)LOC70, 0, sizeof(LOC70)); LOC70[0] = rope_177401_2381377266(((NI64) ((NI)(((NI) (((NU8)(i_560442_839829468)))) - ((NI) 48))))); LOC70[1] = el_560443_839829468; ex0 = 
/* NOTE(review): this whole region is machine-generated C emitted by the Nim
 * compiler's C backend (cgen) for the compiler's own code-generation module.
 * Identifiers carry mangled Nim symbol ids (e.g. name_<id>_<module hash>);
 * control flow is the backend's goto/label lowering of Nim's structured code.
 * Do not edit by hand -- change the Nim sources and regenerate.  Only line
 * breaks (at function boundaries) and comments were added here; every other
 * token is unchanged. */

/* Tail of a function whose head lies above this chunk -- left untouched. */
HEX25_177905_2381377266(((NimStringDesc*) &T839829468_684), LOC70, 2); add_177482_2381377266(&(*m0).s[(((Tcfilesection527005) 11))- 0], ex0); } LA68: ; { if (!((NU8)(57) <= (NU8)(((NIM_CHAR) (((NU8)(i_560490_839829468))))))) goto LA73; goto LA64; } LA73: ; i_560490_839829468 += ((NI) 1); } } LA64: ; } LA62: ; } }

/* Appears to force emission of a C type descriptor (via gettypedesc) for every
 * entry on m0->typestack -- TODO confirm against compiler/cgen.nim. */
N_NIMCALL(void, finishtypedescriptions_533842_839829468)(Tcgen527027* m0) { NI i0; i0 = ((NI) 0); { while (1) { Ropeobj177006* LOC3; if (!(i0 < ((*m0).typestack ? (*m0).typestack->Sup.len : 0))) goto LA2; LOC3 = (Ropeobj177006*)0; LOC3 = gettypedesc_533671_839829468(m0, (*m0).typestack->data[i0]); i0 += ((NI) 1); } LA2: ; } }

/* Builds the generated-file banner rope; the two branches select between a
 * short form and a long form that embeds target OS/CPU/C-compiler names and
 * the compile command (presumably gated on a global option bit -- verify). */
N_NIMCALL(Ropeobj177006*, getcopyright_559665_839829468)(NimStringDesc* cfile0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; { TY177507 LOC5; if (!((gglobaloptions_168130_2607990831 &((NU64)1<<((NU)(((Tglobaloption168013) 4))&63U)))!=0)) goto LA3; memset((void*)LOC5, 0, sizeof(LOC5)); LOC5[0] = rope_177277_2381377266(((NimStringDesc*) &T839829468_686)); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_685), LOC5, 1); } goto LA1; LA3: ; { TY534475 LOC7; NimStringDesc* LOC8; memset((void*)LOC7, 0, sizeof(LOC7)); LOC7[0] = rope_177277_2381377266(((NimStringDesc*) &T839829468_686)); LOC7[1] = rope_177277_2381377266(Os_175068_4151366050[(targetos_175629_4151366050)- 1].Field0); LOC7[2] = rope_177277_2381377266(Cpu_175496_4151366050[(targetcpu_175627_4151366050)- 1].Field0); LOC7[3] = rope_177277_2381377266(Cc_271413_2528170400[(ccompiler_271431_2528170400)- 1].Field0); LOC8 = (NimStringDesc*)0; LOC8 = getcompilecfilecmd_272284_2528170400(cfile0, NIM_FALSE); LOC7[4] = rope_177277_2381377266(LOC8); result0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_687), LOC7, 5); } LA1: ; return result0; }

/* Appends the target's integer-size configuration (Cpu[...].Field1, likely the
 * target int bit width -- confirm) to the output rope. */
static N_INLINE(void, addinttypes_559659_839829468)(Ropeobj177006** result0) { NimStringDesc* LOC1; TY177507 LOC2; LOC1 = (NimStringDesc*)0; LOC1 = rawNewString(tnl_175644_4151366050->Sup.len + 22); appendString(LOC1, ((NimStringDesc*) &T839829468_688)); appendString(LOC1, tnl_175644_4151366050); memset((void*)LOC2, 0, sizeof(LOC2)); LOC2[0] = rope_177401_2381377266(((NI64) (Cpu_175496_4151366050[(targetcpu_175627_4151366050)- 1].Field1))); addf_178205_2381377266(result0, LOC1, LOC2, 1); }

/* File header = copyright banner + int-type preamble. */
N_NIMCALL(Ropeobj177006*, getfileheader_559683_839829468)(NimStringDesc* cfile0) { Ropeobj177006* result0; result0 = (Ropeobj177006*)0; result0 = getcopyright_559665_839829468(cfile0); addinttypes_559659_839829468(&result0); return result0; }

/* Emits thread-local-storage declarations (nimtv) into section 4 when TLS is
 * in use and the module qualifies; first forces type descriptors for all
 * nimtvdeps entries. */
N_NIMCALL(void, generatethreadlocalstorage_536717_839829468)(Tcgen527027* m0) { { NIM_BOOL LOC3; NIM_BOOL LOC5; TY177507 LOC13; LOC3 = (NIM_BOOL)0; LOC3 = !((nimtv_536656_839829468 == NIM_NIL)); if (!(LOC3)) goto LA4; LOC5 = (NIM_BOOL)0; LOC5 = (((*m0).flags &(1U<<((NU)(((Codegenflag527025) 1))&7U)))!=0); if (LOC5) goto LA6; LOC5 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag290184) 12))&31U)))!=0); LA6: ; LOC3 = LOC5; LA4: ; if (!LOC3) goto LA7; { Ttype290840* t_536761_839829468; NI i_536768_839829468; NI L_536770_839829468; t_536761_839829468 = (Ttype290840*)0; i_536768_839829468 = ((NI) 0); L_536770_839829468 = (nimtvdeps_536674_839829468 ? nimtvdeps_536674_839829468->Sup.len : 0); { while (1) { Ropeobj177006* LOC12; if (!(i_536768_839829468 < L_536770_839829468)) goto LA11; t_536761_839829468 = nimtvdeps_536674_839829468->data[i_536768_839829468]; LOC12 = (Ropeobj177006*)0; LOC12 = gettypedesc_533671_839829468(m0, t_536761_839829468); i_536768_839829468 += ((NI) 1); } LA11: ; } } memset((void*)LOC13, 0, sizeof(LOC13)); LOC13[0] = nimtv_536656_839829468; addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 4))- 0], ((NimStringDesc*) &T839829468_689), LOC13, 1); } LA7: ; }

/* Emits the #include list for the module: entries starting with '#' (35) are
 * verbatim directives with '`' mapped to '"'; entries already quoted with '"'
 * (34) or '<' (60) pass through one format; everything else gets another. */
N_NIMCALL(void, generateheaders_558104_839829468)(Tcgen527027* m0) { NimStringDesc* LOC1; Tstrentry147009* it0; LOC1 = (NimStringDesc*)0; LOC1 = rawNewString(tnl_175644_4151366050->Sup.len + tnl_175644_4151366050->Sup.len + 20); appendString(LOC1, tnl_175644_4151366050); appendString(LOC1, ((NimStringDesc*) &T839829468_690)); appendString(LOC1, tnl_175644_4151366050); add_177487_2381377266(&(*m0).s[(((Tcfilesection527005) 1))- 0], LOC1); it0 = ((Tstrentry147009*) ((*m0).headerfiles.head)); { while (1) { if (!!((it0 == NIM_NIL))) goto LA3; { NimStringDesc* LOC8; NimStringDesc* LOC9; Ropeobj177006* LOC10; if (!((NU8)((*it0).data->data[((NI) 0)]) == (NU8)(35))) goto LA6; LOC8 = (NimStringDesc*)0; LOC9 = (NimStringDesc*)0; LOC9 = nsuReplaceChar((*it0).data, 96, 34); LOC8 = rawNewString(LOC9->Sup.len + tnl_175644_4151366050->Sup.len + 0); appendString(LOC8, LOC9); appendString(LOC8, tnl_175644_4151366050); LOC10 = (Ropeobj177006*)0; LOC10 = rope_177277_2381377266(LOC8); add_177482_2381377266(&(*m0).s[(((Tcfilesection527005) 1))- 0], LOC10); } goto LA4; LA6: ; { TY177507 LOC14; if (!!((((NU8)((*it0).data->data[((NI) 0)])) == ((NU8)(34)) || ((NU8)((*it0).data->data[((NI) 0)])) == ((NU8)(60))))) goto LA12; memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = rope_177277_2381377266((*it0).data); addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 1))- 0], ((NimStringDesc*) &T839829468_691), LOC14, 1); } goto LA4; LA12: ; { TY177507 LOC16; memset((void*)LOC16, 0, sizeof(LOC16)); LOC16[0] = rope_177277_2381377266((*it0).data); addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 1))- 0], ((NimStringDesc*) &T839829468_692), LOC16, 1); } LA4: ; it0 = ((Tstrentry147009*) ((*it0).Sup.next)); } LA3: ; } }

/* Assembles the full C translation unit: header, merge info, TLS, includes,
 * then sections 1..10 bracketed by start/end markers, then section 11. */
N_NIMCALL(Ropeobj177006*, genmodule_560491_839829468)(Tcgen527027* m0, NimStringDesc* cfile0) { Ropeobj177006* result0; Ropeobj177006* LOC1; result0 = (Ropeobj177006*)0; result0 = getfileheader_559683_839829468(cfile0); LOC1 = (Ropeobj177006*)0; LOC1 = genmergeinfo_528203_2760143328(m0); add_177482_2381377266(&result0, LOC1); generatethreadlocalstorage_536717_839829468(m0); generateheaders_558104_839829468(m0); { Tcfilesection527005 i_560614_839829468; NI res_560622_839829468; i_560614_839829468 = (Tcfilesection527005)0; res_560622_839829468 = ((NI) 1); { while (1) { Ropeobj177006* LOC5; Ropeobj177006* LOC6; if (!(res_560622_839829468 <= ((NI) 10))) goto LA4; i_560614_839829468 = ((Tcfilesection527005) (res_560622_839829468)); LOC5 = (Ropeobj177006*)0; LOC5 = gensectionstart_528015_2760143328(i_560614_839829468); add_177482_2381377266(&result0, LOC5); add_177482_2381377266(&result0, (*m0).s[(i_560614_839829468)- 0]); LOC6 = (Ropeobj177006*)0; LOC6 = gensectionend_528050_2760143328(i_560614_839829468); add_177482_2381377266(&result0, LOC6); res_560622_839829468 += ((NI) 1); } LA4: ; } } add_177482_2381377266(&result0, (*m0).s[(((Tcfilesection527005) 11))- 0]); return result0; }

/* For a module loaded from the compilation cache: if a merge is required (and
 * the module is not the main module), re-merge, regenerate and rewrite its C
 * file and schedule it for compilation; always schedule it for linking. */
N_NIMCALL(void, updatecachedmodule_561813_839829468)(Tcgen527027* m0) { NimStringDesc* cfile0; NimStringDesc* cfilenoext0; cfile0 = getcfile_561204_839829468(m0); cfilenoext0 = noschangeFileExt(cfile0, ((NimStringDesc*) &T839829468_490)); { NIM_BOOL LOC3; Ropeobj177006* code0; LOC3 = (NIM_BOOL)0; LOC3 = mergerequired_528832_2760143328(m0); if (!(LOC3)) goto LA4; LOC3 = !((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag290184) 12))&31U)))!=0)); LA4: ; if (!LOC3) goto LA5; mergefiles_529241_2760143328(cfile0, m0); geninitcode_560286_839829468(m0); finishtypedescriptions_533842_839829468(m0); code0 = genmodule_560491_839829468(m0, cfile0); writerope_177836_2381377266(code0, cfile0, NIM_FALSE); addfiletocompile_271863_2528170400(cfile0); } LA5: ; addfiletolink_271872_2528170400(cfilenoext0); }

/* Emits the NimThreadVarsSize helper into section 10, with an extern-"C"
 * prefix when compiling a library-style main module (presumably for DLL
 * export -- verify flag 27's meaning in ast.nim). */
N_NIMCALL(void, generatethreadvarssize_536771_839829468)(Tcgen527027* m0) { { NimStringDesc* externc0; TY177507 LOC12; if (!!((nimtv_536656_839829468 == NIM_NIL))) goto LA3; { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = !((gcmd_168132_2607990831 == ((Tcommands168076) 2))); if (!(LOC7)) goto LA8; LOC7 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag290184) 27))&31U)))!=0); LA8: ; if (!LOC7) goto LA9; externc0 = copyString(((NimStringDesc*) &T839829468_693)); } goto LA5; LA9: ; { externc0 = copyString(((NimStringDesc*) &T839829468_490)); } LA5: ; memset((void*)LOC12, 0, sizeof(LOC12)); LOC12[0] = rope_177277_2381377266(externc0); addf_178205_2381377266(&(*m0).s[(((Tcfilesection527005) 10))- 0], ((NimStringDesc*) &T839829468_694), LOC12, 1); } LA3: ; }

/* Decides whether the generated C file needs recompiling: writes the file only
 * if its content changed, and skips recompilation when the object file exists
 * and is newer; under option bit 1 (force?) it always writes and recompiles. */
N_NIMCALL(NIM_BOOL, shouldrecompile_561621_839829468)(Ropeobj177006* code0, NimStringDesc* cfile0) { NIM_BOOL result0; { result0 = (NIM_BOOL)0; result0 = NIM_TRUE; { NimStringDesc* objfile0; if (!!(((gglobaloptions_168130_2607990831 &((NU64)1<<((NU)(((Tglobaloption168013) 1))&63U)))!=0))) goto LA3; objfile0 = toobjfile_271859_2528170400(cfile0); { NIM_BOOL LOC7; LOC7 = (NIM_BOOL)0; LOC7 = writeropeifnotequal_178511_2381377266(code0, cfile0); if (!LOC7) goto LA8; goto BeforeRet; } LA8: ; { NIM_BOOL LOC12; LOC12 = (NIM_BOOL)0; LOC12 = nosexistsFile(objfile0); if (!(LOC12)) goto LA13; LOC12 = nosfileNewer(objfile0, cfile0); LA13: ; if (!LOC12) goto LA14; result0 = NIM_FALSE; } LA14: ; } goto LA1; LA3: ; { writerope_177836_2381377266(code0, cfile0, NIM_FALSE); } LA1: ; }BeforeRet: ; return result0; }

/* Writes a module's C file.  Three paths: (1) freshly generated module --
 * generate init code, main-module extras, then recompile only if needed;
 * (2) pending cached module requiring a merge -- merge and force rewrite;
 * (3) otherwise compile only if the object file is missing.  Always links. */
N_NIMCALL(void, writemodule_561637_839829468)(Tcgen527027* m0, NIM_BOOL pending0) { NimStringDesc* cfile0; NimStringDesc* cfilenoext0; cfile0 = getcfile_561204_839829468(m0); cfilenoext0 = noschangeFileExt(cfile0, ((NimStringDesc*) &T839829468_490)); { NIM_BOOL LOC3; Ropeobj177006* code0; LOC3 = (NIM_BOOL)0; LOC3 = !((*m0).Sup.fromcache); if (LOC3) goto LA4; LOC3 = ((gglobaloptions_168130_2607990831 &((NU64)1<<((NU)(((Tglobaloption168013) 1))&63U)))!=0); LA4: ; if (!LOC3) goto LA5; geninitcode_560286_839829468(m0); finishtypedescriptions_533842_839829468(m0); { if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag290184) 12))&31U)))!=0)) goto LA9; add_177482_2381377266(&(*m0).s[(((Tcfilesection527005) 7))- 0], mainmodprocs_527148_3723162438); generatethreadvarssize_536771_839829468(m0); } LA9: ; code0 = genmodule_560491_839829468(m0, cfile0); { NIM_BOOL LOC13; LOC13 = (NIM_BOOL)0; LOC13 = shouldrecompile_561621_839829468(code0, cfile0); if (!LOC13) goto LA14; addfiletocompile_271863_2528170400(cfile0); } LA14: ; } goto LA1; LA5: ; { NIM_BOOL LOC17; NIM_BOOL LOC18; Ropeobj177006* code0; LOC17 = (NIM_BOOL)0; LOC18 = (NIM_BOOL)0; LOC18 = pending0; if (!(LOC18)) goto LA19; LOC18 = mergerequired_528832_2760143328(m0); LA19: ; LOC17 = LOC18; if (!(LOC17)) goto LA20; LOC17 = !((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag290184) 12))&31U)))!=0)); LA20: ; if (!LOC17) goto LA21; mergefiles_529241_2760143328(cfile0, m0); geninitcode_560286_839829468(m0); finishtypedescriptions_533842_839829468(m0); code0 = genmodule_560491_839829468(m0, cfile0); writerope_177836_2381377266(code0, cfile0, NIM_FALSE); addfiletocompile_271863_2528170400(cfile0); } goto LA1; LA21: ; { NimStringDesc* LOC24; NIM_BOOL LOC25; LOC24 = (NimStringDesc*)0; LOC24 = toobjfile_271859_2528170400(cfilenoext0); LOC25 = (NIM_BOOL)0; LOC25 = nosexistsFile(LOC24); if (!!(LOC25)) goto LA26; addfiletocompile_271863_2528170400(cfile0); } goto LA1; LA26: ; LA1: ; addfiletolink_271872_2528170400(cfilenoext0); }

/* Writes the exported .h file for --header: banner, include guard derived
 * from the filename, int types, includes, sections 1..10 and 11, optional
 * extra chunk under option bit 8, then the closing #endif of the guard. */
N_NIMCALL(void, writeheader_561152_839829468)(Tcgen527027* m0) { Ropeobj177006* result0; Ropeobj177006* guard0; TY177507 LOC1; TY124315 LOC2; TY177507 LOC3; TY531289 LOC13; TY177507 LOC14; result0 = getcopyright_559665_839829468((*m0).filename); memset((void*)LOC1, 0, sizeof(LOC1)); memset((void*)(&LOC2), 0, sizeof(LOC2)); nossplitFile((*m0).filename, (&LOC2)); LOC1[0] = rope_177277_2381377266(LOC2.Field1); guard0 = HEX25_177905_2381377266(((NimStringDesc*) &T839829468_695), LOC1, 1); memset((void*)LOC3, 0, sizeof(LOC3)); LOC3[0] = guard0; addf_178205_2381377266(&result0, ((NimStringDesc*) &T839829468_696), LOC3, 1); addinttypes_559659_839829468(&result0); generateheaders_558104_839829468(m0); generatethreadlocalstorage_536717_839829468(m0); { Tcfilesection527005 i_561174_839829468; NI res_561200_839829468; i_561174_839829468 = (Tcfilesection527005)0; res_561200_839829468 = ((NI) 1); { while (1) { Ropeobj177006* LOC7; Ropeobj177006* LOC8; if (!(res_561200_839829468 <= ((NI) 10))) goto LA6; i_561174_839829468 = ((Tcfilesection527005) (res_561200_839829468)); LOC7 = (Ropeobj177006*)0; LOC7 = gensectionstart_528015_2760143328(i_561174_839829468); add_177482_2381377266(&result0, LOC7); add_177482_2381377266(&result0, (*m0).s[(i_561174_839829468)- 0]); LOC8 = (Ropeobj177006*)0; LOC8 = gensectionend_528050_2760143328(i_561174_839829468); add_177482_2381377266(&result0, LOC8); res_561200_839829468 += ((NI) 1); } LA6: ; } } add_177482_2381377266(&result0, (*m0).s[(((Tcfilesection527005) 11))- 0]); { if (!((gglobaloptions_168130_2607990831 &((NU64)1<<((NU)(((Tglobaloption168013) 8))&63U)))!=0)) goto LA11; add_177487_2381377266(&result0, ((NimStringDesc*) &T839829468_22)); } LA11: ; memset((void*)LOC13, 0, sizeof(LOC13)); addf_178205_2381377266(&result0, ((NimStringDesc*) &T839829468_697), LOC13, 0); memset((void*)LOC14, 0, sizeof(LOC14)); LOC14[0] = guard0; addf_178205_2381377266(&result0, ((NimStringDesc*) &T839829468_698), LOC14, 1); writerope_177836_2381377266(result0, (*m0).filename, NIM_FALSE); }

/* Top-level driver: finish the generated header module (if any), loop until
 * no forwarded procs remain (finishing non-cached modules), then write every
 * module (cached ones via updatecachedmodule), the mapping file, and the
 * exported header. */
N_NIMCALL(void, cgenwritemodules_561902_839829468)(void) { { if (!!((generatedheader_530201_839829468 == NIM_NIL))) goto LA3; finishmodule_561420_839829468(generatedheader_530201_839829468); } LA3: ; { while (1) { if (!(((NI) 0) < gforwardedprocscounter_527171_3723162438)) goto LA6; { Tcgen527027* m_561916_839829468; m_561916_839829468 = (Tcgen527027*)0; { NI i_561935_839829468; NI HEX3Atmp_561937_839829468; NI res_561939_839829468; i_561935_839829468 = (NI)0; HEX3Atmp_561937_839829468 = (NI)0; HEX3Atmp_561937_839829468 = (gmodules_527170_3723162438 ? (gmodules_527170_3723162438->Sup.len-1) : -1); res_561939_839829468 = ((NI) 0); { while (1) { if (!(res_561939_839829468 <= HEX3Atmp_561937_839829468)) goto LA10; i_561935_839829468 = res_561939_839829468; { if (!!((gmodules_527170_3723162438->data[i_561935_839829468] == NIM_NIL))) goto LA13; m_561916_839829468 = gmodules_527170_3723162438->data[i_561935_839829468]; { if (!!((*m_561916_839829468).Sup.fromcache)) goto LA17; finishmodule_561420_839829468(m_561916_839829468); } LA17: ; } LA13: ; res_561939_839829468 += ((NI) 1); } LA10: ; } } } } LA6: ; } { Tcgen527027* m_561917_839829468; m_561917_839829468 = (Tcgen527027*)0; { NI i_561946_839829468; NI HEX3Atmp_561948_839829468; NI res_561950_839829468; i_561946_839829468 = (NI)0; HEX3Atmp_561948_839829468 = (NI)0; HEX3Atmp_561948_839829468 = (gmodules_527170_3723162438 ? (gmodules_527170_3723162438->Sup.len-1) : -1); res_561950_839829468 = ((NI) 0); { while (1) { if (!(res_561950_839829468 <= HEX3Atmp_561948_839829468)) goto LA22; i_561946_839829468 = res_561950_839829468; { if (!!((gmodules_527170_3723162438->data[i_561946_839829468] == NIM_NIL))) goto LA25; m_561917_839829468 = gmodules_527170_3723162438->data[i_561946_839829468]; { if (!(*m_561917_839829468).Sup.fromcache) goto LA29; updatecachedmodule_561813_839829468(m_561917_839829468); } goto LA27; LA29: ; { writemodule_561637_839829468(m_561917_839829468, NIM_TRUE); } LA27: ; } LA25: ; res_561950_839829468 += ((NI) 1); } LA22: ; } } } writemapping_272789_2528170400(gmapping_527152_3723162438); { if (!!((generatedheader_530201_839829468 == NIM_NIL))) goto LA34; writeheader_561152_839829468(generatedheader_530201_839829468); } LA34: ; }

/* GC-aware clear of the per-section rope array s[0..17]. */
N_NIMCALL(void, nullify_560833_839829468)(Ropeobj177006** arr0) { { Tcfilesection527005 i_560848_839829468; NI res_560853_839829468; i_560848_839829468 = (Tcfilesection527005)0; res_560853_839829468 = ((NI) 0); { while (1) { if (!(res_560853_839829468 <= ((NI) 17))) goto LA3; i_560848_839829468 = ((Tcfilesection527005) (res_560853_839829468)); unsureAsgnRef((void**) (&arr0[(i_560848_839829468)- 0]), NIM_NIL); res_560853_839829468 += ((NI) 1); } LA3: ; } } }

/* GC-aware clear of the extension-loader array indexed by '0'..'9' (48..57). */
N_NIMCALL(void, nullify_560858_839829468)(Ropeobj177006** arr0) { { NIM_CHAR i_561014_839829468; NI res_561019_839829468; i_561014_839829468 = (NIM_CHAR)0; res_561019_839829468 = ((NI) 48); { while (1) { if (!(res_561019_839829468 <= ((NI) 57))) goto LA3; i_561014_839829468 = ((NIM_CHAR) (res_561019_839829468)); unsureAsgnRef((void**) (&arr0[(((NU8)(i_561014_839829468)))- 48]), NIM_NIL); res_561019_839829468 += ((NI) 1); } LA3: ; } } }

/* Reinitializes a cached codegen module's per-compilation state (tables,
 * init procs, fresh type/proc seqs, section ropes) and marks it fromcache. */
N_NIMCALL(void, resetmodule_560763_839829468)(Tcgen527027* m0) { initlinkedlist_147031_3771138726((&(*m0).headerfiles)); initintset_266885_2627731572((&(*m0).declaredprotos)); initidtable_294019_850551059((&(*m0).forwtypecache)); asgnRef((void**) (&(*m0).initproc), newproc_527206_3723162438(NIM_NIL, m0)); (*(*m0).initproc).options = initprocoptions_560635_839829468(m0); asgnRef((void**) (&(*m0).preinitproc), newpreinitproc_560625_839829468(m0)); asgnRef((void**) (&(*m0).postinitproc), newpostinitproc_560630_839829468(m0)); initnodetable_294085_850551059((&(*m0).datacache)); if ((*m0).typestack) nimGCunrefNoCycle((*m0).typestack); (*m0).typestack = (Ttypeseq290836*) newSeqRC1((&NTI290836), 0); if ((*m0).forwardedprocs) nimGCunrefNoCycle((*m0).forwardedprocs); (*m0).forwardedprocs = (Tsymseq290804*) newSeqRC1((&NTI290804), 0); asgnRefNoCycle((void**) (&(*m0).typenodesname), gettempname_531596_839829468(m0)); asgnRefNoCycle((void**) (&(*m0).nimtypesname), gettempname_531596_839829468(m0)); { if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag290184) 13))&31U)))!=0)) goto LA3; (*m0).flags |= ((NU8)1)<<((((Codegenflag527025) 0))%(sizeof(NU8)*8)); } goto LA1; LA3: ; { (*m0).flags &= ~(((NU8)1) << ((((Codegenflag527025) 0)) % (sizeof(NU8)*8))); } LA1: ; nullify_560833_839829468((*m0).s); (*m0).typenodes = ((NI) 0); (*m0).nimtypes = ((NI) 0); nullify_560858_839829468((*m0).extensionloaders); (*m0).Sup.fromcache = NIM_TRUE; }

/* Resets every registered codegen module (skips NIL slots). */
N_NIMCALL(void, resetcgenmodules_561024_839829468)(void) { { Tcgen527027* m_561026_839829468; m_561026_839829468 = (Tcgen527027*)0; { NI i_561031_839829468; NI HEX3Atmp_561033_839829468; NI res_561035_839829468; i_561031_839829468 = (NI)0; HEX3Atmp_561033_839829468 = (NI)0; HEX3Atmp_561033_839829468 = (gmodules_527170_3723162438 ? (gmodules_527170_3723162438->Sup.len-1) : -1); res_561035_839829468 = ((NI) 0); { while (1) { if (!(res_561035_839829468 <= HEX3Atmp_561033_839829468)) goto LA4; i_561031_839829468 = res_561035_839829468; { if (!!((gmodules_527170_3723162438->data[i_561031_839829468] == NIM_NIL))) goto LA7; m_561026_839829468 = gmodules_527170_3723162438->data[i_561031_839829468]; resetmodule_560763_839829468(m_561026_839829468); } LA7: ; res_561035_839829468 += ((NI) 1); } LA4: ; } } } }

/* Module init: registers GC markers for module-level globals and sets their
 * initial values (generated equivalent of the Nim module's top-level code). */
NIM_EXTERNC N_NOINLINE(void, compiler_cgenInit000)(void) { nimRegisterGlobalMarker(T839829468_2); nimRegisterGlobalMarker(T839829468_3); nimRegisterGlobalMarker(T839829468_5); nimRegisterGlobalMarker(T839829468_6); nimRegisterGlobalMarker(T839829468_7); nimRegisterGlobalMarker(T839829468_8); asgnRefNoCycle((void**) (&indent_530655_839829468), rope_177277_2381377266(((NimStringDesc*) &T839829468_4))); if (nimtvdeps_536674_839829468) nimGCunrefNoCycle(nimtvdeps_536674_839829468); nimtvdeps_536674_839829468 = (Ttypeseq290836*) newSeqRC1((&NTI290836), 0); chckNil((void*)(&nimtvdeclared_536675_839829468)); genericReset((void*)(&nimtvdeclared_536675_839829468), (&NTI266030)); initintset_266885_2627731572((&nimtvdeclared_536675_839829468)); breakpointid_546860_839829468 = ((NI) 0); }

/* Data init: intentionally empty for this module. */
NIM_EXTERNC N_NOINLINE(void, compiler_cgenDatInit000)(void) { }
ParallelFor.h
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans  http://continuousphysics.com/Bullet/

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/

#include <stdio.h>  //printf debugging
#include <algorithm>

// Map the Bullet build switches onto the local USE_* threading-provider flags.
#if BT_USE_TBB
#define USE_TBB 1  // use Intel Threading Building Blocks for thread management
#endif

#if BT_USE_PPL
#define USE_PPL 1  // use Microsoft Parallel Patterns Library (installed with Visual Studio 2010 and later)
#endif  // BT_USE_PPL

#if BT_USE_OPENMP
#define USE_OPENMP 1  // use OpenMP (also need to change compiler options for OpenMP support)
#endif

#if USE_OPENMP
#include <omp.h>
#endif  // #if USE_OPENMP

#if USE_PPL
// if you get a compile error here, check whether your version of Visual Studio
// includes PPL; Visual Studio 2010 and later should come with it
#include <ppl.h>
#include <concrtrm.h>  // for GetProcessorCount()
#endif  // #if USE_PPL

#if USE_TBB
#define __TBB_NO_IMPLICIT_LINKAGE 1
#include <tbb/tbb.h>
#include <tbb/task_scheduler_init.h>
#include <tbb/parallel_for.h>
#include <tbb/blocked_range.h>
#endif  // #if USE_TBB

///
/// TaskManager -- selects and configures one of the compiled-in threading
/// providers (OpenMP, Intel TBB or MS PPL) and tracks the thread count.
/// Falls back to apiNone (single-threaded) when no provider is available.
///
class TaskManager
{
public:
    enum Api
    {
        apiNone,
        apiOpenMP,
        apiTbb,
        apiPpl,
        apiCount
    };

    /// Human-readable name for an Api value; out-of-range values (including
    /// apiCount) map to "unknown".
    static const char* getApiName( Api api )
    {
        static const char* const s_names[ apiCount ] =
        {
            "None", "OpenMP", "Intel TBB", "MS PPL"
        };
        if ( api >= apiNone && api < apiCount )
        {
            return s_names[ api ];
        }
        return "unknown";
    }

    TaskManager()
    {
        m_api = apiNone;
        m_numThreads = 0;
#if USE_TBB
        m_tbbSchedulerInit = NULL;
#endif  // #if USE_TBB
    }

    Api getApi() const
    {
        return m_api;
    }

    /// True when the given API was compiled in; apiNone is always available.
    bool isSupported( Api api ) const
    {
        switch ( api )
        {
            case apiNone:
                return true;
#if USE_OPENMP
            case apiOpenMP:
                return true;
#endif
#if USE_TBB
            case apiTbb:
                return true;
#endif
#if USE_PPL
            case apiPpl:
                return true;
#endif
            default:
                return false;
        }
    }

    /// Select an API; silently degrades to apiNone if there is no compile-time
    /// support for the requested one.
    void setApi( Api api )
    {
        m_api = isSupported( api ) ? api : apiNone;
    }

    /// Upper bound on useful worker threads as reported by the active provider
    /// (1 when no provider is compiled in).
    static int getMaxNumThreads()
    {
#if USE_OPENMP
        return omp_get_max_threads();
#elif USE_PPL
        return concurrency::GetProcessorCount();
#elif USE_TBB
        return tbb::task_scheduler_init::default_num_threads();
#else
        return 1;
#endif
    }

    int getNumThreads() const
    {
        return m_numThreads;
    }

    /// Clamp to >= 1, push the count into every compiled-in provider, and
    /// return the value actually stored.
    int setNumThreads( int numThreads )
    {
        m_numThreads = ( std::max )( 1, numThreads );
#if USE_OPENMP
        omp_set_num_threads( m_numThreads );
#endif
#if USE_PPL
        {
            using namespace concurrency;
            // detach any scheduler we attached previously before re-creating
            if ( CurrentScheduler::Id() != -1 )
            {
                CurrentScheduler::Detach();
            }
            SchedulerPolicy policy;
            policy.SetConcurrencyLimits( m_numThreads, m_numThreads );
            CurrentScheduler::Create( policy );
        }
#endif
#if USE_TBB
        if ( m_tbbSchedulerInit )
        {
            delete m_tbbSchedulerInit;
            m_tbbSchedulerInit = NULL;
        }
        m_tbbSchedulerInit = new tbb::task_scheduler_init( m_numThreads );
#endif
        return m_numThreads;
    }

    /// First call: pick the "best" available API (OpenMP preferred, then TBB,
    /// then PPL, by virtue of later setApi calls overriding earlier ones) and
    /// size the pool. Subsequent calls just re-apply the stored thread count.
    void init( int numThread = 0 )
    {
        if ( m_numThreads != 0 )
        {
            setNumThreads( m_numThreads );
            return;
        }
#if USE_PPL
        setApi( apiPpl );
#endif
#if USE_TBB
        setApi( apiTbb );
#endif
#if USE_OPENMP
        setApi( apiOpenMP );
#endif
        if ( numThread == 0 )
        {
            numThread = getMaxNumThreads();
        }
        setNumThreads( ( std::min )( numThread, getMaxNumThreads() ) );
    }

    void shutdown()
    {
#if USE_TBB
        if ( m_tbbSchedulerInit )
        {
            delete m_tbbSchedulerInit;
            m_tbbSchedulerInit = NULL;
        }
#endif
    }

private:
    Api m_api;
    int m_numThreads;
#if USE_TBB
    tbb::task_scheduler_init* m_tbbSchedulerInit;
#endif  // #if USE_TBB
};

extern TaskManager gTaskMgr;

inline static void initTaskScheduler()
{
    gTaskMgr.init();
}

inline static void cleanupTaskScheduler()
{
    gTaskMgr.shutdown();
}

#if USE_TBB
///
/// TbbBodyAdapter -- Converts a body object that implements the
///                   "forLoop(int iBegin, int iEnd) const" function
///                   into a TBB compatible object that takes a tbb::blocked_range<int> type.
///
template <class TBody>
struct TbbBodyAdapter
{
    const TBody* mBody;

    void operator()( const tbb::blocked_range<int>& range ) const
    {
        mBody->forLoop( range.begin(), range.end() );
    }
};
#endif  // #if USE_TBB

#if USE_PPL
///
/// PplBodyAdapter -- Converts a body object that implements the
///                   "forLoop(int iBegin, int iEnd) const" function
///                   into a PPL compatible object that implements "void operator()( int ) const"
///
template <class TBody>
struct PplBodyAdapter
{
    const TBody* mBody;
    int mGrainSize;
    int mIndexEnd;

    void operator()( int i ) const
    {
        mBody->forLoop( i, (std::min)(i + mGrainSize, mIndexEnd) );
    }
};
#endif  // #if USE_PPL

///
/// parallelFor -- interface for submitting work expressed as a for loop to the worker threads.
/// Dispatches to whichever provider gTaskMgr selected; with no provider (or
/// apiNone) the body runs sequentially on the calling thread.
///
template <class TBody>
void parallelFor( int iBegin, int iEnd, int grainSize, const TBody& body )
{
#if USE_OPENMP
    if ( gTaskMgr.getApi() == TaskManager::apiOpenMP )
    {
        // OpenMP dispatch: hand out one grain-sized chunk per iteration
#pragma omp parallel for schedule(static, 1)
        for ( int i = iBegin; i < iEnd; i += grainSize )
        {
            body.forLoop( i, (std::min)( i + grainSize, iEnd ) );
        }
        return;
    }
#endif  // #if USE_OPENMP
#if USE_PPL
    if ( gTaskMgr.getApi() == TaskManager::apiPpl )
    {
        // PPL dispatch
        PplBodyAdapter<TBody> pplAdapter;
        pplAdapter.mBody = &body;
        pplAdapter.mGrainSize = grainSize;
        pplAdapter.mIndexEnd = iEnd;
        // note: MSVC 2010 doesn't support partitioner args, so avoid them
        concurrency::parallel_for( iBegin, iEnd, grainSize, pplAdapter );
        return;
    }
#endif  //#if USE_PPL
#if USE_TBB
    if ( gTaskMgr.getApi() == TaskManager::apiTbb )
    {
        // TBB dispatch
        TbbBodyAdapter<TBody> tbbAdapter;
        tbbAdapter.mBody = &body;
        tbb::parallel_for( tbb::blocked_range<int>( iBegin, iEnd, grainSize ),
                           tbbAdapter,
                           tbb::simple_partitioner() );
        return;
    }
#endif  // #if USE_TBB
    {
        // run on main thread
        body.forLoop( iBegin, iEnd );
    }
}
clascl.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zlascl.c, normal z -> c, Fri Sep 28 17:38:08 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"

#include <math.h>

/******************************************************************************/
// Scales the m-by-n matrix pA by cto/cfrom (LAPACK-style lascl).
// uplo selects the part of the matrix to scale (PlasmaGeneral, PlasmaUpper,
// or PlasmaLower).  Returns PlasmaSuccess, a negative value identifying the
// offending argument, or an internal error code.
int plasma_clascl(plasma_enum_t uplo,
                  float cfrom, float cto,
                  int m, int n,
                  plasma_complex32_t *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((uplo != PlasmaGeneral) &&
        (uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    // cfrom is the divisor: it must be finite-representable and nonzero.
    if (cfrom == 0.0 || isnan(cfrom)) {
        plasma_error("illegal value of cfrom");
        return -2;
    }
    if (isnan(cto)) {
        plasma_error("illegal value of cto");
        return -3;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -4;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -5;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -7;
    }

    // quick return
    if (imin(n, m) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_lascl(plasma, PlasmaComplexFloat, m, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        // Message fixed: the call above is plasma_desc_general_create().
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Initialize sequence.  The result was previously ignored; a failed
    // initialization would leave sequence.status undefined.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_cge2desc(pA, lda, A, &sequence, &request);

        // Call tile async function.
        plasma_omp_clascl(uplo, cfrom, cto, A, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_cdesc2ge(A, pA, lda, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);

    // Return status.
    int status = sequence.status;
    return status;
}

/******************************************************************************/
// Asynchronous, tile-layout variant of plasma_clascl.  Errors are reported
// through plasma_request_fail on the given sequence/request pair; the call
// itself returns void and may return before the computation completes.
void plasma_omp_clascl(plasma_enum_t uplo,
                       float cfrom, float cto,
                       plasma_desc_t A,
                       plasma_sequence_t *sequence,
                       plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaGeneral) &&
        (uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (cfrom == 0.0 || isnan(cfrom)) {
        plasma_error("illegal value of cfrom");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (isnan(cto)) {
        plasma_error("illegal value of cto");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (imin(A.m, A.n) == 0)
        return;

    // Call the parallel function.
    plasma_pclascl(uplo, cfrom, cto, A, sequence, request);
}
GB_binop__lor_bool.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): kernels for the LOR (logical OR) operator on bool; the bodies
// are expanded from the shared #include templates -- fix the generator
// (Generator/*), not this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__lor_bool
// A.*B function (eWiseMult):       GB_AemultB__lor_bool
// A*D function (colscale):         GB_AxD__lor_bool
// D*A function (rowscale):         GB_DxB__lor_bool
// C+=B function (dense accum):     GB_Cdense_accumB__lor_bool
// C+=b function (dense accum):     GB_Cdense_accumb__lor_bool
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__lor_bool
// C=scalar+B                       GB_bind1st__lor_bool
// C=scalar+B'                      GB_bind1st_tran__lor_bool
// C=A+scalar                       GB_bind2nd__lor_bool
// C=A'+scalar                      GB_bind2nd_tran__lor_bool

// C type:   bool
// A type:   bool
// B,b type: bool
// BinaryOp: cij = (aij || bij)

#define GB_ATYPE \
    bool

#define GB_BTYPE \
    bool

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    bool bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x || y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// (none) -- LOR on bool has no CBLAS equivalent
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOR || GxB_NO_BOOL || GxB_NO_LOR_BOOL)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LOR is not in that set, so this variant is compiled out for this operator.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__lor_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__lor_bool
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__lor_bool
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type bool
        bool bwork = (*((bool *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the block above already returned; generator
    // artifact, harmless but worth fixing upstream.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__lor_bool
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__lor_bool
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Frees the per-matrix ek_slice workspaces on exit from the add/emult kernels.
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}

GrB_Info GB_AaddB__lor_bool
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__lor_bool
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__lor_bool
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    bool x = (*((bool *) x_input)) ;
    bool *Bx = (bool *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap (Bb == NULL means all present)
        if (!GBB (Bb, p)) continue ;
        bool bij = Bx [p] ;
        Cx [p] = (x || bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------ GrB_Info GB_bind2nd__lor_bool ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; bool *Ax = (bool *) Ax_input ; bool y = (*((bool *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; bool aij = Ax [p] ; Cx [p] = (aij || y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = Ax [pA] ; \ Cx [pC] = (x || aij) ; \ } GrB_Info GB_bind1st_tran__lor_bool ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ bool #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool x = (*((const bool *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ bool } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = Ax [pA] ; \ Cx [pC] = (aij || y) ; \ } GrB_Info GB_bind2nd_tran__lor_bool ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool y = (*((const bool *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
oligo.c
/* * Copyright (c) 2014, Jason M. Wood <sandain@hotmail.com> * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * Oligo categorizes sequence data based on oligonucleotide usage frequency. * * @file oligo.c * @mainpage Oligo * * Oligo categorizes sequence data based on oligonucleotide usage frequency. * * */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <omp.h> #include "cluster.h" #include "fasta.h" #include "sequence.h" #include "tools.h" /** * @def DEBUG * The default debug level to use. Set to 1 to turn debugging output on. */ #define DEBUG 1 /** * @def DEFAULT_OLIGO_LENGTH * The default length oligo to use. 
This becomes very computationally * intensive with larger numbers. */ #define DEFAULT_OLIGO_LENGTH 4 /** * @def DEFAULT_FRAGMENT_LENGTH * The default length fragment to use. Smaller fragments retain less * fidelity. */ #define DEFAULT_FRAGMENT_LENGTH 5000 void generateOligonucleotides ( size_t oligoLength, char ** oligonucleotides, char * oligo, size_t index ); double * oligoFrequency ( Fasta * fasta, size_t numSequences, size_t numCombinations, size_t oligoLength, size_t fragmentLength ); /** * The main entry point for the Oligo program. * * @param argc The number of arguments passed to Oligo. * @param argv The array of arguments passed to Oligo. * @return The error level, 0 for no error. */ int main ( int argc, char * argv[] ) { size_t oligoLength; size_t fragmentLength; size_t numSequences; size_t numCombinations; size_t i; double * frequency; char ** ids; char * fastaFile; /* Grab the fasta file from the command line, or produce an error. */ if (argc < 2) { printf ("Error, fasta formatted sequence file not provided!\n"); return 1; } fastaFile = strdup (argv[1]); /* Grab the oligo length from the command line, or use the default value if not provided. */ if (argc >= 3) { oligoLength = atoi (argv[2]); } else { printf ( "Oligo length parameter not supplied, using default value of %d.\n", DEFAULT_OLIGO_LENGTH ); oligoLength = DEFAULT_OLIGO_LENGTH; } /* Grab the fragment length from the command line, or use the default value if not provided. */ if (argc >= 4) { fragmentLength = atoi (argv[3]); } else { printf ( "Fragment length parameter not supplied, using default value of %d.\n", DEFAULT_FRAGMENT_LENGTH ); fragmentLength = DEFAULT_FRAGMENT_LENGTH; } /* Load the fasta file. */ Fasta * fasta = newFasta (fastaFile); setMinimumLength (fasta, fragmentLength); numSequences = numberSequences (fasta); ids = getIdentifiers (fasta); // XXX Use the fasta object throughout. Requires the fasta object to be smarter. /* Determine the number of nucleotide combinations. 
*/ numCombinations = power (4, oligoLength); /* Generate the oligonucleotide usage frequency matrix. */ printf ("Generating the oligo usage frequency matrix.\n"); frequency = oligoFrequency ( fasta, numSequences, numCombinations, oligoLength, fragmentLength ); /* Display the oligonucleotide usage frequency matrix if debug is on. */ if (DEBUG > 0) { size_t s, c; for (s = 0; s < numSequences; s ++) { printf ("%s: ", ids[s]); for (c = 0; c < numCombinations; c ++) { printf ("%.4f ", frequency[s + c * numCombinations]); } printf ("\n"); } } /* Run the Kmeans algorithm. */ printf ("Running the Kmeans algorithm.\n"); runKmeans (ids, numSequences, numCombinations, frequency, 10, DEBUG); /* Run the AIB algorithm. */ printf ("Running the AIB algorithm.\n"); runAIB (ids, numSequences, numCombinations, frequency, DEBUG); /* Free reserved memory. */ free (ids); freeFasta (fasta); free (frequency); return 0; } /** * Generate all of the nucleotide combinations for the given length using a * recursive method. * * @param oligoLength The length of oligonucleotides to generate. * @param oligonucleotides The final list of oligonucleotides generated. * @param oligo The oligo being generated via recursion. * @param index The index of oligonucleotides to store the resulting * oligonucleotide in. */ void generateOligonucleotides ( size_t oligoLength, char ** oligonucleotides, char * oligo, size_t index ) { char nucs[4] = {'a', 'c', 'g', 't'}; size_t i; size_t length = strlen(oligo); /* If oligo is not long enough, append the four nucleotides to oligo and recurse. If oligo is long enough, push the oligo onto the oligonucleotides array and return. */ if (oligoLength > length) { /* Make a local copy of the oligo. 
*/ char buffer[oligoLength + 1]; strcpy (buffer, oligo); for (i = 0; i < 4; i ++) { buffer[length] = nucs[i]; buffer[length + 1] = '\0'; generateOligonucleotides ( oligoLength, oligonucleotides, buffer, index + i * pow(4, length) ); } } else { oligonucleotides[index] = strdup (oligo); } } /** * Calculate the oligo usage frequency for each sequence in a fasta file. * * @param fasta The fasta object. * @param numSequences The number of sequences. * @param oligoLength - The length of the oligos. * @param fragmentLength - The minimum length of sequences to use. * @param numCombinations - The number of possible oligo combinations. * @return The oligo frequency matrix generated. */ double * oligoFrequency ( Fasta * fasta, size_t numSequences, size_t numCombinations, size_t oligoLength, size_t fragmentLength ) { size_t i, j, k, l; size_t numSamples; size_t numOligos; size_t stepSize; double * frequency; char ** oligonucleotides; char * sample; char * oligo; int r; /* Initialize the random number generator. */ srand (time (NULL)); /* Initialize the frequency matrix. */ frequency = malloc (numSequences * numCombinations * sizeof (double)); for (i = 0; i < numSequences * numCombinations; i ++) { frequency[i] = 0.0; } /* Generate all of the nucleotide combinations. */ oligonucleotides = malloc (numCombinations * sizeof (char *)); generateOligonucleotides (oligoLength, oligonucleotides, "", 0); /* Calculate the number of oligonucleotides that will be tested. */ numOligos = floor (fragmentLength / oligoLength); /* #pragma omp parallel shared ( \ fasta, numSequences, oligoLength, numCombinations, frequency, \ oligonucleotides, numOligos \ ) #pragma omp for */ /* Count the number of times each oligonucleotide appears in a sequence. */ Sequence * seq; i = 0; while (nextSequence (fasta, &seq)) { size_t sequenceLength = getSequenceLength (seq); /* Take samples from the sequence, and average the nucleotide usage of the samples. 
*/ numSamples = rint ( (1.5 * sequenceLength) / (1.0 * fragmentLength) ); stepSize = rint ( (sequenceLength - fragmentLength) / (1.0 * numSamples) ); for (j = 0; j < numSamples; j ++) { /* Take a random sample of a section of the sequence. */ r = rand() % stepSize + j * stepSize; sample = strndup (getSequence (seq) + r, fragmentLength); for (k = 0; k < numOligos; k ++) { /* Compare the oligo generated from the sequence with each possible oligo. */ oligo = strndup (sample + k * oligoLength, oligoLength); for (l = 0; l < numCombinations; l ++) { /* Increment the frequency counter if a match is found in the sequence. */ if (sequenceIsEqual (oligo, oligonucleotides[l])) { #pragma omp critical frequency[i * numCombinations + l] ++; } } free (oligo); } free (sample); } freeSequence (seq); i ++; } /* Normalize the frequency values based on the number of samples and length of the sequence. */ for (i = 0; i < numSequences; i ++) { for (j = 0; j < numCombinations; j ++) { frequency[i * numCombinations + j] /= numSamples * (fragmentLength - oligoLength + 1); } } /* Free the memory used by the oligonucleotides array. */ for (i = 0; i < numCombinations; i ++) { free (oligonucleotides[i]); } free (oligonucleotides); return frequency; }
maxnum.c
#include <omp.h> #include <stdio.h> #include <stdlib.h> int main() { int max_threads, scalar_num_threads; int *num_threads; omp_set_num_threads(128); // We obtain the default number of threads in the target region #pragma omp target map(from:max_threads) { max_threads = omp_get_max_threads(); } num_threads = (int *) malloc (sizeof(int) * max_threads); for (int i = 0; i < max_threads; ++i) { num_threads[i] = -1; } #pragma omp target parallel map(from:num_threads[0:max_threads], scalar_num_threads) { #pragma omp master { scalar_num_threads = omp_get_num_threads(); } int thread_id = omp_get_thread_num(); num_threads[thread_id] = omp_get_num_threads(); } fprintf(stderr, "MaxThreds %d ScNumThrd %d numThrds %d\n", max_threads, scalar_num_threads, num_threads[0]); return 0; }
GB_binop__bset_uint32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bset_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_01__bset_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_02__bset_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_03__bset_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bset_uint32)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bset_uint32)
// C+=b function (dense accum):     GB (_Cdense_accumb__bset_uint32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bset_uint32)
// C=scalar+B                       GB (_bind1st__bset_uint32)
// C=scalar+B'                      GB (_bind1st_tran__bset_uint32)
// C=A+scalar                       GB (_bind2nd__bset_uint32)
// C=A'+scalar                      GB (_bind2nd_tran__bset_uint32)

// C type:   uint32_t
// A type:   uint32_t
// B,b type: uint32_t
// BinaryOp: cij = GB_BITSET (aij, bij, uint32_t, 32)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA] ; GBX handles iso-valued matrices
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: set bit y of x
#define GB_BINOP(z,x,y,i,j) \
    z = GB_BITSET (x, y, uint32_t, 32) ;

// true if the binop must be flipped (BSET is not commutative)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BSET || GxB_NO_UINT32 || GxB_NO_BSET_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// BSET does not qualify, so this kernel is compiled out.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__bset_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bset_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bset_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (generator emits a second return after the block)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// colscale is not generated for BSET (no matching semiring); compiled out
#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// rowscale is likewise compiled out for BSET
#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bset_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B; freed by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__bset_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bset_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__bset_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bset_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bset_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap (GBB is the bitmap test)
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITSET (x, bij, uint32_t, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bset_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITSET (aij, y, uint32_t, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                               \
{                                                       \
    uint32_t aij = GBX (Ax, pA, false) ;                \
    Cx [pC] = GB_BITSET (x, aij, uint32_t, 32) ;        \
}

GrB_Info GB (_bind1st_tran__bset_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                               \
{                                                       \
    uint32_t aij = GBX (Ax, pA, false) ;                \
    Cx [pC] = GB_BITSET (aij, y, uint32_t, 32) ;        \
}

GrB_Info GB (_bind2nd_tran__bset_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
cpu.c
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #include "cpu.h" #include <limits.h> #include <stdio.h> #include <string.h> #ifdef _OPENMP #include <omp.h> #endif #ifdef __ANDROID__ #include <sys/syscall.h> #include <unistd.h> #include <stdint.h> #endif #ifdef __ANDROID__ // extract the ELF HW capabilities bitmap from /proc/self/auxv static unsigned int get_elf_hwcap_from_proc_self_auxv() { FILE* fp = fopen("/proc/self/auxv", "rb"); if (!fp) { return 0; } #define AT_HWCAP 16 #define AT_HWCAP2 26 #if __aarch64__ struct { uint64_t tag; uint64_t value; } entry; #else struct { unsigned int tag; unsigned int value; } entry; #endif unsigned int result = 0; while (!feof(fp)) { int nread = fread((char*)&entry, sizeof(entry), 1, fp); if (nread != 1) break; if (entry.tag == 0 && entry.value == 0) break; if (entry.tag == AT_HWCAP) { result = entry.value; break; } } fclose(fp); return result; } static unsigned int g_hwcaps = get_elf_hwcap_from_proc_self_auxv(); #if __aarch64__ // from arch/arm64/include/uapi/asm/hwcap.h #define HWCAP_ASIMD (1 << 1) #define HWCAP_ASIMDHP (1 << 10) #else // from arch/arm/include/uapi/asm/hwcap.h #define HWCAP_NEON (1 << 12) #define HWCAP_VFPv4 (1 << 16) #endif #endif // __ANDROID__ int cpu_support_arm_neon() { #ifdef __ANDROID__ #if __aarch64__ return g_hwcaps & HWCAP_ASIMD; #else return g_hwcaps 
& HWCAP_NEON; #endif #else return 0; #endif } int cpu_support_arm_vfpv4() { #ifdef __ANDROID__ #if __aarch64__ // neon always enable fma and fp16 return g_hwcaps & HWCAP_ASIMD; #else return g_hwcaps & HWCAP_VFPv4; #endif #else return 0; #endif } int cpu_support_arm_asimdhp() { #ifdef __ANDROID__ #if __aarch64__ return g_hwcaps & HWCAP_ASIMDHP; #else return 0; #endif #else return 0; #endif } static int get_cpucount() { int count = 0; #ifdef __ANDROID__ // get cpu count from /proc/cpuinfo FILE* fp = fopen("/proc/cpuinfo", "rb"); if (!fp) return 1; char line[1024]; while (!feof(fp)) { char* s = fgets(line, 1024, fp); if (!s) break; if (memcmp(line, "processor", 9) == 0) { count++; } } fclose(fp); #else #ifdef _OPENMP count = omp_get_max_threads(); #else count = 1; #endif // _OPENMP #endif if (count < 1) count = 1; if (count > (int)sizeof(size_t) * 8) { fprintf(stderr, "more than %d cpu detected, thread affinity may not work properly :(\n", (int)sizeof(size_t) * 8); } return count; } static int g_cpucount = -1; inline int get_cpu_count() { // retrieve gpu count if not initialized if (g_cpucount == -1) { g_cpucount = get_cpucount(); } return g_cpucount; } #ifdef __ANDROID__ static int get_max_freq_khz(int cpuid) { // first try, for all possible cpu char path[256]; sprintf(path, "/sys/devices/system/cpu/cpufreq/stats/cpu%d/time_in_state", cpuid); FILE* fp = fopen(path, "rb"); if (!fp) { // second try, for online cpu sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/stats/time_in_state", cpuid); fp = fopen(path, "rb"); if (fp) { int max_freq_khz = 0; while (!feof(fp)) { int freq_khz = 0; int nscan = fscanf(fp, "%d %*d", &freq_khz); if (nscan != 1) break; if (freq_khz > max_freq_khz) max_freq_khz = freq_khz; } fclose(fp); if (max_freq_khz != 0) return max_freq_khz; fp = NULL; } if (!fp) { // third try, for online cpu sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/cpuinfo_max_freq", cpuid); fp = fopen(path, "rb"); if (!fp) return -1; int max_freq_khz = -1; 
fscanf(fp, "%d", &max_freq_khz); fclose(fp); return max_freq_khz; } } int max_freq_khz = 0; while (!feof(fp)) { int freq_khz = 0; int nscan = fscanf(fp, "%d %*d", &freq_khz); if (nscan != 1) break; if (freq_khz > max_freq_khz) max_freq_khz = freq_khz; } fclose(fp); return max_freq_khz; } static int set_sched_affinity(size_t thread_affinity_mask) { // cpu_set_t definition // ref http://stackoverflow.com/questions/16319725/android-set-thread-affinity #define CPU_SETSIZE 1024 #define __NCPUBITS (8 * sizeof (unsigned long)) typedef struct { unsigned long __bits[CPU_SETSIZE / __NCPUBITS]; } cpu_set_t; #define CPU_SET(cpu, cpusetp) \ ((cpusetp)->__bits[(cpu)/__NCPUBITS] |= (1UL << ((cpu) % __NCPUBITS))) #define CPU_ZERO(cpusetp) \ memset((cpusetp), 0, sizeof(cpu_set_t)) // set affinity for thread #ifdef __GLIBC__ pid_t pid = syscall(SYS_gettid); #else #ifdef PI3 pid_t pid = getpid(); #else pid_t pid = gettid(); #endif #endif cpu_set_t mask; CPU_ZERO(&mask); for (int i=0; i<(int)sizeof(size_t) * 8; i++) { if (thread_affinity_mask & (1 << i)) CPU_SET(i, &mask); } int syscallret = syscall(__NR_sched_setaffinity, pid, sizeof(mask), &mask); if (syscallret) { fprintf(stderr, "syscall error %d\n", syscallret); return -1; } return 0; } #endif // __ANDROID__ static int g_powersave = 0; int get_cpu_powersave() { return g_powersave; } int set_cpu_powersave(int powersave) { if (powersave < 0 || powersave > 2) { fprintf(stderr, "powersave %d not supported\n", powersave); return -1; } size_t thread_affinity_mask = get_cpu_thread_affinity_mask(powersave); int ret = set_cpu_thread_affinity(thread_affinity_mask); if (ret != 0) return ret; g_powersave = powersave; return 0; } static size_t g_thread_affinity_mask_all = 0; static size_t g_thread_affinity_mask_little = 0; static size_t g_thread_affinity_mask_big = 0; static int setup_thread_affinity_masks() { g_thread_affinity_mask_all = (1 << get_cpu_count()) - 1; #ifdef __ANDROID__ int max_freq_khz_min = INT_MAX; int max_freq_khz_max = 0; 
vector_def(int) cpu_max_freq_khz; vector_init(cpu_max_freq_khz); vector_resize(cpu_max_freq_khz, get_cpu_count()); for (int i=0; i<get_cpu_count(); i++) { int max_freq_khz = get_max_freq_khz(i); // fprintf(stderr, "%d max freq = %d khz\n", i, max_freq_khz); cpu_max_freq_khz[i] = max_freq_khz; if (max_freq_khz > max_freq_khz_max) max_freq_khz_max = max_freq_khz; if (max_freq_khz < max_freq_khz_min) max_freq_khz_min = max_freq_khz; } int max_freq_khz_medium = (max_freq_khz_min + max_freq_khz_max) / 2; if (max_freq_khz_medium == max_freq_khz_max) { g_thread_affinity_mask_little = 0; g_thread_affinity_mask_big = g_thread_affinity_mask_all; return 0; } for (int i=0; i<get_cpu_count(); i++) { if (cpu_max_freq_khz[i] < max_freq_khz_medium) g_thread_affinity_mask_little |= (1 << i); else g_thread_affinity_mask_big |= (1 << i); } vector_destroy(cpu_max_freq_khz); #else // TODO implement me for other platforms g_thread_affinity_mask_little = 0; g_thread_affinity_mask_big = g_thread_affinity_mask_all; #endif return 0; } size_t get_cpu_thread_affinity_mask(int powersave) { if (g_thread_affinity_mask_all == 0) { setup_thread_affinity_masks(); } if (g_thread_affinity_mask_little == 0) { // SMP cpu powersave not supported // fallback to all cores anyway return g_thread_affinity_mask_all; } if (powersave == 0) return g_thread_affinity_mask_all; if (powersave == 1) return g_thread_affinity_mask_little; if (powersave == 2) return g_thread_affinity_mask_big; fprintf(stderr, "powersave %d not supported\n", powersave); // fallback to all cores anyway return g_thread_affinity_mask_all; } int set_cpu_thread_affinity(size_t thread_affinity_mask) { #ifdef __ANDROID__ int num_threads = 0; for (int i=0; i<(int)sizeof(size_t) * 8; i++) { if (thread_affinity_mask & (1 << i)) num_threads++; } #ifdef _OPENMP // set affinity for each thread set_omp_num_threads(num_threads); vector_def(int) ssarets; vector_init(ssarets); vector_resize(ssarets, num_threads); #pragma omp parallel for 
num_threads(num_threads) for (int i=0; i<num_threads; i++) { vector_get(ssarets, i) = set_sched_affinity(thread_affinity_mask); } for (int i=0; i<num_threads; i++) { if (vector_get(ssarets, i) != 0) return -1; } vector_destroy(ssarets); #else int ssaret = set_sched_affinity(thread_affinity_mask); if (ssaret != 0) return -1; #endif return 0; #else // TODO (void)thread_affinity_mask; return -1; #endif } int get_omp_num_threads() { #ifdef _OPENMP return omp_get_num_threads(); #else return 1; #endif } void set_omp_num_threads(int num_threads) { #ifdef _OPENMP omp_set_num_threads(num_threads); #else (void)num_threads; #endif } int get_omp_dynamic() { #ifdef _OPENMP return omp_get_dynamic(); #else return 0; #endif } void set_omp_dynamic(int dynamic) { #ifdef _OPENMP omp_set_dynamic(dynamic); #else (void)dynamic; #endif } int get_omp_thread_num() { #if _OPENMP return omp_get_thread_num(); #else return 0; #endif }
/* ======================== bitshuffle_core.c ======================== */
/*
 * Bitshuffle - Filter for improving compression of typed binary data.
 *
 * Author: Kiyoshi Masui <kiyo@physics.ubc.ca>
 * Website: http://www.github.com/kiyo-masui/bitshuffle
 * Created: 2014
 *
 * See LICENSE file for details about copyright and rights to use.
 *
 */

#include "bitshuffle_core.h"
#include "bitshuffle_internals.h"

#include <stdio.h>
#include <stdlib.h> /* fixed: malloc/free are used below but <stdlib.h> was not included directly */
#include <string.h>

#if defined(__AVX2__) && defined(__SSE2__)
#define USEAVX2
#endif

#if defined(__SSE2__) || defined(NO_WARN_X86_INTRINSICS)
#define USESSE2
#endif

/* fixed: test the macro with defined() instead of evaluating __ARM_NEON,
 * which relied on the preprocessor substituting 0 for an undefined name */
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#ifdef __aarch64__
#define USEARMNEON
#endif
#endif

// Conditional includes for SSE2 and AVX2.
#ifdef USEAVX2
#include <immintrin.h>
#elif defined USESSE2
#include <emmintrin.h>
#elif defined USEARMNEON
#include <arm_neon.h>
#endif

#if defined(_OPENMP) && defined(_MSC_VER)
typedef int64_t omp_size_t;
#else
typedef size_t omp_size_t;
#endif

// Macros.
/* Early-return guard used by all bit-level kernels: the byte count must be a
 * multiple of 8.  fixed: argument parenthesized and body braced so the macro
 * composes safely with surrounding if/else. */
#define CHECK_MULT_EIGHT(n) if ((n) % 8) { return -80; }
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))


/* ---- Functions indicating compile time instruction set. ---- */

/* Returns 1 when compiled with Arm NEON support, else 0. */
int bshuf_using_NEON(void) {
#ifdef USEARMNEON
    return 1;
#else
    return 0;
#endif
}


/* Returns 1 when compiled with SSE2 support, else 0. */
int bshuf_using_SSE2(void) {
#ifdef USESSE2
    return 1;
#else
    return 0;
#endif
}


/* Returns 1 when compiled with AVX2 support, else 0. */
int bshuf_using_AVX2(void) {
#ifdef USEAVX2
    return 1;
#else
    return 0;
#endif
}


/* ---- Worker code not requiring special instruction sets. ----
 *
 * The following code does not use any x86 specific vectorized instructions
 * and should compile on any machine
 *
 */

/* Transpose 8x8 bit array packed into a single quadword *x*.
 * *t* is workspace. */
#define TRANS_BIT_8X8(x, t) {                                               \
        t = (x ^ (x >> 7)) & 0x00AA00AA00AA00AALL;                          \
        x = x ^ t ^ (t << 7);                                               \
        t = (x ^ (x >> 14)) & 0x0000CCCC0000CCCCLL;                         \
        x = x ^ t ^ (t << 14);                                              \
        t = (x ^ (x >> 28)) & 0x00000000F0F0F0F0LL;                         \
        x = x ^ t ^ (t << 28);                                              \
    }

/* Transpose 8x8 bit array along the diagonal from upper right
   to lower left */
#define TRANS_BIT_8X8_BE(x, t) {                                            \
        t = (x ^ (x >> 9)) & 0x0055005500550055LL;                          \
        x = x ^ t ^ (t << 9);                                               \
        t = (x ^ (x >> 18)) & 0x0000333300003333LL;                         \
        x = x ^ t ^ (t << 18);                                              \
        t = (x ^ (x >> 36)) & 0x000000000F0F0F0FLL;                         \
        x = x ^ t ^ (t << 36);                                              \
    }

/* Transpose of an array of arbitrarily typed elements. */
#define TRANS_ELEM_TYPE(in, out, lda, ldb, type_t) {                        \
        size_t ii, jj, kk;                                                  \
        const type_t* in_type = (const type_t*) in;                         \
        type_t* out_type = (type_t*) out;                                   \
        for(ii = 0; ii + 7 < lda; ii += 8) {                                \
            for(jj = 0; jj < ldb; jj++) {                                   \
                for(kk = 0; kk < 8; kk++) {                                 \
                    out_type[jj*lda + ii + kk] =                            \
                        in_type[ii*ldb + kk * ldb + jj];                    \
                }                                                           \
            }                                                               \
        }                                                                   \
        for(ii = lda - lda % 8; ii < lda; ii ++) {                          \
            for(jj = 0; jj < ldb; jj++) {                                   \
                out_type[jj*lda + ii] = in_type[ii*ldb + jj];               \
            }                                                               \
        }                                                                   \
    }


/* Memory copy with bshuf call signature. For testing and profiling. */
int64_t bshuf_copy(const void* in, void* out, const size_t size,
                   const size_t elem_size) {

    const char* in_b = (const char*) in;
    char* out_b = (char*) out;

    memcpy(out_b, in_b, size * elem_size);
    return size * elem_size;
}


/* Transpose bytes within elements, starting partway through input. */
int64_t bshuf_trans_byte_elem_remainder(const void* in, void* out, const size_t size,
         const size_t elem_size, const size_t start) {

    size_t ii, jj, kk;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;

    CHECK_MULT_EIGHT(start);

    if (size > start) {
        // ii loop separated into 2 loops so the compiler can unroll
        // the inner one.
// (continuation of bshuf_trans_byte_elem_remainder: scatter so that byte jj
// of every element becomes contiguous in the output)
        for (ii = start; ii + 7 < size; ii += 8) {
            for (jj = 0; jj < elem_size; jj++) {
                for (kk = 0; kk < 8; kk++) {
                    out_b[jj * size + ii + kk] =
                        in_b[ii * elem_size + kk * elem_size + jj];
                }
            }
        }
        // tail: fewer than 8 elements remain
        for (ii = size - size % 8; ii < size; ii ++) {
            for (jj = 0; jj < elem_size; jj++) {
                out_b[jj * size + ii] = in_b[ii * elem_size + jj];
            }
        }
    }
    return size * elem_size;
}


/* Transpose bytes within elements. */
int64_t bshuf_trans_byte_elem_scal(const void* in, void* out, const size_t size,
        const size_t elem_size) {

    // full-array case of the remainder kernel (start = 0)
    return bshuf_trans_byte_elem_remainder(in, out, size, elem_size, 0);
}


/* Transpose bits within bytes. */
int64_t bshuf_trans_bit_byte_remainder(const void* in, void* out, const size_t size,
         const size_t elem_size, const size_t start_byte) {

    const uint64_t* in_b = (const uint64_t*) in;
    uint8_t* out_b = (uint8_t*) out;

    uint64_t x, t;

    size_t ii, kk;
    size_t nbyte = elem_size * size;
    size_t nbyte_bitrow = nbyte / 8;

    // runtime endianness probe: first byte of a 64-bit 1 is 1 iff little-endian
    uint64_t e=1;
    const int little_endian = *(uint8_t *) &e == 1;
    // NOTE(review): on big-endian, -nbyte_bitrow wraps in the unsigned size_t,
    // so `kk * bit_row_skip` effectively steps backwards from bit_row_offset —
    // intentional unsigned-wraparound arithmetic.
    const size_t bit_row_skip = little_endian ? nbyte_bitrow : -nbyte_bitrow;
    const int64_t bit_row_offset = little_endian ? 0 : 7 * nbyte_bitrow;

    CHECK_MULT_EIGHT(nbyte);
    CHECK_MULT_EIGHT(start_byte);

    for (ii = start_byte / 8; ii < nbyte_bitrow; ii ++) {
        x = in_b[ii];
        if (little_endian) {
            TRANS_BIT_8X8(x, t);
        } else {
            TRANS_BIT_8X8_BE(x, t);
        }
        // byte kk of the transposed quadword goes into bit-row kk
        for (kk = 0; kk < 8; kk ++) {
            out_b[bit_row_offset + kk * bit_row_skip + ii] = x;
            x = x >> 8;
        }
    }
    return size * elem_size;
}


/* Transpose bits within bytes. */
int64_t bshuf_trans_bit_byte_scal(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    // full-array case of the remainder kernel (start_byte = 0)
    return bshuf_trans_bit_byte_remainder(in, out, size, elem_size, 0);
}


/* General transpose of an array, optimized for large element sizes. */
int64_t bshuf_trans_elem(const void* in, void* out, const size_t lda,
        const size_t ldb, const size_t elem_size) {

    size_t ii, jj;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;
    // copy element (ii, jj) of the lda x ldb matrix to (jj, ii)
    for(ii = 0; ii < lda; ii++) {
        for(jj = 0; jj < ldb; jj++) {
            memcpy(&out_b[(jj*lda + ii) * elem_size],
                   &in_b[(ii*ldb + jj) * elem_size], elem_size);
        }
    }
    return lda * ldb * elem_size;
}


/* Transpose rows of shuffled bits (size / 8 bytes) within groups of 8. */
int64_t bshuf_trans_bitrow_eight(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    size_t nbyte_bitrow = size / 8;

    CHECK_MULT_EIGHT(size);

    return bshuf_trans_elem(in, out, 8, elem_size, nbyte_bitrow);
}


/* Transpose bits within elements. */
int64_t bshuf_trans_bit_elem_scal(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    int64_t count;
    void *tmp_buf;

    CHECK_MULT_EIGHT(size);

    tmp_buf = malloc(size * elem_size);
    if (tmp_buf == NULL) return -1;

    // three-stage pipeline: byte transpose, bit transpose, bit-row regroup;
    // CHECK_ERR_FREE releases tmp_buf and propagates a negative count
    count = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_trans_bit_byte_scal(out, tmp_buf, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);

    free(tmp_buf);

    return count;
}


/* For data organized into a row for each bit (8 * elem_size rows), transpose
 * the bytes. */
int64_t bshuf_trans_byte_bitrow_scal(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    size_t ii, jj, kk, nbyte_row;
    const char *in_b;
    char *out_b;

    in_b = (const char*) in;
    out_b = (char*) out;

    nbyte_row = size / 8;

    CHECK_MULT_EIGHT(size);

    // gather 8 bytes (one per bit-row) for each output position
    for (jj = 0; jj < elem_size; jj++) {
        for (ii = 0; ii < nbyte_row; ii++) {
            for (kk = 0; kk < 8; kk++) {
                out_b[ii * 8 * elem_size + jj * 8 + kk] = \
                    in_b[(jj * 8 + kk) * nbyte_row + ii];
            }
        }
    }
    return size * elem_size;
}


/* Shuffle bits within the bytes of eight element blocks. */
int64_t bshuf_shuffle_bit_eightelem_scal(const void* in, void* out, \
        const size_t size, const size_t elem_size) {

    const char *in_b;
    char *out_b;
    uint64_t x, t;
    size_t ii, jj, kk;
    size_t nbyte, out_index;

    // runtime endianness probe (same trick as bshuf_trans_bit_byte_remainder)
    uint64_t e=1;
    const int little_endian = *(uint8_t *) &e == 1;
    // NOTE(review): -elem_size wraps in the unsigned size_t on big-endian so
    // the kk * elem_skip term walks downward from elem_offset — intentional.
    const size_t elem_skip = little_endian ? elem_size : -elem_size;
    const uint64_t elem_offset = little_endian ? 0 : 7 * elem_size;

    CHECK_MULT_EIGHT(size);

    in_b = (const char*) in;
    out_b = (char*) out;

    nbyte = elem_size * size;

    for (jj = 0; jj < 8 * elem_size; jj += 8) {
        for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
                ii += 8 * elem_size) {
            x = *((uint64_t*) &in_b[ii + jj]);
            if (little_endian) {
                TRANS_BIT_8X8(x, t);
            } else {
                TRANS_BIT_8X8_BE(x, t);
            }
            // distribute the 8 transposed bytes across the 8 elements
            for (kk = 0; kk < 8; kk++) {
                out_index = ii + jj / 8 + elem_offset + kk * elem_skip;
                *((uint8_t*) &out_b[out_index]) = x;
                x = x >> 8;
            }
        }
    }
    return size * elem_size;
}


/* Untranspose bits within elements. */
int64_t bshuf_untrans_bit_elem_scal(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    int64_t count;
    void *tmp_buf;

    CHECK_MULT_EIGHT(size);

    tmp_buf = malloc(size * elem_size);
    if (tmp_buf == NULL) return -1;

    // inverse of bshuf_trans_bit_elem_scal: undo bit-row layout then reshuffle
    count = bshuf_trans_byte_bitrow_scal(in, tmp_buf, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count =  bshuf_shuffle_bit_eightelem_scal(tmp_buf, out, size, elem_size);

    free(tmp_buf);

    return count;
}


/* ---- Worker code that uses Arm NEON ----
 *
 * The following code makes use of the Arm NEON instruction set.
 * NEON technology is the implementation of the ARM Advanced Single
 * Instruction Multiple Data (SIMD) extension.
 * The NEON unit is the component of the processor that executes SIMD instructions.
 * It is also called the NEON Media Processing Engine (MPE).
 *
 */
#ifdef USEARMNEON

/* Transpose bytes within elements for 16 bit elements.
*/ int64_t bshuf_trans_byte_elem_NEON_16(const void* in, void* out, const size_t size) { size_t ii; const char *in_b = (const char*) in; char *out_b = (char*) out; int8x16_t a0, b0, a1, b1; for (ii=0; ii + 15 < size; ii += 16) { a0 = vld1q_s8(in_b + 2*ii + 0*16); b0 = vld1q_s8(in_b + 2*ii + 1*16); a1 = vzip1q_s8(a0, b0); b1 = vzip2q_s8(a0, b0); a0 = vzip1q_s8(a1, b1); b0 = vzip2q_s8(a1, b1); a1 = vzip1q_s8(a0, b0); b1 = vzip2q_s8(a0, b0); a0 = vzip1q_s8(a1, b1); b0 = vzip2q_s8(a1, b1); vst1q_s8(out_b + 0*size + ii, a0); vst1q_s8(out_b + 1*size + ii, b0); } return bshuf_trans_byte_elem_remainder(in, out, size, 2, size - size % 16); } /* Transpose bytes within elements for 32 bit elements. */ int64_t bshuf_trans_byte_elem_NEON_32(const void* in, void* out, const size_t size) { size_t ii; const char *in_b; char *out_b; in_b = (const char*) in; out_b = (char*) out; int8x16_t a0, b0, c0, d0, a1, b1, c1, d1; int64x2_t a2, b2, c2, d2; for (ii=0; ii + 15 < size; ii += 16) { a0 = vld1q_s8(in_b + 4*ii + 0*16); b0 = vld1q_s8(in_b + 4*ii + 1*16); c0 = vld1q_s8(in_b + 4*ii + 2*16); d0 = vld1q_s8(in_b + 4*ii + 3*16); a1 = vzip1q_s8(a0, b0); b1 = vzip2q_s8(a0, b0); c1 = vzip1q_s8(c0, d0); d1 = vzip2q_s8(c0, d0); a0 = vzip1q_s8(a1, b1); b0 = vzip2q_s8(a1, b1); c0 = vzip1q_s8(c1, d1); d0 = vzip2q_s8(c1, d1); a1 = vzip1q_s8(a0, b0); b1 = vzip2q_s8(a0, b0); c1 = vzip1q_s8(c0, d0); d1 = vzip2q_s8(c0, d0); a2 = vzip1q_s64(vreinterpretq_s64_s8(a1), vreinterpretq_s64_s8(c1)); b2 = vzip2q_s64(vreinterpretq_s64_s8(a1), vreinterpretq_s64_s8(c1)); c2 = vzip1q_s64(vreinterpretq_s64_s8(b1), vreinterpretq_s64_s8(d1)); d2 = vzip2q_s64(vreinterpretq_s64_s8(b1), vreinterpretq_s64_s8(d1)); vst1q_s64((int64_t *) (out_b + 0*size + ii), a2); vst1q_s64((int64_t *) (out_b + 1*size + ii), b2); vst1q_s64((int64_t *) (out_b + 2*size + ii), c2); vst1q_s64((int64_t *) (out_b + 3*size + ii), d2); } return bshuf_trans_byte_elem_remainder(in, out, size, 4, size - size % 16); } /* Transpose bytes within elements 
for 64 bit elements. */ int64_t bshuf_trans_byte_elem_NEON_64(const void* in, void* out, const size_t size) { size_t ii; const char* in_b = (const char*) in; char* out_b = (char*) out; int8x16_t a0, b0, c0, d0, e0, f0, g0, h0; int8x16_t a1, b1, c1, d1, e1, f1, g1, h1; for (ii=0; ii + 15 < size; ii += 16) { a0 = vld1q_s8(in_b + 8*ii + 0*16); b0 = vld1q_s8(in_b + 8*ii + 1*16); c0 = vld1q_s8(in_b + 8*ii + 2*16); d0 = vld1q_s8(in_b + 8*ii + 3*16); e0 = vld1q_s8(in_b + 8*ii + 4*16); f0 = vld1q_s8(in_b + 8*ii + 5*16); g0 = vld1q_s8(in_b + 8*ii + 6*16); h0 = vld1q_s8(in_b + 8*ii + 7*16); a1 = vzip1q_s8 (a0, b0); b1 = vzip2q_s8 (a0, b0); c1 = vzip1q_s8 (c0, d0); d1 = vzip2q_s8 (c0, d0); e1 = vzip1q_s8 (e0, f0); f1 = vzip2q_s8 (e0, f0); g1 = vzip1q_s8 (g0, h0); h1 = vzip2q_s8 (g0, h0); a0 = vzip1q_s8 (a1, b1); b0 = vzip2q_s8 (a1, b1); c0 = vzip1q_s8 (c1, d1); d0 = vzip2q_s8 (c1, d1); e0 = vzip1q_s8 (e1, f1); f0 = vzip2q_s8 (e1, f1); g0 = vzip1q_s8 (g1, h1); h0 = vzip2q_s8 (g1, h1); a1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (c0)); b1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (c0)); c1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (b0), vreinterpretq_s32_s8 (d0)); d1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (b0), vreinterpretq_s32_s8 (d0)); e1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (g0)); f1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (g0)); g1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (f0), vreinterpretq_s32_s8 (h0)); h1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (f0), vreinterpretq_s32_s8 (h0)); a0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (a1), vreinterpretq_s64_s8 (e1)); b0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (a1), vreinterpretq_s64_s8 (e1)); c0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (b1), vreinterpretq_s64_s8 (f1)); d0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (b1), vreinterpretq_s64_s8 (f1)); e0 
= (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (c1), vreinterpretq_s64_s8 (g1)); f0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (c1), vreinterpretq_s64_s8 (g1)); g0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (d1), vreinterpretq_s64_s8 (h1)); h0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (d1), vreinterpretq_s64_s8 (h1)); vst1q_s8(out_b + 0*size + ii, a0); vst1q_s8(out_b + 1*size + ii, b0); vst1q_s8(out_b + 2*size + ii, c0); vst1q_s8(out_b + 3*size + ii, d0); vst1q_s8(out_b + 4*size + ii, e0); vst1q_s8(out_b + 5*size + ii, f0); vst1q_s8(out_b + 6*size + ii, g0); vst1q_s8(out_b + 7*size + ii, h0); } return bshuf_trans_byte_elem_remainder(in, out, size, 8, size - size % 16); } /* Transpose bytes within elements using best NEON algorithm available. */ int64_t bshuf_trans_byte_elem_NEON(const void* in, void* out, const size_t size, const size_t elem_size) { int64_t count; // Trivial cases: power of 2 bytes. switch (elem_size) { case 1: count = bshuf_copy(in, out, size, elem_size); return count; case 2: count = bshuf_trans_byte_elem_NEON_16(in, out, size); return count; case 4: count = bshuf_trans_byte_elem_NEON_32(in, out, size); return count; case 8: count = bshuf_trans_byte_elem_NEON_64(in, out, size); return count; } // Worst case: odd number of bytes. Turns out that this is faster for // (odd * 2) byte elements as well (hence % 4). if (elem_size % 4) { count = bshuf_trans_byte_elem_scal(in, out, size, elem_size); return count; } // Multiple of power of 2: transpose hierarchically. 
{ size_t nchunk_elem; void* tmp_buf = malloc(size * elem_size); if (tmp_buf == NULL) return -1; if ((elem_size % 8) == 0) { nchunk_elem = elem_size / 8; TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int64_t); count = bshuf_trans_byte_elem_NEON_64(out, tmp_buf, size * nchunk_elem); bshuf_trans_elem(tmp_buf, out, 8, nchunk_elem, size); } else if ((elem_size % 4) == 0) { nchunk_elem = elem_size / 4; TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int32_t); count = bshuf_trans_byte_elem_NEON_32(out, tmp_buf, size * nchunk_elem); bshuf_trans_elem(tmp_buf, out, 4, nchunk_elem, size); } else { // Not used since scalar algorithm is faster. nchunk_elem = elem_size / 2; TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int16_t); count = bshuf_trans_byte_elem_NEON_16(out, tmp_buf, size * nchunk_elem); bshuf_trans_elem(tmp_buf, out, 2, nchunk_elem, size); } free(tmp_buf); return count; } } /* Creates a mask made up of the most significant * bit of each byte of 'input' */ int32_t move_byte_mask_neon(uint8x16_t input) { return ( ((input[0] & 0x80) >> 7) | (((input[1] & 0x80) >> 7) << 1) | (((input[2] & 0x80) >> 7) << 2) | (((input[3] & 0x80) >> 7) << 3) | (((input[4] & 0x80) >> 7) << 4) | (((input[5] & 0x80) >> 7) << 5) | (((input[6] & 0x80) >> 7) << 6) | (((input[7] & 0x80) >> 7) << 7) | (((input[8] & 0x80) >> 7) << 8) | (((input[9] & 0x80) >> 7) << 9) | (((input[10] & 0x80) >> 7) << 10) | (((input[11] & 0x80) >> 7) << 11) | (((input[12] & 0x80) >> 7) << 12) | (((input[13] & 0x80) >> 7) << 13) | (((input[14] & 0x80) >> 7) << 14) | (((input[15] & 0x80) >> 7) << 15) ); } /* Transpose bits within bytes. 
*/ int64_t bshuf_trans_bit_byte_NEON(const void* in, void* out, const size_t size, const size_t elem_size) { size_t ii, kk; const char* in_b = (const char*) in; char* out_b = (char*) out; uint16_t* out_ui16; int64_t count; size_t nbyte = elem_size * size; CHECK_MULT_EIGHT(nbyte); int16x8_t xmm; int32_t bt; for (ii = 0; ii + 15 < nbyte; ii += 16) { xmm = vld1q_s16((int16_t *) (in_b + ii)); for (kk = 0; kk < 8; kk++) { bt = move_byte_mask_neon((uint8x16_t) xmm); xmm = vshlq_n_s16(xmm, 1); out_ui16 = (uint16_t*) &out_b[((7 - kk) * nbyte + ii) / 8]; *out_ui16 = bt; } } count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size, nbyte - nbyte % 16); return count; } /* Transpose bits within elements. */ int64_t bshuf_trans_bit_elem_NEON(const void* in, void* out, const size_t size, const size_t elem_size) { int64_t count; CHECK_MULT_EIGHT(size); void* tmp_buf = malloc(size * elem_size); if (tmp_buf == NULL) return -1; count = bshuf_trans_byte_elem_NEON(in, out, size, elem_size); CHECK_ERR_FREE(count, tmp_buf); count = bshuf_trans_bit_byte_NEON(out, tmp_buf, size, elem_size); CHECK_ERR_FREE(count, tmp_buf); count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size); free(tmp_buf); return count; } /* For data organized into a row for each bit (8 * elem_size rows), transpose * the bytes. 
*/ int64_t bshuf_trans_byte_bitrow_NEON(const void* in, void* out, const size_t size, const size_t elem_size) { size_t ii, jj; const char* in_b = (const char*) in; char* out_b = (char*) out; CHECK_MULT_EIGHT(size); size_t nrows = 8 * elem_size; size_t nbyte_row = size / 8; int8x16_t a0, b0, c0, d0, e0, f0, g0, h0; int8x16_t a1, b1, c1, d1, e1, f1, g1, h1; int64x1_t *as, *bs, *cs, *ds, *es, *fs, *gs, *hs; for (ii = 0; ii + 7 < nrows; ii += 8) { for (jj = 0; jj + 15 < nbyte_row; jj += 16) { a0 = vld1q_s8(in_b + (ii + 0)*nbyte_row + jj); b0 = vld1q_s8(in_b + (ii + 1)*nbyte_row + jj); c0 = vld1q_s8(in_b + (ii + 2)*nbyte_row + jj); d0 = vld1q_s8(in_b + (ii + 3)*nbyte_row + jj); e0 = vld1q_s8(in_b + (ii + 4)*nbyte_row + jj); f0 = vld1q_s8(in_b + (ii + 5)*nbyte_row + jj); g0 = vld1q_s8(in_b + (ii + 6)*nbyte_row + jj); h0 = vld1q_s8(in_b + (ii + 7)*nbyte_row + jj); a1 = vzip1q_s8(a0, b0); b1 = vzip1q_s8(c0, d0); c1 = vzip1q_s8(e0, f0); d1 = vzip1q_s8(g0, h0); e1 = vzip2q_s8(a0, b0); f1 = vzip2q_s8(c0, d0); g1 = vzip2q_s8(e0, f0); h1 = vzip2q_s8(g0, h0); a0 = (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (a1), vreinterpretq_s16_s8 (b1)); b0= (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (c1), vreinterpretq_s16_s8 (d1)); c0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (a1), vreinterpretq_s16_s8 (b1)); d0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (c1), vreinterpretq_s16_s8 (d1)); e0 = (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (e1), vreinterpretq_s16_s8 (f1)); f0 = (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (g1), vreinterpretq_s16_s8 (h1)); g0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (e1), vreinterpretq_s16_s8 (f1)); h0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (g1), vreinterpretq_s16_s8 (h1)); a1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (b0)); b1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (b0)); c1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (c0), vreinterpretq_s32_s8 (d0)); d1 = 
(int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (c0), vreinterpretq_s32_s8 (d0)); e1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (f0)); f1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (f0)); g1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (g0), vreinterpretq_s32_s8 (h0)); h1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (g0), vreinterpretq_s32_s8 (h0)); as = (int64x1_t *) &a1; bs = (int64x1_t *) &b1; cs = (int64x1_t *) &c1; ds = (int64x1_t *) &d1; es = (int64x1_t *) &e1; fs = (int64x1_t *) &f1; gs = (int64x1_t *) &g1; hs = (int64x1_t *) &h1; vst1_s64((int64_t *)(out_b + (jj + 0) * nrows + ii), *as); vst1_s64((int64_t *)(out_b + (jj + 1) * nrows + ii), *(as + 1)); vst1_s64((int64_t *)(out_b + (jj + 2) * nrows + ii), *bs); vst1_s64((int64_t *)(out_b + (jj + 3) * nrows + ii), *(bs + 1)); vst1_s64((int64_t *)(out_b + (jj + 4) * nrows + ii), *cs); vst1_s64((int64_t *)(out_b + (jj + 5) * nrows + ii), *(cs + 1)); vst1_s64((int64_t *)(out_b + (jj + 6) * nrows + ii), *ds); vst1_s64((int64_t *)(out_b + (jj + 7) * nrows + ii), *(ds + 1)); vst1_s64((int64_t *)(out_b + (jj + 8) * nrows + ii), *es); vst1_s64((int64_t *)(out_b + (jj + 9) * nrows + ii), *(es + 1)); vst1_s64((int64_t *)(out_b + (jj + 10) * nrows + ii), *fs); vst1_s64((int64_t *)(out_b + (jj + 11) * nrows + ii), *(fs + 1)); vst1_s64((int64_t *)(out_b + (jj + 12) * nrows + ii), *gs); vst1_s64((int64_t *)(out_b + (jj + 13) * nrows + ii), *(gs + 1)); vst1_s64((int64_t *)(out_b + (jj + 14) * nrows + ii), *hs); vst1_s64((int64_t *)(out_b + (jj + 15) * nrows + ii), *(hs + 1)); } for (jj = nbyte_row - nbyte_row % 16; jj < nbyte_row; jj ++) { out_b[jj * nrows + ii + 0] = in_b[(ii + 0)*nbyte_row + jj]; out_b[jj * nrows + ii + 1] = in_b[(ii + 1)*nbyte_row + jj]; out_b[jj * nrows + ii + 2] = in_b[(ii + 2)*nbyte_row + jj]; out_b[jj * nrows + ii + 3] = in_b[(ii + 3)*nbyte_row + jj]; out_b[jj * nrows + ii + 4] = in_b[(ii + 4)*nbyte_row + jj]; out_b[jj * nrows + ii + 5] = 
in_b[(ii + 5)*nbyte_row + jj]; out_b[jj * nrows + ii + 6] = in_b[(ii + 6)*nbyte_row + jj]; out_b[jj * nrows + ii + 7] = in_b[(ii + 7)*nbyte_row + jj]; } } return size * elem_size; } /* Shuffle bits within the bytes of eight element blocks. */ int64_t bshuf_shuffle_bit_eightelem_NEON(const void* in, void* out, const size_t size, const size_t elem_size) { CHECK_MULT_EIGHT(size); // With a bit of care, this could be written such that such that it is // in_buf = out_buf safe. const char* in_b = (const char*) in; uint16_t* out_ui16 = (uint16_t*) out; size_t ii, jj, kk; size_t nbyte = elem_size * size; int16x8_t xmm; int32_t bt; if (elem_size % 2) { bshuf_shuffle_bit_eightelem_scal(in, out, size, elem_size); } else { for (ii = 0; ii + 8 * elem_size - 1 < nbyte; ii += 8 * elem_size) { for (jj = 0; jj + 15 < 8 * elem_size; jj += 16) { xmm = vld1q_s16((int16_t *) &in_b[ii + jj]); for (kk = 0; kk < 8; kk++) { bt = move_byte_mask_neon((uint8x16_t) xmm); xmm = vshlq_n_s16(xmm, 1); size_t ind = (ii + jj / 8 + (7 - kk) * elem_size); out_ui16[ind / 2] = bt; } } } } return size * elem_size; } /* Untranspose bits within elements. 
*/ int64_t bshuf_untrans_bit_elem_NEON(const void* in, void* out, const size_t size, const size_t elem_size) { int64_t count; CHECK_MULT_EIGHT(size); void* tmp_buf = malloc(size * elem_size); if (tmp_buf == NULL) return -1; count = bshuf_trans_byte_bitrow_NEON(in, tmp_buf, size, elem_size); CHECK_ERR_FREE(count, tmp_buf); count = bshuf_shuffle_bit_eightelem_NEON(tmp_buf, out, size, elem_size); free(tmp_buf); return count; } #else // #ifdef USEARMNEON int64_t bshuf_untrans_bit_elem_NEON(const void* in, void* out, const size_t size, const size_t elem_size) { return -13; } int64_t bshuf_trans_bit_elem_NEON(const void* in, void* out, const size_t size, const size_t elem_size) { return -13; } int64_t bshuf_trans_byte_bitrow_NEON(const void* in, void* out, const size_t size, const size_t elem_size) { return -13; } int64_t bshuf_trans_bit_byte_NEON(const void* in, void* out, const size_t size, const size_t elem_size) { return -13; } int64_t bshuf_trans_byte_elem_NEON(const void* in, void* out, const size_t size, const size_t elem_size) { return -13; } int64_t bshuf_trans_byte_elem_NEON_64(const void* in, void* out, const size_t size) { return -13; } int64_t bshuf_trans_byte_elem_NEON_32(const void* in, void* out, const size_t size) { return -13; } int64_t bshuf_trans_byte_elem_NEON_16(const void* in, void* out, const size_t size) { return -13; } int64_t bshuf_shuffle_bit_eightelem_NEON(const void* in, void* out, const size_t size, const size_t elem_size) { return -13; } #endif /* ---- Worker code that uses SSE2 ---- * * The following code makes use of the SSE2 instruction set and specialized * 16 byte registers. The SSE2 instructions are present on modern x86 * processors. The first Intel processor microarchitecture supporting SSE2 was * Pentium 4 (2000). * */ #ifdef USESSE2 /* Transpose bytes within elements for 16 bit elements. 
*/ int64_t bshuf_trans_byte_elem_SSE_16(const void* in, void* out, const size_t size) { size_t ii; const char *in_b = (const char*) in; char *out_b = (char*) out; __m128i a0, b0, a1, b1; for (ii=0; ii + 15 < size; ii += 16) { a0 = _mm_loadu_si128((__m128i *) &in_b[2*ii + 0*16]); b0 = _mm_loadu_si128((__m128i *) &in_b[2*ii + 1*16]); a1 = _mm_unpacklo_epi8(a0, b0); b1 = _mm_unpackhi_epi8(a0, b0); a0 = _mm_unpacklo_epi8(a1, b1); b0 = _mm_unpackhi_epi8(a1, b1); a1 = _mm_unpacklo_epi8(a0, b0); b1 = _mm_unpackhi_epi8(a0, b0); a0 = _mm_unpacklo_epi8(a1, b1); b0 = _mm_unpackhi_epi8(a1, b1); _mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0); _mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0); } return bshuf_trans_byte_elem_remainder(in, out, size, 2, size - size % 16); } /* Transpose bytes within elements for 32 bit elements. */ int64_t bshuf_trans_byte_elem_SSE_32(const void* in, void* out, const size_t size) { size_t ii; const char *in_b; char *out_b; in_b = (const char*) in; out_b = (char*) out; __m128i a0, b0, c0, d0, a1, b1, c1, d1; for (ii=0; ii + 15 < size; ii += 16) { a0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 0*16]); b0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 1*16]); c0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 2*16]); d0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 3*16]); a1 = _mm_unpacklo_epi8(a0, b0); b1 = _mm_unpackhi_epi8(a0, b0); c1 = _mm_unpacklo_epi8(c0, d0); d1 = _mm_unpackhi_epi8(c0, d0); a0 = _mm_unpacklo_epi8(a1, b1); b0 = _mm_unpackhi_epi8(a1, b1); c0 = _mm_unpacklo_epi8(c1, d1); d0 = _mm_unpackhi_epi8(c1, d1); a1 = _mm_unpacklo_epi8(a0, b0); b1 = _mm_unpackhi_epi8(a0, b0); c1 = _mm_unpacklo_epi8(c0, d0); d1 = _mm_unpackhi_epi8(c0, d0); a0 = _mm_unpacklo_epi64(a1, c1); b0 = _mm_unpackhi_epi64(a1, c1); c0 = _mm_unpacklo_epi64(b1, d1); d0 = _mm_unpackhi_epi64(b1, d1); _mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0); _mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0); _mm_storeu_si128((__m128i *) &out_b[2*size + ii], c0); 
_mm_storeu_si128((__m128i *) &out_b[3*size + ii], d0); } return bshuf_trans_byte_elem_remainder(in, out, size, 4, size - size % 16); } /* Transpose bytes within elements for 64 bit elements. */ int64_t bshuf_trans_byte_elem_SSE_64(const void* in, void* out, const size_t size) { size_t ii; const char* in_b = (const char*) in; char* out_b = (char*) out; __m128i a0, b0, c0, d0, e0, f0, g0, h0; __m128i a1, b1, c1, d1, e1, f1, g1, h1; for (ii=0; ii + 15 < size; ii += 16) { a0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 0*16]); b0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 1*16]); c0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 2*16]); d0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 3*16]); e0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 4*16]); f0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 5*16]); g0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 6*16]); h0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 7*16]); a1 = _mm_unpacklo_epi8(a0, b0); b1 = _mm_unpackhi_epi8(a0, b0); c1 = _mm_unpacklo_epi8(c0, d0); d1 = _mm_unpackhi_epi8(c0, d0); e1 = _mm_unpacklo_epi8(e0, f0); f1 = _mm_unpackhi_epi8(e0, f0); g1 = _mm_unpacklo_epi8(g0, h0); h1 = _mm_unpackhi_epi8(g0, h0); a0 = _mm_unpacklo_epi8(a1, b1); b0 = _mm_unpackhi_epi8(a1, b1); c0 = _mm_unpacklo_epi8(c1, d1); d0 = _mm_unpackhi_epi8(c1, d1); e0 = _mm_unpacklo_epi8(e1, f1); f0 = _mm_unpackhi_epi8(e1, f1); g0 = _mm_unpacklo_epi8(g1, h1); h0 = _mm_unpackhi_epi8(g1, h1); a1 = _mm_unpacklo_epi32(a0, c0); b1 = _mm_unpackhi_epi32(a0, c0); c1 = _mm_unpacklo_epi32(b0, d0); d1 = _mm_unpackhi_epi32(b0, d0); e1 = _mm_unpacklo_epi32(e0, g0); f1 = _mm_unpackhi_epi32(e0, g0); g1 = _mm_unpacklo_epi32(f0, h0); h1 = _mm_unpackhi_epi32(f0, h0); a0 = _mm_unpacklo_epi64(a1, e1); b0 = _mm_unpackhi_epi64(a1, e1); c0 = _mm_unpacklo_epi64(b1, f1); d0 = _mm_unpackhi_epi64(b1, f1); e0 = _mm_unpacklo_epi64(c1, g1); f0 = _mm_unpackhi_epi64(c1, g1); g0 = _mm_unpacklo_epi64(d1, h1); h0 = _mm_unpackhi_epi64(d1, h1); _mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0); 
_mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0); _mm_storeu_si128((__m128i *) &out_b[2*size + ii], c0); _mm_storeu_si128((__m128i *) &out_b[3*size + ii], d0); _mm_storeu_si128((__m128i *) &out_b[4*size + ii], e0); _mm_storeu_si128((__m128i *) &out_b[5*size + ii], f0); _mm_storeu_si128((__m128i *) &out_b[6*size + ii], g0); _mm_storeu_si128((__m128i *) &out_b[7*size + ii], h0); } return bshuf_trans_byte_elem_remainder(in, out, size, 8, size - size % 16); } /* Transpose bytes within elements using best SSE algorithm available. */ int64_t bshuf_trans_byte_elem_SSE(const void* in, void* out, const size_t size, const size_t elem_size) { int64_t count; // Trivial cases: power of 2 bytes. switch (elem_size) { case 1: count = bshuf_copy(in, out, size, elem_size); return count; case 2: count = bshuf_trans_byte_elem_SSE_16(in, out, size); return count; case 4: count = bshuf_trans_byte_elem_SSE_32(in, out, size); return count; case 8: count = bshuf_trans_byte_elem_SSE_64(in, out, size); return count; } // Worst case: odd number of bytes. Turns out that this is faster for // (odd * 2) byte elements as well (hence % 4). if (elem_size % 4) { count = bshuf_trans_byte_elem_scal(in, out, size, elem_size); return count; } // Multiple of power of 2: transpose hierarchically. { size_t nchunk_elem; void* tmp_buf = malloc(size * elem_size); if (tmp_buf == NULL) return -1; if ((elem_size % 8) == 0) { nchunk_elem = elem_size / 8; TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int64_t); count = bshuf_trans_byte_elem_SSE_64(out, tmp_buf, size * nchunk_elem); bshuf_trans_elem(tmp_buf, out, 8, nchunk_elem, size); } else if ((elem_size % 4) == 0) { nchunk_elem = elem_size / 4; TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int32_t); count = bshuf_trans_byte_elem_SSE_32(out, tmp_buf, size * nchunk_elem); bshuf_trans_elem(tmp_buf, out, 4, nchunk_elem, size); } else { // Not used since scalar algorithm is faster. 
nchunk_elem = elem_size / 2; TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int16_t); count = bshuf_trans_byte_elem_SSE_16(out, tmp_buf, size * nchunk_elem); bshuf_trans_elem(tmp_buf, out, 2, nchunk_elem, size); } free(tmp_buf); return count; } } /* Transpose bits within bytes. */ int64_t bshuf_trans_bit_byte_SSE(const void* in, void* out, const size_t size, const size_t elem_size) { size_t ii, kk; const char* in_b = (const char*) in; char* out_b = (char*) out; uint16_t* out_ui16; int64_t count; size_t nbyte = elem_size * size; CHECK_MULT_EIGHT(nbyte); __m128i xmm; int32_t bt; for (ii = 0; ii + 15 < nbyte; ii += 16) { xmm = _mm_loadu_si128((__m128i *) &in_b[ii]); for (kk = 0; kk < 8; kk++) { bt = _mm_movemask_epi8(xmm); xmm = _mm_slli_epi16(xmm, 1); out_ui16 = (uint16_t*) &out_b[((7 - kk) * nbyte + ii) / 8]; *out_ui16 = bt; } } count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size, nbyte - nbyte % 16); return count; } /* Transpose bits within elements. */ int64_t bshuf_trans_bit_elem_SSE(const void* in, void* out, const size_t size, const size_t elem_size) { int64_t count; CHECK_MULT_EIGHT(size); void* tmp_buf = malloc(size * elem_size); if (tmp_buf == NULL) return -1; count = bshuf_trans_byte_elem_SSE(in, out, size, elem_size); CHECK_ERR_FREE(count, tmp_buf); count = bshuf_trans_bit_byte_SSE(out, tmp_buf, size, elem_size); CHECK_ERR_FREE(count, tmp_buf); count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size); free(tmp_buf); return count; } /* For data organized into a row for each bit (8 * elem_size rows), transpose * the bytes. 
*/ int64_t bshuf_trans_byte_bitrow_SSE(const void* in, void* out, const size_t size, const size_t elem_size) { size_t ii, jj; const char* in_b = (const char*) in; char* out_b = (char*) out; CHECK_MULT_EIGHT(size); size_t nrows = 8 * elem_size; size_t nbyte_row = size / 8; __m128i a0, b0, c0, d0, e0, f0, g0, h0; __m128i a1, b1, c1, d1, e1, f1, g1, h1; __m128 *as, *bs, *cs, *ds, *es, *fs, *gs, *hs; for (ii = 0; ii + 7 < nrows; ii += 8) { for (jj = 0; jj + 15 < nbyte_row; jj += 16) { a0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 0)*nbyte_row + jj]); b0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 1)*nbyte_row + jj]); c0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 2)*nbyte_row + jj]); d0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 3)*nbyte_row + jj]); e0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 4)*nbyte_row + jj]); f0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 5)*nbyte_row + jj]); g0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 6)*nbyte_row + jj]); h0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 7)*nbyte_row + jj]); a1 = _mm_unpacklo_epi8(a0, b0); b1 = _mm_unpacklo_epi8(c0, d0); c1 = _mm_unpacklo_epi8(e0, f0); d1 = _mm_unpacklo_epi8(g0, h0); e1 = _mm_unpackhi_epi8(a0, b0); f1 = _mm_unpackhi_epi8(c0, d0); g1 = _mm_unpackhi_epi8(e0, f0); h1 = _mm_unpackhi_epi8(g0, h0); a0 = _mm_unpacklo_epi16(a1, b1); b0 = _mm_unpacklo_epi16(c1, d1); c0 = _mm_unpackhi_epi16(a1, b1); d0 = _mm_unpackhi_epi16(c1, d1); e0 = _mm_unpacklo_epi16(e1, f1); f0 = _mm_unpacklo_epi16(g1, h1); g0 = _mm_unpackhi_epi16(e1, f1); h0 = _mm_unpackhi_epi16(g1, h1); a1 = _mm_unpacklo_epi32(a0, b0); b1 = _mm_unpackhi_epi32(a0, b0); c1 = _mm_unpacklo_epi32(c0, d0); d1 = _mm_unpackhi_epi32(c0, d0); e1 = _mm_unpacklo_epi32(e0, f0); f1 = _mm_unpackhi_epi32(e0, f0); g1 = _mm_unpacklo_epi32(g0, h0); h1 = _mm_unpackhi_epi32(g0, h0); // We don't have a storeh instruction for integers, so interpret // as a float. Have a storel (_mm_storel_epi64). 
as = (__m128 *) &a1; bs = (__m128 *) &b1; cs = (__m128 *) &c1; ds = (__m128 *) &d1; es = (__m128 *) &e1; fs = (__m128 *) &f1; gs = (__m128 *) &g1; hs = (__m128 *) &h1; _mm_storel_pi((__m64 *) &out_b[(jj + 0) * nrows + ii], *as); _mm_storel_pi((__m64 *) &out_b[(jj + 2) * nrows + ii], *bs); _mm_storel_pi((__m64 *) &out_b[(jj + 4) * nrows + ii], *cs); _mm_storel_pi((__m64 *) &out_b[(jj + 6) * nrows + ii], *ds); _mm_storel_pi((__m64 *) &out_b[(jj + 8) * nrows + ii], *es); _mm_storel_pi((__m64 *) &out_b[(jj + 10) * nrows + ii], *fs); _mm_storel_pi((__m64 *) &out_b[(jj + 12) * nrows + ii], *gs); _mm_storel_pi((__m64 *) &out_b[(jj + 14) * nrows + ii], *hs); _mm_storeh_pi((__m64 *) &out_b[(jj + 1) * nrows + ii], *as); _mm_storeh_pi((__m64 *) &out_b[(jj + 3) * nrows + ii], *bs); _mm_storeh_pi((__m64 *) &out_b[(jj + 5) * nrows + ii], *cs); _mm_storeh_pi((__m64 *) &out_b[(jj + 7) * nrows + ii], *ds); _mm_storeh_pi((__m64 *) &out_b[(jj + 9) * nrows + ii], *es); _mm_storeh_pi((__m64 *) &out_b[(jj + 11) * nrows + ii], *fs); _mm_storeh_pi((__m64 *) &out_b[(jj + 13) * nrows + ii], *gs); _mm_storeh_pi((__m64 *) &out_b[(jj + 15) * nrows + ii], *hs); } for (jj = nbyte_row - nbyte_row % 16; jj < nbyte_row; jj ++) { out_b[jj * nrows + ii + 0] = in_b[(ii + 0)*nbyte_row + jj]; out_b[jj * nrows + ii + 1] = in_b[(ii + 1)*nbyte_row + jj]; out_b[jj * nrows + ii + 2] = in_b[(ii + 2)*nbyte_row + jj]; out_b[jj * nrows + ii + 3] = in_b[(ii + 3)*nbyte_row + jj]; out_b[jj * nrows + ii + 4] = in_b[(ii + 4)*nbyte_row + jj]; out_b[jj * nrows + ii + 5] = in_b[(ii + 5)*nbyte_row + jj]; out_b[jj * nrows + ii + 6] = in_b[(ii + 6)*nbyte_row + jj]; out_b[jj * nrows + ii + 7] = in_b[(ii + 7)*nbyte_row + jj]; } } return size * elem_size; } /* Shuffle bits within the bytes of eight element blocks. 
*/ int64_t bshuf_shuffle_bit_eightelem_SSE(const void* in, void* out, const size_t size, const size_t elem_size) { CHECK_MULT_EIGHT(size); // With a bit of care, this could be written such that such that it is // in_buf = out_buf safe. const char* in_b = (const char*) in; uint16_t* out_ui16 = (uint16_t*) out; size_t ii, jj, kk; size_t nbyte = elem_size * size; __m128i xmm; int32_t bt; if (elem_size % 2) { bshuf_shuffle_bit_eightelem_scal(in, out, size, elem_size); } else { for (ii = 0; ii + 8 * elem_size - 1 < nbyte; ii += 8 * elem_size) { for (jj = 0; jj + 15 < 8 * elem_size; jj += 16) { xmm = _mm_loadu_si128((__m128i *) &in_b[ii + jj]); for (kk = 0; kk < 8; kk++) { bt = _mm_movemask_epi8(xmm); xmm = _mm_slli_epi16(xmm, 1); size_t ind = (ii + jj / 8 + (7 - kk) * elem_size); out_ui16[ind / 2] = bt; } } } } return size * elem_size; } /* Untranspose bits within elements. */ int64_t bshuf_untrans_bit_elem_SSE(const void* in, void* out, const size_t size, const size_t elem_size) { int64_t count; CHECK_MULT_EIGHT(size); void* tmp_buf = malloc(size * elem_size); if (tmp_buf == NULL) return -1; count = bshuf_trans_byte_bitrow_SSE(in, tmp_buf, size, elem_size); CHECK_ERR_FREE(count, tmp_buf); count = bshuf_shuffle_bit_eightelem_SSE(tmp_buf, out, size, elem_size); free(tmp_buf); return count; } #else // #ifdef USESSE2 int64_t bshuf_untrans_bit_elem_SSE(const void* in, void* out, const size_t size, const size_t elem_size) { return -11; } int64_t bshuf_trans_bit_elem_SSE(const void* in, void* out, const size_t size, const size_t elem_size) { return -11; } int64_t bshuf_trans_byte_bitrow_SSE(const void* in, void* out, const size_t size, const size_t elem_size) { return -11; } int64_t bshuf_trans_bit_byte_SSE(const void* in, void* out, const size_t size, const size_t elem_size) { return -11; } int64_t bshuf_trans_byte_elem_SSE(const void* in, void* out, const size_t size, const size_t elem_size) { return -11; } int64_t bshuf_trans_byte_elem_SSE_64(const void* in, void* out, 
const size_t size) { return -11; } int64_t bshuf_trans_byte_elem_SSE_32(const void* in, void* out, const size_t size) { return -11; } int64_t bshuf_trans_byte_elem_SSE_16(const void* in, void* out, const size_t size) { return -11; } int64_t bshuf_shuffle_bit_eightelem_SSE(const void* in, void* out, const size_t size, const size_t elem_size) { return -11; } #endif // #ifdef USESSE2 /* ---- Code that requires AVX2. Intel Haswell (2013) and later. ---- */ /* ---- Worker code that uses AVX2 ---- * * The following code makes use of the AVX2 instruction set and specialized * 32 byte registers. The AVX2 instructions are present on newer x86 * processors. The first Intel processor microarchitecture supporting AVX2 was * Haswell (2013). * */ #ifdef USEAVX2 /* Transpose bits within bytes. */ int64_t bshuf_trans_bit_byte_AVX(const void* in, void* out, const size_t size, const size_t elem_size) { size_t ii, kk; const char* in_b = (const char*) in; char* out_b = (char*) out; int32_t* out_i32; size_t nbyte = elem_size * size; int64_t count; __m256i ymm; int32_t bt; for (ii = 0; ii + 31 < nbyte; ii += 32) { ymm = _mm256_loadu_si256((__m256i *) &in_b[ii]); for (kk = 0; kk < 8; kk++) { bt = _mm256_movemask_epi8(ymm); ymm = _mm256_slli_epi16(ymm, 1); out_i32 = (int32_t*) &out_b[((7 - kk) * nbyte + ii) / 8]; *out_i32 = bt; } } count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size, nbyte - nbyte % 32); return count; } /* Transpose bits within elements. 
*/ int64_t bshuf_trans_bit_elem_AVX(const void* in, void* out, const size_t size, const size_t elem_size) { int64_t count; CHECK_MULT_EIGHT(size); void* tmp_buf = malloc(size * elem_size); if (tmp_buf == NULL) return -1; count = bshuf_trans_byte_elem_SSE(in, out, size, elem_size); CHECK_ERR_FREE(count, tmp_buf); count = bshuf_trans_bit_byte_AVX(out, tmp_buf, size, elem_size); CHECK_ERR_FREE(count, tmp_buf); count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size); free(tmp_buf); return count; } /* For data organized into a row for each bit (8 * elem_size rows), transpose * the bytes. */ int64_t bshuf_trans_byte_bitrow_AVX(const void* in, void* out, const size_t size, const size_t elem_size) { size_t hh, ii, jj, kk, mm; const char* in_b = (const char*) in; char* out_b = (char*) out; CHECK_MULT_EIGHT(size); size_t nrows = 8 * elem_size; size_t nbyte_row = size / 8; if (elem_size % 4) return bshuf_trans_byte_bitrow_SSE(in, out, size, elem_size); __m256i ymm_0[8]; __m256i ymm_1[8]; __m256i ymm_storeage[8][4]; for (jj = 0; jj + 31 < nbyte_row; jj += 32) { for (ii = 0; ii + 3 < elem_size; ii += 4) { for (hh = 0; hh < 4; hh ++) { for (kk = 0; kk < 8; kk ++){ ymm_0[kk] = _mm256_loadu_si256((__m256i *) &in_b[ (ii * 8 + hh * 8 + kk) * nbyte_row + jj]); } for (kk = 0; kk < 4; kk ++){ ymm_1[kk] = _mm256_unpacklo_epi8(ymm_0[kk * 2], ymm_0[kk * 2 + 1]); ymm_1[kk + 4] = _mm256_unpackhi_epi8(ymm_0[kk * 2], ymm_0[kk * 2 + 1]); } for (kk = 0; kk < 2; kk ++){ for (mm = 0; mm < 2; mm ++){ ymm_0[kk * 4 + mm] = _mm256_unpacklo_epi16( ymm_1[kk * 4 + mm * 2], ymm_1[kk * 4 + mm * 2 + 1]); ymm_0[kk * 4 + mm + 2] = _mm256_unpackhi_epi16( ymm_1[kk * 4 + mm * 2], ymm_1[kk * 4 + mm * 2 + 1]); } } for (kk = 0; kk < 4; kk ++){ ymm_1[kk * 2] = _mm256_unpacklo_epi32(ymm_0[kk * 2], ymm_0[kk * 2 + 1]); ymm_1[kk * 2 + 1] = _mm256_unpackhi_epi32(ymm_0[kk * 2], ymm_0[kk * 2 + 1]); } for (kk = 0; kk < 8; kk ++){ ymm_storeage[kk][hh] = ymm_1[kk]; } } for (mm = 0; mm < 8; mm ++) { for (kk = 0; kk < 
4; kk ++){ ymm_0[kk] = ymm_storeage[mm][kk]; } ymm_1[0] = _mm256_unpacklo_epi64(ymm_0[0], ymm_0[1]); ymm_1[1] = _mm256_unpacklo_epi64(ymm_0[2], ymm_0[3]); ymm_1[2] = _mm256_unpackhi_epi64(ymm_0[0], ymm_0[1]); ymm_1[3] = _mm256_unpackhi_epi64(ymm_0[2], ymm_0[3]); ymm_0[0] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 32); ymm_0[1] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 32); ymm_0[2] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 49); ymm_0[3] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 49); _mm256_storeu_si256((__m256i *) &out_b[ (jj + mm * 2 + 0 * 16) * nrows + ii * 8], ymm_0[0]); _mm256_storeu_si256((__m256i *) &out_b[ (jj + mm * 2 + 0 * 16 + 1) * nrows + ii * 8], ymm_0[1]); _mm256_storeu_si256((__m256i *) &out_b[ (jj + mm * 2 + 1 * 16) * nrows + ii * 8], ymm_0[2]); _mm256_storeu_si256((__m256i *) &out_b[ (jj + mm * 2 + 1 * 16 + 1) * nrows + ii * 8], ymm_0[3]); } } } for (ii = 0; ii < nrows; ii ++ ) { for (jj = nbyte_row - nbyte_row % 32; jj < nbyte_row; jj ++) { out_b[jj * nrows + ii] = in_b[ii * nbyte_row + jj]; } } return size * elem_size; } /* Shuffle bits within the bytes of eight element blocks. */ int64_t bshuf_shuffle_bit_eightelem_AVX(const void* in, void* out, const size_t size, const size_t elem_size) { CHECK_MULT_EIGHT(size); // With a bit of care, this could be written such that such that it is // in_buf = out_buf safe. 
const char* in_b = (const char*) in; char* out_b = (char*) out; size_t ii, jj, kk; size_t nbyte = elem_size * size; __m256i ymm; int32_t bt; if (elem_size % 4) { return bshuf_shuffle_bit_eightelem_SSE(in, out, size, elem_size); } else { for (jj = 0; jj + 31 < 8 * elem_size; jj += 32) { for (ii = 0; ii + 8 * elem_size - 1 < nbyte; ii += 8 * elem_size) { ymm = _mm256_loadu_si256((__m256i *) &in_b[ii + jj]); for (kk = 0; kk < 8; kk++) { bt = _mm256_movemask_epi8(ymm); ymm = _mm256_slli_epi16(ymm, 1); size_t ind = (ii + jj / 8 + (7 - kk) * elem_size); * (int32_t *) &out_b[ind] = bt; } } } } return size * elem_size; } /* Untranspose bits within elements. */ int64_t bshuf_untrans_bit_elem_AVX(const void* in, void* out, const size_t size, const size_t elem_size) { int64_t count; CHECK_MULT_EIGHT(size); void* tmp_buf = malloc(size * elem_size); if (tmp_buf == NULL) return -1; count = bshuf_trans_byte_bitrow_AVX(in, tmp_buf, size, elem_size); CHECK_ERR_FREE(count, tmp_buf); count = bshuf_shuffle_bit_eightelem_AVX(tmp_buf, out, size, elem_size); free(tmp_buf); return count; } #else // #ifdef USEAVX2 int64_t bshuf_trans_bit_byte_AVX(const void* in, void* out, const size_t size, const size_t elem_size) { return -12; } int64_t bshuf_trans_bit_elem_AVX(const void* in, void* out, const size_t size, const size_t elem_size) { return -12; } int64_t bshuf_trans_byte_bitrow_AVX(const void* in, void* out, const size_t size, const size_t elem_size) { return -12; } int64_t bshuf_shuffle_bit_eightelem_AVX(const void* in, void* out, const size_t size, const size_t elem_size) { return -12; } int64_t bshuf_untrans_bit_elem_AVX(const void* in, void* out, const size_t size, const size_t elem_size) { return -12; } #endif // #ifdef USEAVX2 /* ---- Drivers selecting best instruction set at compile time. 
---- */

/* Transpose bits within elements, dispatching at compile time to the best
 * implementation that was enabled when this file was built
 * (AVX2, then SSE2, then NEON, then scalar).  Returns the number of bytes
 * processed, or a negative error code. */
int64_t bshuf_trans_bit_elem(const void* in, void* out, const size_t size,
        const size_t elem_size) {

    int64_t count;
#ifdef USEAVX2
    count = bshuf_trans_bit_elem_AVX(in, out, size, elem_size);
#elif defined(USESSE2)
    count = bshuf_trans_bit_elem_SSE(in, out, size, elem_size);
#elif defined(USEARMNEON)
    count = bshuf_trans_bit_elem_NEON(in, out, size, elem_size);
#else
    count = bshuf_trans_bit_elem_scal(in, out, size, elem_size);
#endif
    return count;
}

/* Inverse of bshuf_trans_bit_elem, using the same compile-time dispatch.
 * Returns bytes processed or a negative error code. */
int64_t bshuf_untrans_bit_elem(const void* in, void* out, const size_t size,
        const size_t elem_size) {

    int64_t count;
#ifdef USEAVX2
    count = bshuf_untrans_bit_elem_AVX(in, out, size, elem_size);
#elif defined(USESSE2)
    count = bshuf_untrans_bit_elem_SSE(in, out, size, elem_size);
#elif defined(USEARMNEON)
    count = bshuf_untrans_bit_elem_NEON(in, out, size, elem_size);
#else
    count = bshuf_untrans_bit_elem_scal(in, out, size, elem_size);
#endif
    return count;
}

/* ---- Wrappers for implementing blocking ---- */

/* Wrap a function for processing a single block to process an entire buffer in
 * parallel. 
*/
/* Apply the per-block function `fun` to an entire buffer, block by block,
 * in parallel when OpenMP is enabled.  A trailing partial block (rounded
 * down to a BSHUF_BLOCKED_MULT multiple) is processed serially, and any
 * remaining tail bytes are copied through unmodified.  Returns the total
 * number of bytes handled (shuffled plus plain-copied), or a negative error
 * code (-81 for an invalid block_size; the first negative count from `fun`
 * otherwise).
 * NOTE(review): `err` is written by multiple OpenMP threads without
 * synchronization; only its final sign is inspected — confirm this is the
 * intended best-effort error reporting. */
int64_t bshuf_blocked_wrap_fun(bshufBlockFunDef fun, const void* in, void* out, \
        const size_t size, const size_t elem_size, size_t block_size) {

    omp_size_t ii = 0;
    int64_t err = 0;
    int64_t count, cum_count=0;
    size_t last_block_size;
    size_t leftover_bytes;
    size_t this_iter;
    char *last_in;
    char *last_out;

    /* The ioc_chain hands out matching input/output cursors to workers. */
    ioc_chain C;
    ioc_init(&C, in, out);

    if (block_size == 0) {
        block_size = bshuf_default_block_size(elem_size);
    }
    /* Block size must be a multiple of BSHUF_BLOCKED_MULT elements. */
    if (block_size % BSHUF_BLOCKED_MULT) return -81;

#if defined(_OPENMP)
    #pragma omp parallel for schedule(dynamic, 1) \
            private(count) reduction(+ : cum_count)
#endif
    for (ii = 0; ii < (omp_size_t)( size / block_size ); ii ++) {
        count = fun(&C, block_size, elem_size);
        if (count < 0) err = count;
        cum_count += count;
    }

    /* Serially process the final partial block, if any. */
    last_block_size = size % block_size;
    last_block_size = last_block_size - last_block_size % BSHUF_BLOCKED_MULT;
    if (last_block_size) {
        count = fun(&C, last_block_size, elem_size);
        if (count < 0) err = count;
        cum_count += count;
    }

    if (err < 0) return err;

    /* Copy any leftover bytes (fewer than BSHUF_BLOCKED_MULT elements)
     * straight through without shuffling. */
    leftover_bytes = size % BSHUF_BLOCKED_MULT * elem_size;
    last_in = (char *) ioc_get_in(&C, &this_iter);
    ioc_set_next_in(&C, &this_iter, (void *) (last_in + leftover_bytes));
    last_out = (char *) ioc_get_out(&C, &this_iter);
    ioc_set_next_out(&C, &this_iter, (void *) (last_out + leftover_bytes));
    memcpy(last_out, last_in, leftover_bytes);

    ioc_destroy(&C);

    return cum_count + leftover_bytes;
}

/* Bitshuffle a single block: acquire this worker's input/output cursors from
 * the chain, advance them past one block, and bit-transpose that block. */
int64_t bshuf_bitshuffle_block(ioc_chain *C_ptr, \
        const size_t size, const size_t elem_size) {

    size_t this_iter;
    const void *in;
    void *out;
    int64_t count;

    in = ioc_get_in(C_ptr, &this_iter);
    ioc_set_next_in(C_ptr, &this_iter,
            (void*) ((char*) in + size * elem_size));
    out = ioc_get_out(C_ptr, &this_iter);
    ioc_set_next_out(C_ptr, &this_iter,
            (void *) ((char *) out + size * elem_size));

    count = bshuf_trans_bit_elem(in, out, size, elem_size);
    return count;
}

/* Bitunshuffle a single block. 
*/ int64_t bshuf_bitunshuffle_block(ioc_chain* C_ptr, \ const size_t size, const size_t elem_size) { size_t this_iter; const void *in; void *out; int64_t count; in = ioc_get_in(C_ptr, &this_iter); ioc_set_next_in(C_ptr, &this_iter, (void*) ((char*) in + size * elem_size)); out = ioc_get_out(C_ptr, &this_iter); ioc_set_next_out(C_ptr, &this_iter, (void *) ((char *) out + size * elem_size)); count = bshuf_untrans_bit_elem(in, out, size, elem_size); return count; } /* Write a 64 bit unsigned integer to a buffer in big endian order. */ void bshuf_write_uint64_BE(void* buf, uint64_t num) { int ii; uint8_t* b = (uint8_t*) buf; uint64_t pow28 = 1 << 8; for (ii = 7; ii >= 0; ii--) { b[ii] = num % pow28; num = num / pow28; } } /* Read a 64 bit unsigned integer from a buffer big endian order. */ uint64_t bshuf_read_uint64_BE(void* buf) { int ii; uint8_t* b = (uint8_t*) buf; uint64_t num = 0, pow28 = 1 << 8, cp = 1; for (ii = 7; ii >= 0; ii--) { num += b[ii] * cp; cp *= pow28; } return num; } /* Write a 32 bit unsigned integer to a buffer in big endian order. */ void bshuf_write_uint32_BE(void* buf, uint32_t num) { int ii; uint8_t* b = (uint8_t*) buf; uint32_t pow28 = 1 << 8; for (ii = 3; ii >= 0; ii--) { b[ii] = num % pow28; num = num / pow28; } } /* Read a 32 bit unsigned integer from a buffer big endian order. */ uint32_t bshuf_read_uint32_BE(const void* buf) { int ii; uint8_t* b = (uint8_t*) buf; uint32_t num = 0, pow28 = 1 << 8, cp = 1; for (ii = 3; ii >= 0; ii--) { num += b[ii] * cp; cp *= pow28; } return num; } /* ---- Public functions ---- * * See header file for description and usage. * */ size_t bshuf_default_block_size(const size_t elem_size) { // This function needs to be absolutely stable between versions. // Otherwise encoded data will not be decodable. size_t block_size = BSHUF_TARGET_BLOCK_SIZE_B / elem_size; // Ensure it is a required multiple. 
block_size = (block_size / BSHUF_BLOCKED_MULT) * BSHUF_BLOCKED_MULT; return MAX(block_size, BSHUF_MIN_RECOMMEND_BLOCK); } int64_t bshuf_bitshuffle(const void* in, void* out, const size_t size, const size_t elem_size, size_t block_size) { return bshuf_blocked_wrap_fun(&bshuf_bitshuffle_block, in, out, size, elem_size, block_size); } int64_t bshuf_bitunshuffle(const void* in, void* out, const size_t size, const size_t elem_size, size_t block_size) { return bshuf_blocked_wrap_fun(&bshuf_bitunshuffle_block, in, out, size, elem_size, block_size); } #undef TRANS_BIT_8X8 #undef TRANS_ELEM_TYPE #undef MAX #undef CHECK_MULT_EIGHT #undef CHECK_ERR_FREE #undef USESSE2 #undef USEAVX2
/* ==== begin file: nr_numint.c ==== */
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <string.h> #include <assert.h> #include "config.h" #include "gto/grid_ao_drv.h" #include "np_helper/np_helper.h" #include "vhf/fblas.h" #define BOXSIZE 56 int VXCao_empty_blocks(char *empty, unsigned char *non0table, int *shls_slice, int *ao_loc) { if (non0table == NULL || shls_slice == NULL || ao_loc == NULL) { return 0; } const int sh0 = shls_slice[0]; const int sh1 = shls_slice[1]; int bas_id; int box_id = 0; int bound = BOXSIZE; int has0 = 0; empty[box_id] = 1; for (bas_id = sh0; bas_id < sh1; bas_id++) { empty[box_id] &= !non0table[bas_id]; if (ao_loc[bas_id] == bound) { has0 |= empty[box_id]; box_id++; bound += BOXSIZE; empty[box_id] = 1; } else if (ao_loc[bas_id] > bound) { has0 |= empty[box_id]; box_id++; bound += BOXSIZE; empty[box_id] = !non0table[bas_id]; } } return has0; } static void dot_ao_dm(double *vm, double *ao, double *dm, int nao, int nocc, int ngrids, int bgrids, unsigned char *non0table, int *shls_slice, int *ao_loc) { int nbox = (nao+BOXSIZE-1) / BOXSIZE; char empty[nbox]; int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc); const char TRANS_T = 'T'; const char TRANS_N = 'N'; const double D1 = 1; double beta = 0; if (has0) { int box_id, blen, i, j; size_t b0; for (box_id = 0; box_id < nbox; box_id++) { if (!empty[box_id]) { b0 = box_id * BOXSIZE; blen = MIN(nao-b0, BOXSIZE); 
dgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &blen, &D1, ao+b0*ngrids, &ngrids, dm+b0*nocc, &nocc, &beta, vm, &ngrids); beta = 1.0; } } if (beta == 0) { // all empty for (i = 0; i < nocc; i++) { for (j = 0; j < bgrids; j++) { vm[i*ngrids+j] = 0; } } } } else { dgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &nao, &D1, ao, &ngrids, dm, &nocc, &beta, vm, &ngrids); } } /* vm[nocc,ngrids] = ao[i,ngrids] * dm[i,nocc] */ void VXCdot_ao_dm(double *vm, double *ao, double *dm, int nao, int nocc, int ngrids, int nbas, unsigned char *non0table, int *shls_slice, int *ao_loc) { const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE; #pragma omp parallel { int ip, ib; #pragma omp for nowait schedule(static) for (ib = 0; ib < nblk; ib++) { ip = ib * BLKSIZE; dot_ao_dm(vm+ip, ao+ip, dm, nao, nocc, ngrids, MIN(ngrids-ip, BLKSIZE), non0table+ib*nbas, shls_slice, ao_loc); } } } /* vv[n,m] = ao1[n,ngrids] * ao2[m,ngrids] */ static void dot_ao_ao(double *vv, double *ao1, double *ao2, int nao, int ngrids, int bgrids, int hermi, unsigned char *non0table, int *shls_slice, int *ao_loc) { int nbox = (nao+BOXSIZE-1) / BOXSIZE; char empty[nbox]; int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc); const char TRANS_T = 'T'; const char TRANS_N = 'N'; const double D1 = 1; if (has0) { int ib, jb, leni, lenj; int j1 = nbox; size_t b0i, b0j; for (ib = 0; ib < nbox; ib++) { if (!empty[ib]) { b0i = ib * BOXSIZE; leni = MIN(nao-b0i, BOXSIZE); if (hermi) { j1 = ib + 1; } for (jb = 0; jb < j1; jb++) { if (!empty[jb]) { b0j = jb * BOXSIZE; lenj = MIN(nao-b0j, BOXSIZE); dgemm_(&TRANS_T, &TRANS_N, &lenj, &leni, &bgrids, &D1, ao2+b0j*ngrids, &ngrids, ao1+b0i*ngrids, &ngrids, &D1, vv+b0i*nao+b0j, &nao); } } } } } else { dgemm_(&TRANS_T, &TRANS_N, &nao, &nao, &bgrids, &D1, ao2, &ngrids, ao1, &ngrids, &D1, vv, &nao); } } /* vv[nao,nao] = ao1[i,nao] * ao2[i,nao] */ void VXCdot_ao_ao(double *vv, double *ao1, double *ao2, int nao, int ngrids, int nbas, int hermi, unsigned char *non0table, int *shls_slice, int 
*ao_loc) { const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE; memset(vv, 0, sizeof(double) * nao * nao); #pragma omp parallel { int ip, ib; double *v_priv = calloc(nao*nao+2, sizeof(double)); #pragma omp for nowait schedule(static) for (ib = 0; ib < nblk; ib++) { ip = ib * BLKSIZE; dot_ao_ao(v_priv, ao1+ip, ao2+ip, nao, ngrids, MIN(ngrids-ip, BLKSIZE), hermi, non0table+ib*nbas, shls_slice, ao_loc); } #pragma omp critical { for (ip = 0; ip < nao*nao; ip++) { vv[ip] += v_priv[ip]; } } free(v_priv); } if (hermi != 0) { NPdsymm_triu(nao, vv, hermi); } } // 'nip,np->ip' void VXC_dscale_ao(double *aow, double *ao, double *wv, int comp, int nao, int ngrids) { #pragma omp parallel { size_t Ngrids = ngrids; size_t ao_size = nao * Ngrids; int i, j, ic; double *pao = ao; #pragma omp for schedule(static) for (i = 0; i < nao; i++) { pao = ao + i * Ngrids; for (j = 0; j < Ngrids; j++) { aow[i*Ngrids+j] = pao[j] * wv[j]; } for (ic = 1; ic < comp; ic++) { for (j = 0; j < Ngrids; j++) { aow[i*Ngrids+j] += pao[ic*ao_size+j] * wv[ic*Ngrids+j]; } } } } } // 'ip,ip->p' void VXC_dcontract_rho(double *rho, double *bra, double *ket, int nao, int ngrids) { #pragma omp parallel { size_t Ngrids = ngrids; int nthread = omp_get_num_threads(); int blksize = MAX((Ngrids+nthread-1) / nthread, 1); int ib, b0, b1, i, j; #pragma omp for for (ib = 0; ib < nthread; ib++) { b0 = ib * blksize; b1 = MIN(b0 + blksize, ngrids); for (j = b0; j < b1; j++) { rho[j] = bra[j] * ket[j]; } for (i = 1; i < nao; i++) { for (j = b0; j < b1; j++) { rho[j] += bra[i*Ngrids+j] * ket[i*Ngrids+j]; } } } } }
/* ==== begin file: GB_atomics.h ==== */
//------------------------------------------------------------------------------ // GB_atomics.h: definitions for atomic operations //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // All atomic operations used by SuiteSparse:GraphBLAS appear in this file. // These atomic operations assume either an ANSI C11 compiler that supports // OpenMP 3.1 or later, or Microsoft Visual Studio on 64-bit Windows (which // only supports OpenMP 2.0). SuiteSparse:GraphBLAS is not supported on 32-bit // Windows. #ifndef GB_ATOMICS_H #define GB_ATOMICS_H #include "GB.h" //------------------------------------------------------------------------------ // atomic updates //------------------------------------------------------------------------------ // Whenever possible, the OpenMP pragma is used with a clause (as introduced in // OpenMP 3.1), as follow: // // #pragma omp atomic [clause] // // where [clause] is read, write, update, or capture. // // Microsoft Visual Studio only supports OpenMP 2.0, which does not have the // [clause]. Without the [clause], #pragma omp atomic is like // #pragma omp atomic update, but the expression can only be one of: // // x binop= expression // x++ // ++x // x-- // --x // // where binop is one of these operators: + * - / & ^ | << >> // // OpenMP 3.0 and later support additional options for the "update" clause, // but SuiteSparse:GraphBLAS uses only this form: // // x binop= expression // // where binop is: + * & ^ | // // This atomic update is used for the PLUS, TIMES, LAND, LXOR, and LOR monoids, // when applied to the built-in types. For PLUS and TIMES, these are the 10 // types INTx, UINTx, FP32, FP64 (for x = 8, 16, 32, and 64). For the boolean // monoids, only the BOOL type is used. 
// // As a result, the atomic updates are the same for gcc and icc (which support // OpenMP 3.0 or later) with the "update" clause. For MS Visual Studio, the // "update" clause is removed since it supports OpenMP 2.0. #if ( _OPENMP >= 201307 ) // OpenMP 4.0 or later // #define GB_ATOMIC_UPDATE GB_PRAGMA (omp atomic update seq_cst) #define GB_ATOMIC_UPDATE GB_PRAGMA (omp atomic update) #elif ( _OPENMP >= 201107 ) // OpenMP 3.1 #define GB_ATOMIC_UPDATE GB_PRAGMA (omp atomic update) #elif ( _OPENMP >= 199810 ) // OpenMP 1.0 to 3.0: no optional clauses, "update" is assumed #define GB_ATOMIC_UPDATE GB_PRAGMA (omp atomic) #else // no OpenMP at all #define GB_ATOMIC_UPDATE #endif //------------------------------------------------------------------------------ // atomic read and write //------------------------------------------------------------------------------ // x86_64: no atomic read/write is needed. // ARM, Power8/9, and others need the explicit atomic read/write. // In Microsoft Visual Studio, simple reads and writes to properly aligned // 64-bit values are already atomic on 64-bit Windows for any architecture // supported by Windows (any Intel or ARM architecture). See: // https://docs.microsoft.com/en-us/windows/win32/sync/interlocked-variable-access // SuiteSparse:GraphBLAS is not supported on 32-bit Windows. Thus, there // is no need for atomic reads/writes when compiling GraphBLAS on Windows // with MS Visual Studio. #if GBX86 // x86_64: no atomic read/write is needed. 
#define GB_ATOMIC_READ #define GB_ATOMIC_WRITE #elif ( _OPENMP >= 201107 ) // OpenMP 3.1 and later have atomic reads and writes #define GB_ATOMIC_READ GB_PRAGMA (omp atomic read) #define GB_ATOMIC_WRITE GB_PRAGMA (omp atomic write) #else // OpenMP 3.0 or earlier, or no OpenMP at all #define GB_ATOMIC_READ #define GB_ATOMIC_WRITE #endif //------------------------------------------------------------------------------ // flush //------------------------------------------------------------------------------ #if defined ( _OPENMP ) // All versions of OpenMP have the #pragma omp flush #define GB_OMP_FLUSH GB_PRAGMA (omp flush) #else // no OpenMP at all #define GB_OMP_FLUSH #endif //------------------------------------------------------------------------------ // atomic capture //------------------------------------------------------------------------------ // An atomic capture loads the prior value of the target into a thread-local // result, and then overwrites the target with the new value. The target is a // value that is shared between threads. The value and result arguments are // thread-local. SuiteSparse:GraphBLAS uses four atomic capture methods, // defined below, of the form: // // { result = target ; target = value ; } for int64_t and int8_t // { result = target ; target |= value ; } for int64_t // { result = target++ ; } for int64_t // // OpenMP 3.1 and later supports atomic captures with a "capture" clause: // // #pragma omp atomic capture // { result = target ; target = value ; } // // or with a binary operator // // #pragma omp atomic capture // { result = target ; target binop= value ; } // // MS Visual Studio supports only OpenMP 2.0, and does not support any // "capture" clause. 
// Thus, on Windows, the _InterlockedExchange* and
// _InterlockedOr* functions are used instead, as described here:
//
// https://docs.microsoft.com/en-us/cpp/intrinsics/interlockedexchange-intrinsic-functions
// https://docs.microsoft.com/en-us/cpp/intrinsics/interlockedor-intrinsic-functions

#if ( _OPENMP >= 201307 )

    // OpenMP 4.0 or later
    // (seq_cst variant kept for reference; the plain capture clause is used)
    // #define GB_ATOMIC_CAPTURE GB_PRAGMA (omp atomic capture seq_cst)
    #define GB_ATOMIC_CAPTURE GB_PRAGMA (omp atomic capture)

#elif ( _OPENMP >= 201107 )

    // OpenMP 3.1
    #define GB_ATOMIC_CAPTURE GB_PRAGMA (omp atomic capture)

#elif ( _OPENMP >= 199810 )

    // OpenMP 1.0 to 3.0: generate an intentional compile-time error if any
    // attempt is made to use the atomic capture.
    #define GB_ATOMIC_CAPTURE atomic capture not available

#else

    // no OpenMP at all
    #define GB_ATOMIC_CAPTURE

#endif

//--------------------------------------------------------------------------
// atomic capture for int64_t
//--------------------------------------------------------------------------

// int64_t result, target, value ;
// do this atomically: { result = target ; target = value ; }

#if GB_COMPILER_MSC

    // MSVC: use the 64-bit interlocked exchange intrinsic
    #define GB_ATOMIC_CAPTURE_INT64(result, target, value)              \
    {                                                                   \
        result = _InterlockedExchange64                                 \
            ((int64_t volatile *) (&(target)), value) ;                 \
    }

#else

    // gcc/icc/clang: OpenMP atomic capture block
    #define GB_ATOMIC_CAPTURE_INT64(result, target, value)              \
    {                                                                   \
        GB_ATOMIC_CAPTURE                                               \
        {                                                               \
            result = target ;                                           \
            target = value ;                                            \
        }                                                               \
    }

#endif

//--------------------------------------------------------------------------
// atomic capture for int8_t
//--------------------------------------------------------------------------

// int8_t result, target, value ;
// do this atomically: { result = target ; target = value ; }

#if GB_COMPILER_MSC

    // MSVC: _InterlockedExchange8 operates on char
    #define GB_ATOMIC_CAPTURE_INT8(result, target, value)               \
    {                                                                   \
        result = _InterlockedExchange8                                  \
            ((char volatile *) &(target), value) ;                      \
    }

#else

    #define GB_ATOMIC_CAPTURE_INT8(result, target, value)               \
    {                                                                   \
        GB_ATOMIC_CAPTURE                                               \
        {                                                               \
            result = target ;                                           \
            target = value ;                                            \
        }                                                               \
    }

#endif

//--------------------------------------------------------------------------
// atomic capture with bitwise OR, for int64_t
//--------------------------------------------------------------------------

// int64_t result, target, value ;
// do this atomically: { result = target ; target |= value ; }

#if GB_COMPILER_MSC

    #define GB_ATOMIC_CAPTURE_INT64_OR(result, target, value)           \
    {                                                                   \
        result = _InterlockedOr64                                       \
            ((int64_t volatile *) (&(target)), value) ;                 \
    }

#else

    #define GB_ATOMIC_CAPTURE_INT64_OR(result, target, value)           \
    {                                                                   \
        GB_ATOMIC_CAPTURE                                               \
        {                                                               \
            result = target ;                                           \
            target |= value ;                                           \
        }                                                               \
    }

#endif

//--------------------------------------------------------------------------
// atomic post-increment
//--------------------------------------------------------------------------

// Increment an int64_t value and return the value prior to being
// incremented:
//
//      int64_t result = target++ ;
//
// See
// https://docs.microsoft.com/en-us/cpp/intrinsics/interlockedincrement-intrinsic-functions?view=msvc-160

// The MS Visual Studio version computes result = ++target, so result must
// be decremented by one to obtain post-increment semantics.

#if GB_COMPILER_MSC

    #define GB_ATOMIC_CAPTURE_INC64(result,target)                      \
    {                                                                   \
        result = _InterlockedIncrement64                                \
            ((int64_t volatile *) (&(target))) - 1 ;                    \
    }

#else

    #define GB_ATOMIC_CAPTURE_INC64(result,target)                      \
    {                                                                   \
        GB_ATOMIC_CAPTURE                                               \
        result = (target)++ ;                                           \
    }

#endif

//------------------------------------------------------------------------------
// atomic compare-and-exchange
//------------------------------------------------------------------------------

// Atomic compare-and-exchange is used to implement the MAX, MIN and EQ
// monoids, for the fine-grain saxpy-style matrix multiplication.  Ideally,
// OpenMP would be used for these atomic operation, but they are not
// supported.  So compiler-specific functions are used instead.
// In gcc, icc, and clang, the atomic compare-and-exchange function
// __atomic_compare_exchange computes the following, as a single atomic
// operation, where type_t is any 8, 16, 32, or 64 bit scalar type.  In
// SuiteSparse:GraphBLAS, type_t can be bool, int8_t, uint8_t, int16_t,
// uint16_t, int32_t, uint32_t, int64_t, uint64_t, float, or double.
//
//      bool __atomic_compare_exchange
//      (
//          type_t *target,         // input/output
//          type_t *expected,       // input/output
//          type_t *desired,        // input only, even though it is a pointer
//          bool weak,              // true, for SuiteSparse:GraphBLAS
//          int success_memorder,   // __ATOMIC_SEQ_CST is used here
//          int failure_memorder    // __ATOMIC_SEQ_CST is used here
//      )
//      {
//          bool result ;
//          if (*target == *expected)
//          {
//              *target = *desired ;
//              result = true ;
//          }
//          else
//          {
//              *expected = *target ;
//              result = false ;
//          }
//          return (result) ;
//      }
//
// The generic __atomic_compare_exchange function in gcc (also supported by
// icc) computes the above for any of these 8, 16, 32, or 64-bit scalar types
// needed in SuiteSparse:GraphBLAS.  SuiteSparse:GraphBLAS does not require
// the 'expected = target' assignment if the result is false.  It ignores the
// value of 'expected' after the operation completes.  The target, expected,
// and desired parameters are all provided as pointers:
//
// See https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html

// Microsoft Visual Studio provides similar but not identical functionality
// in the _InterlockedCompareExchange functions, but they are named
// differently for different types.  Only int8_t, int16_t, int32_t, and
// int64_t types are supported.  For the int64_t case, the following is
// performed atomically:
//
//      int64_t _InterlockedCompareExchange64
//      (
//          int64_t volatile *target,   // input/output
//          int64_t desired,            // input only
//          int64_t expected
//      )
//      {
//          int64_t result = *target ;
//          if (*target == expected)
//          {
//              target = desired ;
//          }
//          return (result) ;
//      }
//
// It does not assign "expected = target" if the test is false, but
// SuiteSparse:GraphBLAS does not require this action.  It does not return a
// boolean result, but instead returns the original value of (*target).
// However, this can be compared with the expected value to obtain the
// same boolean result as __atomic_compare_exchange.
//
// Type punning is used to extend these signed integer types to unsigned
// integers of the same number of bytes, and to float and double.

#if GB_COMPILER_MSC

    //--------------------------------------------------------------------------
    // GB_PUN: type punning
    //--------------------------------------------------------------------------

    // With type punning, a value is treated as a different type, but with no
    // typecasting.  The address of the variable is first typecasted to a
    // (type *) pointer, and then the pointer is dereferenced.  Type punning
    // is only needed to extend the atomic compare/exchange functions for
    // Microsoft Visual Studio.

    #define GB_PUN(type,value) (*((type *) (&(value))))

    //--------------------------------------------------------------------------
    // compare/exchange for MS Visual Studio
    //--------------------------------------------------------------------------

    // Each variant returns true iff the exchange took place (the intrinsic's
    // returned prior value equals the expected value).

    // bool, int8_t, and uint8_t
    #define GB_ATOMIC_COMPARE_EXCHANGE_8(target, expected, desired)         \
    (                                                                       \
        GB_PUN (int8_t, expected) ==                                        \
            _InterlockedCompareExchange8 ((int8_t volatile *) (target),     \
            GB_PUN (int8_t, desired), GB_PUN (int8_t, expected))            \
    )

    // int16_t and uint16_t
    #define GB_ATOMIC_COMPARE_EXCHANGE_16(target, expected, desired)        \
    (                                                                       \
        GB_PUN (int16_t, expected) ==                                       \
            _InterlockedCompareExchange16 ((int16_t volatile *) (target),   \
            GB_PUN (int16_t, desired), GB_PUN (int16_t, expected))          \
    )

    // float, int32_t, and uint32_t
    #define GB_ATOMIC_COMPARE_EXCHANGE_32(target, expected, desired)        \
    (                                                                       \
        GB_PUN (int32_t, expected) ==                                       \
            _InterlockedCompareExchange ((int32_t volatile *) (target),     \
            GB_PUN (int32_t, desired), GB_PUN (int32_t, expected))          \
    )

    // double, int64_t, and uint64_t
    #define GB_ATOMIC_COMPARE_EXCHANGE_64(target, expected, desired)        \
    (                                                                       \
        GB_PUN (int64_t, expected) ==                                       \
            _InterlockedCompareExchange64 ((int64_t volatile *) (target),   \
            GB_PUN (int64_t, desired), GB_PUN (int64_t, expected))          \
    )

#else

    //--------------------------------------------------------------------------
    // compare/exchange for gcc, icc, and clang on x86 and Power8/9
    //--------------------------------------------------------------------------

    // the compare/exchange function is generic for any type
    #define GB_ATOMIC_COMPARE_EXCHANGE_X(target, expected, desired)         \
        __atomic_compare_exchange (target, &expected, &desired,             \
            true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)

    // bool, int8_t, and uint8_t
    #define GB_ATOMIC_COMPARE_EXCHANGE_8(target, expected, desired)         \
            GB_ATOMIC_COMPARE_EXCHANGE_X(target, expected, desired)

    // int16_t and uint16_t
    #define GB_ATOMIC_COMPARE_EXCHANGE_16(target, expected, desired)        \
            GB_ATOMIC_COMPARE_EXCHANGE_X (target, expected, desired)

    // float, int32_t, and uint32_t
    #define GB_ATOMIC_COMPARE_EXCHANGE_32(target, expected, desired)        \
            GB_ATOMIC_COMPARE_EXCHANGE_X (target, expected, desired)

    // double, int64_t, and uint64_t
    #define GB_ATOMIC_COMPARE_EXCHANGE_64(target, expected, desired)        \
            GB_ATOMIC_COMPARE_EXCHANGE_X (target, expected, desired)

#endif
#endif
mobilenet_64.c
/*
Pretrained MobileNet Convolutional Neural Network in C language and OpenMP API
GitHub Page: https://github.com/jcanore/vgg16
Author: ZFTurbo/jocare

Compilation: gcc -O3 MobileNet_CPU_cifar.c -lm -fopenmp -o MobileNet_CPU_cifar
Usage: MobileNet_CPU_cifar <weights_path> <file_with_list_of_images> <output file> <output convolution features (optional)>
Example: MobileNet_CPU_cifar ../../weights/weights.txt ../../img/image_list.txt results_imagenet_conv.txt 1
*/

#include <ctype.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

// Elapsed wall-clock time in seconds between two gettimeofday() samples.
double get_seconds(struct timeval tStart, struct timeval tEnd) {
  return ((tEnd.tv_sec - tStart.tv_sec) * 1000000 + tEnd.tv_usec -
          tStart.tv_usec) /
         1.e6;
}

#define SIZE 64          // input image width/height
#define CONV_SIZE 3      // depthwise kernel size
#define CONV_LEVELS 27   // total convolutional layers
//#define _CRT_SECURE_NO_WARNINGS 1

// precompile variables
// assure default values if nothing provided
#ifndef SPARSE_CONVOLUTIONS
#define SPARSE_CONVOLUTIONS 0  // default dense convolutions
#endif  // SPARSE_CONVOLUTIONS

#ifndef FIRST_CONV_SPARSE
#define FIRST_CONV_SPARSE 0  // this is almost never 1
#endif  // FIRST_CONV_SPARSE

#ifndef SPARSE_FULLY_CONNECTED
#define SPARSE_FULLY_CONNECTED 0  // this is not implemented yet
#endif  // SPARSE_FULLY_CONNECTED

#ifndef FISHER_PRUNING
#define FISHER_PRUNING \
  0  // set for fisher pruning, all previous variables changed to dense
#endif  // FISHER_PRUNING

#ifndef NUMBER_OF_THREADS
#define NUMBER_OF_THREADS 1  // number of threads to run on
//#define NUMBER_OF_THREADS omp_get_num_procs() - 1
#endif  // NUMBER_OF_THREADS

// accumulated timings, reset per image in main()
static double pw_conv_time = 0.0;
static double dense_time = 0.0;

/****************************************************************************************************************************/
// Feature-map side length before each conv level, and the stride applied by
// each depthwise conv level.
int im_sizes[27] = {64, 64, 16, 16, 16, 16, 8, 8, 8, 8, 4, 4, 4, 4,
                    4,  4,  4,  4,  4,  4,  4, 4, 2, 2, 2, 2, 2};
int strides[26] = {1, 2, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1,
                   1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1};
int mem_block_shape[3] = {
    1024, 64, 64};  // allocate the absolute maximum amount of space we will need
float ***block1;
float ***block2;

float *****wc;  // weights convolution
float ***wd;    // weights dense
float **bd;     // biases dense

float **batchnorm_weights;
float **batchnorm_biases;
float **batchnorm_means;  // running mean and variance from training used to
                          // estimate population statistics
float **batchnorm_vars;

// NOTE(review): scalar initialized with a braced list -- legal C, but the
// braces are misleading; it is just the single value 1024*2*2.
int mem_block_dense_shape = {
    1024 * 2 * 2};  // size of output from last convolutional layer
float *mem_block1_dense;
float *mem_block2_dense;

#if SPARSE_CONVOLUTIONS
// sparse conv
csr_t ****wc_sparse;
#endif  // SPARSE_CONVOLUTIONS

#if FISHER_PRUNING
#define SPARSE_CONVOLUTIONS 0  // force dense convolutions

/*
// ORIGINAL FISHER EXPERIMENTS
int cshape[27][4] = {
        { 64, 3, CONV_SIZE, CONV_SIZE }, { 64, 1, CONV_SIZE, CONV_SIZE },
        { 43, 64, 1, 1 }, { 43, 1, CONV_SIZE, CONV_SIZE },
        { 85, 43, 1, 1 }, { 85, 1, CONV_SIZE, CONV_SIZE },
        { 70, 85, 1, 1 }, { 70, 1, CONV_SIZE, CONV_SIZE },
        { 150, 70, 1, 1 }, { 150, 1, CONV_SIZE, CONV_SIZE },
        { 69, 150, 1, 1 }, { 69, 1, CONV_SIZE, CONV_SIZE },
        { 188, 69, 1, 1 }, { 188, 1, CONV_SIZE, CONV_SIZE },
        { 72, 188, 1, 1 }, { 72, 1, CONV_SIZE, CONV_SIZE },
        { 122, 72, 1, 1 }, { 122, 1, CONV_SIZE, CONV_SIZE },
        { 106, 122, 1, 1 }, { 106, 1, CONV_SIZE, CONV_SIZE },
        { 96, 106, 1, 1 }, { 96, 1, CONV_SIZE, CONV_SIZE },
        { 81, 96, 1, 1 }, { 81, 1, CONV_SIZE, CONV_SIZE },
        { 75, 81, 1, 1 }, { 75, 1, CONV_SIZE, CONV_SIZE },
        { 100, 75, 1, 1 } };
int dshape[1][2]= { { 100, 10} };
*/

// FIXED 90% ACCURACY EXPERIMENTS
// cshape[l] = {out_channels, in_channels, kernel_h, kernel_w} per level
int cshape[27][4] = {{64, 3, CONV_SIZE, CONV_SIZE},
                     {64, 1, CONV_SIZE, CONV_SIZE},
                     {43, 64, 1, 1},
                     {43, 1, CONV_SIZE, CONV_SIZE},
                     {85, 43, 1, 1},
                     {85, 1, CONV_SIZE, CONV_SIZE},
                     {70, 85, 1, 1},
                     {70, 1, CONV_SIZE, CONV_SIZE},
                     {150, 70, 1, 1},
                     {150, 1, CONV_SIZE, CONV_SIZE},
                     {69, 150, 1, 1},
                     {69, 1, CONV_SIZE, CONV_SIZE},
                     {188, 69, 1, 1},
                     {188, 1, CONV_SIZE, CONV_SIZE},
                     {72, 188, 1, 1},
                     {72, 1, CONV_SIZE, CONV_SIZE},
                     {122, 72, 1, 1},
                     {122, 1, CONV_SIZE, CONV_SIZE},
                     {106, 122, 1, 1},
                     {106, 1, CONV_SIZE, CONV_SIZE},
                     {96, 106, 1, 1},
                     {96, 1, CONV_SIZE, CONV_SIZE},
                     {81, 96, 1, 1},
                     {81, 1, CONV_SIZE, CONV_SIZE},
                     {75, 81, 1, 1},
                     {75, 1, CONV_SIZE, CONV_SIZE},
                     {100, 75, 1, 1} };
int dshape[1][2] = {{100, 10}};

#else

// PLAIN
int cshape[27][4] = {{64, 3, CONV_SIZE, CONV_SIZE},
                     {64, 1, CONV_SIZE, CONV_SIZE},
                     {64, 64, 1, 1},
                     {64, 1, CONV_SIZE, CONV_SIZE},
                     {128, 64, 1, 1},
                     {128, 1, CONV_SIZE, CONV_SIZE},
                     {128, 128, 1, 1},
                     {128, 1, CONV_SIZE, CONV_SIZE},
                     {256, 128, 1, 1},
                     {256, 1, CONV_SIZE, CONV_SIZE},
                     {256, 256, 1, 1},
                     {256, 1, CONV_SIZE, CONV_SIZE},
                     {512, 256, 1, 1},
                     {512, 1, CONV_SIZE, CONV_SIZE},
                     {512, 512, 1, 1},
                     {512, 1, CONV_SIZE, CONV_SIZE},
                     {512, 512, 1, 1},
                     {512, 1, CONV_SIZE, CONV_SIZE},
                     {512, 512, 1, 1},
                     {512, 1, CONV_SIZE, CONV_SIZE},
                     {512, 512, 1, 1},
                     {512, 1, CONV_SIZE, CONV_SIZE},
                     {512, 512, 1, 1},
                     {512, 1, CONV_SIZE, CONV_SIZE},
                     {1024, 512, 1, 1},
                     {1024, 1, CONV_SIZE, CONV_SIZE},
                     {1024, 1024, 1, 1}};
int dshape[1][2] = {{1024, 10}};

#endif  // FISHER_PRUNING

/****************************************************************************************************************************/
// Zero the full (max-sized) 3-D feature-map buffer.
void reset_mem_block(float ***mem) {
  int i, j, k;
  for (i = 0; i < mem_block_shape[0]; i++) {
    for (j = 0; j < mem_block_shape[1]; j++) {
      for (k = 0; k < mem_block_shape[2]; k++) {
        mem[i][j][k] = 0.0;
      }
    }
  }
}

/****************************************************************************************************************************/
// Zero the flat dense-layer buffer.
void reset_mem_block_dense(float *mem) {
  int i;
  for (i = 0; i < mem_block_dense_shape; i++) {
    mem[i] = 0.0;
  }
}

/****************************************************************************************************************************/
// Allocate all global buffers: feature-map blocks, conv kernels, batchnorm
// parameters, dense weights/biases, and flat dense buffers.
// NOTE(review): none of the malloc/calloc results are checked for NULL.
void init_memory() {
  int i, j, k, l;
  int max_channels = 1024;
  int max_im_size = 64;

  block1 = malloc(max_channels * sizeof(float **));
  block2 = malloc(max_channels * sizeof(float **));  // allocate block memory
  for (i = 0; i < max_channels;
i++) {
    block1[i] = malloc(max_im_size * sizeof(float *));
    block2[i] = malloc(max_im_size * sizeof(float *));
    for (j = 0; j < max_im_size; j++) {
      block1[i][j] = malloc(max_im_size * sizeof(float));
      block2[i][j] = malloc(max_im_size * sizeof(float));
    }
  }

#if SPARSE_CONVOLUTIONS
  wc_sparse = (csr_t ****)malloc(CONV_LEVELS * sizeof(csr_t ***));
  for (l = 0; l < CONV_LEVELS; l++) {
    wc_sparse[l] = (csr_t ***)malloc(cshape[l][0] * sizeof(csr_t **));
    for (i = 0; i < cshape[l][0]; i++) {
      wc_sparse[l][i] = (csr_t **)malloc(cshape[l][1] * sizeof(csr_t *));
    }
  }
  // wc memory allocated below will be freed in read_weights if
  // SPARSE_CONVOLUTIONS
#endif  // SPARSE_CONVOLUTIONS

  wc = malloc(CONV_LEVELS * sizeof(float ****));  // allocate kernel memory
  for (l = 0; l < CONV_LEVELS; l++) {
    wc[l] = malloc(cshape[l][0] * sizeof(float ***));
    for (i = 0; i < cshape[l][0]; i++) {
      wc[l][i] = malloc(cshape[l][1] * sizeof(float **));
      for (j = 0; j < cshape[l][1]; j++) {
        wc[l][i][j] = malloc(cshape[l][2] * sizeof(float *));
        for (k = 0; k < cshape[l][2]; k++) {
          wc[l][i][j][k] = malloc(cshape[l][3] * sizeof(float));
        }
      }
    }
  }

  // allocate batchnorm memory
  batchnorm_weights = malloc(27 * sizeof(float *));
  batchnorm_biases = malloc(27 * sizeof(float *));
  batchnorm_means = malloc(27 * sizeof(float *));
  batchnorm_vars = malloc(27 * sizeof(float *));

  for (l = 0; l < CONV_LEVELS; l++) {
    batchnorm_weights[l] = malloc(cshape[l][0] * sizeof(float));
    batchnorm_biases[l] = malloc(cshape[l][0] * sizeof(float));
    batchnorm_means[l] = malloc(cshape[l][0] * sizeof(float));
    batchnorm_vars[l] = malloc(cshape[l][0] * sizeof(float));
  }

  wd = malloc(1 * sizeof(float **));
  bd = malloc(1 * sizeof(float *));
  for (l = 0; l < 1; l++) {
    wd[l] = malloc(dshape[l][0] * sizeof(float *));
    for (i = 0; i < dshape[l][0]; i++) {
      wd[l][i] = malloc(dshape[l][1] * sizeof(float));
    }
    bd[l] = malloc(dshape[l][1] * sizeof(float));
  }

  // allocate dense memory
  mem_block1_dense = calloc(mem_block_dense_shape, sizeof(float));
  mem_block2_dense = calloc(mem_block_dense_shape, sizeof(float));
}

/****************************************************************************************************************************/
// Release everything allocated by init_memory (and, in the sparse build,
// by the CSR conversion in read_weights).
void free_memory() {
  int i, j, k, l;

  // Free convolution weights
  for (l = 0; l < CONV_LEVELS; l++) {
#if SPARSE_CONVOLUTIONS
    for (i = 0; i < cshape[l][0]; i++) {
      for (j = 0; j < cshape[l][1]; j++) {
        free(wc_sparse[l][i][j]);
      }
      free(wc_sparse[l][i]);
    }
    free(wc_sparse[l]);
#else
    for (i = 0; i < cshape[l][0]; i++) {
      for (j = 0; j < cshape[l][1]; j++) {
        for (k = 0; k < cshape[l][2]; k++) {
          free(wc[l][i][j][k]);
        }
        free(wc[l][i][j]);
      }
      free(wc[l][i]);
    }
    free(wc[l]);
#endif
  }
  // free(wc);
  // free(bc);

#if SPARSE_CONVOLUTIONS
  free(wc_sparse);
#else
  free(wc);
#endif  // SPARSE_CONVOLUTIONS

  // Free dense weights
  for (l = 0; l < 1; l++) {
    for (i = 0; i < dshape[l][0]; i++) {
      free(wd[l][i]);
    }
    free(wd[l]);
    free(bd[l]);
  }
  free(wd);
  free(bd);

  // Free memblocks
  for (i = 0; i < mem_block_shape[0]; i++) {
    for (j = 0; j < mem_block_shape[1]; j++) {
      free(block1[i][j]);
      free(block2[i][j]);
    }
    free(block1[i]);
    free(block2[i]);
  }
  free(block1);
  free(block2);

  free(mem_block1_dense);
  free(mem_block2_dense);
}

/****************************************************************************************************************************/
// Load all weights from a single whitespace-separated text file, in the
// order: conv kernels per level, then batchnorm (w, b, mean, var) per level,
// then dense weights and biases.  If lvls != -1 and at least lvls conv
// levels were read, the dense weights are skipped (early return).
// NOTE(review): every fscanf return value is unchecked -- a short or
// malformed file silently leaves weights uninitialized.
void read_weights(char *in_file, int lvls) {
  float dval;
  int i, j, k, l, m, z;
  FILE *iin;
  int total_lvls_read = 0;

  // printf("\nin_file es: %s\n\n", in_file);
  iin = fopen(in_file, "r");
  if (iin == NULL) {
    printf("Weights file %s absent\n", in_file);
    exit(1);
  }

  // Reading convolution weights (store them flipped from begining)
  // no biases
  for (l = 0; l < CONV_LEVELS; l++) {
    printf("Read conv block %d weights\n", l);
    for (i = 0; i < cshape[l][0]; i++) {
      for (j = 0; j < cshape[l][1]; j++) {
        for (k = 0; k < cshape[l][2]; k++) {
          for (m = 0; m < cshape[l][3]; m++) {
            fscanf(iin, "%f", &dval);
            wc[l][i][j][k][m] = dval;
          }
        }
      }
    }
    total_lvls_read += 1;
  }

  for (z = 0; z < CONV_LEVELS; z++) {  // batchnorm weights and biases
    printf("Read batchnorm block %d weights\n", z);
    for (i = 0; i < cshape[z][0]; i++) {
      fscanf(iin, "%f", &dval);
      batchnorm_weights[z][i] = dval;
    }

    for (i = 0; i < cshape[z][0]; i++) {
      fscanf(iin, "%f", &dval);
      // printf("bias %i : %f \n", i, dval);
      batchnorm_biases[z][i] = dval;
    }

    for (i = 0; i < cshape[z][0]; i++) {
      fscanf(iin, "%f", &dval);
      // printf("bias %i : %f \n", i, dval);
      batchnorm_means[z][i] = dval;
    }

    for (i = 0; i < cshape[z][0]; i++) {
      fscanf(iin, "%f", &dval);
      // printf("bias %i : %f \n", i, dval);
      batchnorm_vars[z][i] = dval;
    }
  }

  if (total_lvls_read >= lvls && lvls != -1) return;

  // Reading dense weights
  int num_dense_layers = 1;
  for (z = 0; z < num_dense_layers; z++) {
    printf("Read dense block %d weights\n", z);
    for (i = 0; i < dshape[z][0]; i++) {
      for (j = 0; j < dshape[z][1]; j++) {
        fscanf(iin, "%f", &dval);
        // printf("weight: %i : %f \n", i, dval);
        wd[z][i][j] = dval;
      }
    }
    for (i = 0; i < dshape[z][1]; i++) {
      fscanf(iin, "%f", &dval);
      // printf("bias %i : %f \n", i, dval);
      bd[z][i] = dval;
    }
  }

  fclose(iin);

  /////////////**************** SPARSE ************/////////////////////////////
#if SPARSE_CONVOLUTIONS
  // convert to sparse format
  for (l = 0; l < CONV_LEVELS; l++)
    for (i = 0; i < cshape[l][0]; i++)
      for (j = 0; j < cshape[l][1]; j++) {
        // printf("going for %d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j,
        // cshape[l][1]);
        csr_t *a = dense2csr2(cshape[l][2], cshape[l][3], wc[l][i][j]);
        // print_csr(a);
        wc_sparse[l][i][j] = a;
        // printf("done..%d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j,
        // cshape[l][1]);
      }

  // Free convolution weights
#if FIRST_CONV_SPARSE == 0
  l = 0;
  // allocate new memory for first conv and copy from wc
  float *****wc_first_conv = (float *****)malloc(1 * sizeof(float ****));
  wc_first_conv[l] = (float ****)malloc(cshape[l][0] * sizeof(float ***));
  int k1, k2;

  for (i = 0; i < cshape[l][0]; i++) {
    wc_first_conv[l][i] = (float ***)malloc(cshape[l][1] * sizeof(float **));
    for (j = 0; j < cshape[l][1]; j++) {
      wc_first_conv[l][i][j] = (float **)malloc(cshape[l][2] * sizeof(float *));
      for (k1 = 0; k1 < cshape[l][2]; k1++) {
        wc_first_conv[l][i][j][k1] =
            (float *)malloc(cshape[l][3] * sizeof(float));
        for (k2 = 0; k2 < cshape[l][3]; k2++)
          wc_first_conv[l][i][j][k1][k2] = wc[l][i][j][k1][k2];
      }
    }
  }
#endif  // FIRST_CONV_SPARSE == 0

  // free up all dense conv layer representation
  for (l = 0; l < CONV_LEVELS; l++) {
    for (i = 0; i < cshape[l][0]; i++) {
      for (j = 0; j < cshape[l][1]; j++) {
        for (k = 0; k < cshape[l][2]; k++) {
          free(wc[l][i][j][k]);
        }
        free(wc[l][i][j]);
      }
      free(wc[l][i]);
    }
    free(wc[l]);
  }
  free(wc);

#if FIRST_CONV_SPARSE == 0
  // replace old wc pointer with the data for only first conv layer created
  // above
  wc = wc_first_conv;
#endif  // FIRST_CONV_SPARSE == 0

#endif  // SPARSE_CONVOLUTIONS
}

/****************************************************************************************************************************/
// Read one SIZE x SIZE x 3 image (whitespace-separated floats, channel
// fastest) into block1[channel][row][col].
// NOTE(review): the FILE handle is never fclose()d -- leaks one handle per
// image.  fscanf results are also unchecked.
void read_image(char *in_file) {
  int i, j, l;
  FILE *iin;
  float dval;

  iin = fopen(in_file, "r");
  if (iin == NULL) {
    printf("Image file %s absent\n", in_file);
    exit(1);
  }

  /* Reading image */
  for (i = 0; i < SIZE; i++) {
    for (j = 0; j < SIZE; j++) {
      for (l = 0; l < 3; l++) {
        fscanf(iin, "%f", &dval);
        block1[l][i][j] = dval;
      }
    }
  }
}

/****************************************************************************************************************************/
// 3x3 convolution of one channel with zero padding of 1, accumulating into
// out (+=).  With stride > 1 the results are written at the strided (i, j)
// positions of out, not packed -- NOTE(review): downstream levels index the
// smaller im_sizes grid from (0,0), so verify this layout is intended.
// The zeropad VLA lives on the stack: size+2 squared floats.
void convolution_3_x_3(float **matrix, float **kernel, float **out, int size,
                       int stride) {
  int i, j;
  float sum;
  float zeropad[size + 2][size + 2];
  memset(zeropad, 0, ((size + 2) * (size + 2) * sizeof(float)));  // jack

  for (i = 0; i < size; i++) {
    for (j = 0; j < size; j++) {
      zeropad[i + 1][j + 1] = matrix[i][j];
    }
  }

  for (i = 0; i < size; i = i + stride) {
    for (j = 0; j < size; j = j + stride) {
      sum = zeropad[i][j] * kernel[0][0] +
            zeropad[i][j + 1] * kernel[0][1] +
            zeropad[i][j + 2] * kernel[0][2] +
            zeropad[i + 1][j] * kernel[1][0] +
            zeropad[i + 1][j + 1] * kernel[1][1] +
            zeropad[i + 1][j + 2] *
kernel[1][2] +
            zeropad[i + 2][j] * kernel[2][0] +
            zeropad[i + 2][j + 1] * kernel[2][1] +
            zeropad[i + 2][j + 2] * kernel[2][2];
      out[i][j] += sum;
    }
  }
}

/****************************************************************************************************************************/
/****************************************************************************************************************************/
// 1x1 (pointwise) convolution: block1[i] = sum_l block2[l] * k[i][l].
// NOTE(review): the block1/block2 parameters shadow the globals of the same
// names, and the caller passes them swapped (input in "block2", output in
// "block1") -- read the call sites carefully.  Timing is accumulated into
// pw_conv_time.
void pointwise_convolution(float ****point_kernel, float ***block2,
                           float ***block1, int input_channels,
                           int output_channels, int image_size) {
  struct timeval start, end;
  gettimeofday(&start, NULL);

  int i, j, k, l;
  float sum;

  for (i = 0; i < output_channels; i++) {
    for (j = 0; j < image_size; j++) {
      for (k = 0; k < image_size; k++) {
        sum = 0.;
        for (l = 0; l < input_channels; l++) {
          sum += block2[l][j][k] *
                 point_kernel[i][l][0]
                             [0];  // 0 because they are always 1x1 filters
        }
        block1[i][j][k] = sum;
      }
    }
  }

  gettimeofday(&end, NULL);
  pw_conv_time += get_seconds(start, end);
}

/****************************************************************************************************************************/
// Fused batch normalization + ReLU, channel-parallel:
// out = ((in - mean) * invstd) * w + b, folded into one multiply-add, then
// clamped at zero.  invstd uses epsilon 1e-6 for numerical safety.
void batchnorm_and_relu(float ***in, float ***out, float *weights, float *bias,
                        float *mean, float *var, int num_channels,
                        int image_size) {
  int channel, i, j;

  // ((x - mean) * invstd) * w + b
#pragma omp parallel for private(channel, i, j) schedule(dynamic, 1) \
    num_threads(NUMBER_OF_THREADS)
  for (channel = 0; channel < num_channels; channel++) {
    float invstd = 1. / sqrt(var[channel] + 0.000001);
    for (i = 0; i < image_size; i++) {
      for (j = 0; j < image_size; j++) {
        out[channel][i][j] =
            (weights[channel] * invstd) * in[channel][i][j] +
            (bias[channel] - ((weights[channel] * mean[channel]) * invstd));
        // out[channel][i][j] = ((in[channel][i][j] - mean[channel]) * invstd) *
        // weights[channel] + bias[channel];
        if (out[channel][i][j] < 0.f) out[channel][i][j] = 0.f;
      }
    }
  }
}

/****************************************************************************************************************************/
// One MobileNet block: depthwise 3x3 conv (level) -> BN+ReLU -> pointwise
// 1x1 conv (level+1) -> BN+ReLU.  block1 holds the input on entry and the
// output on return; block2 is scratch and is zeroed after each stage.
void depthwise_convolution(float ***block1, float ***block2,
                           float ****depth_kernel, float ****point_kernel,
                           int level) {
  int i, j;
  int input_channels = cshape[level][0];
  int output_channels = cshape[level + 1][0];
  // printf("level %i: %i ==> %i\n", level, input_channels, output_channels);

#pragma omp parallel for private(i) schedule(dynamic, 1) \
    num_threads(NUMBER_OF_THREADS)
  for (i = 0; i < input_channels; i++) {
#if SPARSE_CONVOLUTIONS
    convolution_3_x_3_sparse(block1[i], wc_sparse[level][i][0], block2[i],
                             im_sizes[level], strides[level]);
#else
    convolution_3_x_3(block1[i], depth_kernel[i][0], block2[i],
                      im_sizes[level], strides[level]);
#endif
  }

  batchnorm_and_relu(block2, block1, batchnorm_weights[level],
                     batchnorm_biases[level], batchnorm_means[level],
                     batchnorm_vars[level], input_channels,
                     im_sizes[level + 1]);
  reset_mem_block(block2);

  level++;  // pointwise stage uses the next level's kernels/BN parameters

  // now do linear combination of the elements in output and write them back
  // into the first memory block
#if SPARSE_CONVOLUTIONS
#pragma omp parallel for private(i, j) schedule(dynamic, 1) \
    num_threads(NUMBER_OF_THREADS)
  for (i = 0; i < output_channels; i++) {
    for (j = 0; j < input_channels; j++) {
      pointwise_convolution_sparse(block2[j], wc_sparse[level][i][j],
                                   block1[j], im_sizes[level]);
    }
  }
#else
  pointwise_convolution(point_kernel, block1, block2, input_channels,
                        output_channels, im_sizes[level]);
#endif

  batchnorm_and_relu(block2, block1, batchnorm_weights[level],
                     batchnorm_biases[level], batchnorm_means[level],
                     batchnorm_vars[level], output_channels,
                     im_sizes[level + 1]);
  reset_mem_block(block2);
}

/****************************************************************************************************************************/
// Add per-element bias to a flat vector, with optional ReLU (relu == 1).
void add_bias_and_relu_flatten(float *out, float *bs, int size, int relu) {
  int i;
  for (i = 0; i < size; i++) {
    out[i] += bs[i];
    // printf("%f\n", out[i]);
    if (relu == 1) {
      if (out[i] < 0) out[i] = 0.f;
    }
  }
}

/****************************************************************************************************************************/
// Copy a sh0 x sh1 x sh2 feature block into a flat vector (C order).
void flatten(float ***in, float *out, int sh0, int sh1, int sh2) {
  int i, j, k, total = 0;
  for (i = 0; i < sh0; i++) {
    for (j = 0; j < sh1; j++) {
      for (k = 0; k < sh2; k++) {
        out[total] = in[i][j][k];
        total += 1;
      }
    }
  }
}

/****************************************************************************************************************************/
// Fully connected layer: out[i] = sum_j in[j] * weights[j][i].
// Timing is accumulated into dense_time.
void dense(float *in, float **weights, float *out, int sh_in, int sh_out) {
  struct timeval start, end;
  gettimeofday(&start, NULL);

  int i, j;
  for (i = 0; i < sh_out; i++) {
    float sum = 0.0;
    for (j = 0; j < sh_in; j++) {
      sum += in[j] * weights[j][i];
    }
    out[i] = sum;
  }

  gettimeofday(&end, NULL);
  dense_time += get_seconds(start, end);
}

/****************************************************************************************************************************/
// Dump the first 64 channels of a feature block to outputs/output<layer>.
// NOTE(review): filename[16] is exactly "outputs/output" (14 chars) plus the
// layer digits -- a two-digit layer needs 17 bytes including the NUL, so
// sprintf overflows the buffer for layer >= 10.
void write_out_block(int layer, float ***block) {
  int layer_name = layer;  // * 2 - 1;
  char filename[16];
  sprintf(filename, "outputs/output%d", layer_name);
  FILE *f = fopen(filename, "w");
  if (f == NULL) {
    printf("Error opening file!\n");
    exit(1);
  }
  for (int i = 0; i < 64; i++) {
    for (int j = 0; j < mem_block_shape[1]; j++) {
      for (int k = 0; k < mem_block_shape[2]; k++) {
        fprintf(f, "%f \n", block[i][j][k]);
      }
    }
  }
  fclose(f);
}

/****************************************************************************************************************************/
// Dump one conv layer's kernels to "layer<n>" and its batchnorm parameters
// to "layer<n+1>" (see NOTE(review) comments in the body for known bugs).
void write_out_layer(int layer)
{
  int layer_name = layer;  // * 2 - 1;
  // NOTE(review): filename[7] only fits "layer" + one digit + NUL; for
  // layer >= 10 ("layer10" = 8 bytes with NUL) sprintf overflows the buffer.
  char filename[7];
  sprintf(filename, "layer%d", layer_name);
  FILE *f = fopen(filename, "w");
  int depth = 1;
  if (f == NULL) {
    printf("Error opening file!\n");
    exit(1);
  }
  for (int o = 0; o < cshape[layer][0]; o++) {
    for (int i = 0; i < cshape[layer][1]; i++) {
      for (int k_h = 0; k_h < cshape[layer][2]; k_h++) {
        for (int k_w = 0; k_w < cshape[layer][3]; k_w++) {
          fprintf(f, "%f ", wc[layer][o][i][k_h][k_w]);
        }
      }
      fprintf(f, "\n");
    }
  }
  fclose(f);

  layer_name = layer + 1;
  char filename2[7];  // NOTE(review): same overflow hazard as filename above
  sprintf(filename2, "layer%d", layer_name);

  // get batchnorms
  FILE *f2 = fopen(filename2, "w");
  if (f2 == NULL) {
    printf("Error opening file!\n");
    exit(1);
  }
  for (int i = 0; i < cshape[layer][0]; i++) {
    fprintf(f2, "%f \n", batchnorm_weights[layer][i]);
  }
  fprintf(f2, "\n\n\n");
  for (int i = 0; i < cshape[layer][0]; i++) {
    fprintf(f2, "%f \n", batchnorm_biases[layer][i]);
  }
  fprintf(f2, "\n\n\n");
  for (int i = 0; i < cshape[layer][0]; i++) {
    fprintf(f2, "%f \n", batchnorm_means[layer][i]);
  }
  fprintf(f2, "\n\n\n");
  for (int i = 0; i < cshape[layer][0]; i++) {
    fprintf(f2, "%f \n", batchnorm_vars[layer][i]);
  }
  // NOTE(review): this closes f a SECOND time (undefined behavior) while f2
  // is never closed -- this line should almost certainly be fclose(f2).
  fclose(f);
}

/****************************************************************************************************************************/
// Write either the raw flattened conv features (only_convolution == 1) or
// the dense-layer scores plus the 1-based argmax class to `out`.
void output_predictions(FILE *out, int only_convolution, int size,
                        int cur_size) {
  int i;
  int c = 0;
  if (only_convolution == 1) {
    // for (i = 0; i < 512*7*7; i++) {
    for (i = 0; i < size * cur_size * cur_size; i++) {
      fprintf(out, "%g\n", mem_block1_dense[i]);
    }
  } else {
    double maximum = -1;
    // dshape[0][1] ==> 10
    for (i = 0; i < dshape[0][1]; i++) {
      fprintf(out, "%g\n", mem_block2_dense[i]);
      // NOTE(review): the comparison reads mem_block1_dense (which was reset
      // after inference) while the maximum is taken from mem_block2_dense --
      // almost certainly a typo; the test should be
      // mem_block2_dense[i] > maximum.
      if (mem_block1_dense[i] > maximum) {
        maximum = mem_block2_dense[i];
        c = i + 1;
      }
    }
    fprintf(out, "\n");
    printf("This image depicts class: %d\n", c);
  }
}

/****************************************************************************************************************************/
// Full forward pass: standard 3x3 conv + BN/ReLU, 13 depthwise-separable
// blocks (two cshape levels per block), flatten, then one dense layer with
// bias (no ReLU).  Input is expected in block1; scores end up in
// mem_block2_dense.
void get_mobilenet_predict() {
  int level = 0;
  int i, j;

  // normal convolution
#pragma omp parallel for private(i, j) schedule(dynamic, 1) \
    num_threads(NUMBER_OF_THREADS)
  for (i = 0; i < cshape[level][0]; i++) {
    for (j = 0; j < cshape[level][1]; j++) {
#if FIRST_CONV_SPARSE
      convolution_3_x_3_sparse(block1[j], wc_sparse[level][i][j], block2[i],
                               im_sizes[level], 1);
#else
      convolution_3_x_3(block1[j], wc[level][i][j], block2[i], im_sizes[level],
                        1);
#endif
    }
  }
  batchnorm_and_relu(block2, block1, batchnorm_weights[level],
                     batchnorm_biases[level], batchnorm_means[level],
                     batchnorm_vars[level], 64, 64);
  reset_mem_block(block2);

  // depthwise convolutions
  for (level = 1; level < (CONV_LEVELS - 1); level = level + 2) {
    depthwise_convolution(block1, block2, wc[level], wc[level + 1], (level));
  }

  // flatten
  flatten(block1, mem_block1_dense, cshape[level][0], im_sizes[level],
          im_sizes[level]);

  // dense
  level = 0;
  dense(mem_block1_dense, wd[level], mem_block2_dense, dshape[level][0],
        dshape[level][1]);
  add_bias_and_relu_flatten(mem_block2_dense, bd[level], dshape[level][1], 0);
  reset_mem_block_dense(mem_block1_dense);

  return;
}

/****************************************************************************************************************************/
// Strip leading and trailing whitespace in place; returns a pointer into
// the (possibly shortened) original buffer.
char *trimwhitespace(char *str) {
  char *end;

  // Trim leading space
  while (isspace((unsigned char)*str)) str++;

  if (*str == 0)  // All spaces?
    return str;

  // Trim trailing space
  end = str + strlen(str) - 1;
  while (end > str && isspace((unsigned char)*end)) end--;

  // Write new null terminator
  *(end + 1) = 0;

  return str;
}

/****************************************************************************************************************************/
// Entry point: parse arguments, load weights, then run inference on every
// image listed in the image-list file.  (The function continues past the
// end of this chunk.)
int main(int argc, char *argv[]) {
  FILE *file_list, *results;
  char buf[1024];
  struct timeval tStart, tEnd;
  double deltaTime;
  char *weights_file;
  char *image_list_file;
  char *output_file;
  int lvls = -1;
  int only_convolution = 0;

  //-----------------------------------------------------------------------
  printf("Using %d threads\n", NUMBER_OF_THREADS);

  if (argc != 4 && argc != 5) {
    printf(
        "Usage: <program.exe> <weights file> <images list file> <output file> "
        "<only_convolution [optional]>\n");
    return 0;
  }

  weights_file = argv[1];
  // printf("%s\n", weights_file);
  image_list_file = argv[2];
  output_file = argv[3];

  if (argc == 5) {
    // any 5th argument means: read only the conv levels and emit features
    lvls = 20;
    only_convolution = 1;
  }

  //-----------------------------------------------------------------------
  init_memory();

  file_list = fopen(image_list_file, "r");
  if (file_list == NULL) {
    printf("Check file list location: %s\n", image_list_file);
    return 1;
  }

  results = fopen(output_file, "w");
  if (results == NULL) {
    printf("Couldn't open file for writing: %s\n", output_file);
    return 1;
  }

  gettimeofday(&tStart, NULL);
  read_weights(weights_file, lvls);
  gettimeofday(&tEnd, NULL);
  deltaTime = get_seconds(tStart, tEnd);
  printf("Reading weights: %.3lf sec\n", deltaTime);

  // NOTE(review): while(!feof()) is an antipattern, and the fgets return
  // value is unchecked -- at EOF, buf keeps its previous contents and the
  // last image may be processed twice (or stale data used on an empty list).
  while (!feof(file_list)) {
    pw_conv_time = 0.0;
    dense_time = 0.0;

    fgets(buf, 1024, file_list);
    if (strlen(buf) == 0) {
      break;
    }

    // printf("%d\n", strlen(buf));
    read_image(trimwhitespace(buf));

    gettimeofday(&tStart, NULL);
    get_mobilenet_predict();
    gettimeofday(&tEnd, NULL);
    deltaTime = get_seconds(tStart, tEnd);
    printf("Infer image %s: %.3lf sec\n", buf, deltaTime);
    printf("pw_conv time: %.3lf sec\n", pw_conv_time);
    printf("dense time: %.3lf sec\n", dense_time);
output_predictions(results, only_convolution, 1024, 1);
  }

  // free_memory();
  fclose(file_list);
  /* NOTE(review): `results` is never fclose()d; its buffered output is only
     flushed by the exit-time cleanup after main() returns.  Consider an
     explicit fclose(results) here. */
  return 0;
}
TBBHashBackend.h
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018-2021 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. 
// ---------------------------------------------------------------------------- #pragma once #include <tbb/concurrent_unordered_map.h> #include <limits> #include <unordered_map> #include "open3d/core/hashmap/CPU/CPUHashBackendBufferAccessor.hpp" #include "open3d/core/hashmap/DeviceHashBackend.h" #include "open3d/utility/Parallel.h" namespace open3d { namespace core { template <typename Key, typename Hash, typename Eq> class TBBHashBackend : public DeviceHashBackend { public: TBBHashBackend(int64_t init_capacity, int64_t key_dsize, const std::vector<int64_t>& value_dsizes, const Device& device); ~TBBHashBackend(); void Reserve(int64_t capacity) override; void Insert(const void* input_keys, const std::vector<const void*>& input_values_soa, buf_index_t* output_buf_indices, bool* output_masks, int64_t count) override; void Find(const void* input_keys, buf_index_t* output_buf_indices, bool* output_masks, int64_t count) override; void Erase(const void* input_keys, bool* output_masks, int64_t count) override; int64_t GetActiveIndices(buf_index_t* output_indices) override; void Clear() override; int64_t Size() const override; int64_t GetBucketCount() const override; std::vector<int64_t> BucketSizes() const override; float LoadFactor() const override; std::shared_ptr<tbb::concurrent_unordered_map<Key, buf_index_t, Hash, Eq>> GetImpl() const { return impl_; } void Allocate(int64_t capacity) override; void Free() override{}; protected: std::shared_ptr<tbb::concurrent_unordered_map<Key, buf_index_t, Hash, Eq>> impl_; std::shared_ptr<CPUHashBackendBufferAccessor> buffer_accessor_; }; template <typename Key, typename Hash, typename Eq> TBBHashBackend<Key, Hash, Eq>::TBBHashBackend( int64_t init_capacity, int64_t key_dsize, const std::vector<int64_t>& value_dsizes, const Device& device) : DeviceHashBackend(init_capacity, key_dsize, value_dsizes, device) { Allocate(init_capacity); } template <typename Key, typename Hash, typename Eq> TBBHashBackend<Key, Hash, 
Eq>::~TBBHashBackend() {}

template <typename Key, typename Hash, typename Eq>
int64_t TBBHashBackend<Key, Hash, Eq>::Size() const {
    return impl_->size();
}

// Parallel lookup: each thread probes independent keys; misses yield
// mask=false and a buffer index of 0.
template <typename Key, typename Hash, typename Eq>
void TBBHashBackend<Key, Hash, Eq>::Find(const void* input_keys,
                                         buf_index_t* output_buf_indices,
                                         bool* output_masks,
                                         int64_t count) {
    const Key* input_keys_templated = static_cast<const Key*>(input_keys);

#pragma omp parallel for num_threads(utility::EstimateMaxThreads())
    for (int64_t i = 0; i < count; ++i) {
        const Key& key = input_keys_templated[i];

        auto iter = impl_->find(key);
        bool flag = (iter != impl_->end());
        output_masks[i] = flag;
        output_buf_indices[i] = flag ? iter->second : 0;
    }
}

// Sequential erase: tbb's unsafe_erase is, per its name, NOT safe to run
// concurrently with other map operations, hence no omp parallel here.
// Frees the entry's buffer slot before removing it from the map.
template <typename Key, typename Hash, typename Eq>
void TBBHashBackend<Key, Hash, Eq>::Erase(const void* input_keys,
                                          bool* output_masks,
                                          int64_t count) {
    const Key* input_keys_templated = static_cast<const Key*>(input_keys);

    for (int64_t i = 0; i < count; ++i) {
        const Key& key = input_keys_templated[i];

        auto iter = impl_->find(key);
        bool flag = (iter != impl_->end());
        output_masks[i] = flag;
        if (flag) {
            buffer_accessor_->DeviceFree(iter->second);
            impl_->unsafe_erase(iter);
        }
    }
}

// Copies every stored buffer index out in map-iteration order.
template <typename Key, typename Hash, typename Eq>
int64_t TBBHashBackend<Key, Hash, Eq>::GetActiveIndices(
        buf_index_t* output_buf_indices) {
    int64_t count = impl_->size();
    int64_t i = 0;
    for (auto iter = impl_->begin(); iter != impl_->end(); ++iter, ++i) {
        output_buf_indices[i] = static_cast<int64_t>(iter->second);
    }

    return count;
}

template <typename Key, typename Hash, typename Eq>
void TBBHashBackend<Key, Hash, Eq>::Clear() {
    impl_->clear();
    this->buffer_->ResetHeap();
}

// Grow the bucket array so `capacity` elements fit within max_load_factor.
template <typename Key, typename Hash, typename Eq>
void TBBHashBackend<Key, Hash, Eq>::Reserve(int64_t capacity) {
    impl_->rehash(std::ceil(capacity / impl_->max_load_factor()));
}

template <typename Key, typename Hash, typename Eq>
int64_t TBBHashBackend<Key, Hash, Eq>::GetBucketCount() const {
    return
impl_->unsafe_bucket_count();
}

// Per-bucket sizes; uses the unsafe_* introspection API, so callers should
// not run this concurrently with modifications.
template <typename Key, typename Hash, typename Eq>
std::vector<int64_t> TBBHashBackend<Key, Hash, Eq>::BucketSizes() const {
    int64_t bucket_count = impl_->unsafe_bucket_count();
    std::vector<int64_t> ret;
    for (int64_t i = 0; i < bucket_count; ++i) {
        ret.push_back(impl_->unsafe_bucket_size(i));
    }
    return ret;
}

template <typename Key, typename Hash, typename Eq>
float TBBHashBackend<Key, Hash, Eq>::LoadFactor() const {
    return impl_->load_factor();
}

// Parallel insert: a placeholder index 0 is inserted first to atomically
// claim the key; only the claiming thread allocates a buffer slot and copies
// the key/value bytes.
template <typename Key, typename Hash, typename Eq>
void TBBHashBackend<Key, Hash, Eq>::Insert(
        const void* input_keys,
        const std::vector<const void*>& input_values_soa,
        buf_index_t* output_buf_indices,
        bool* output_masks,
        int64_t count) {
    const Key* input_keys_templated = static_cast<const Key*>(input_keys);
    size_t n_values = input_values_soa.size();

#pragma omp parallel for num_threads(utility::EstimateMaxThreads())
    for (int64_t i = 0; i < count; ++i) {
        output_buf_indices[i] = 0;
        output_masks[i] = false;

        const Key& key = input_keys_templated[i];

        // Try to insert a dummy buffer index; res.second tells us whether
        // this thread won the race for the key.
auto res = impl_->insert({key, 0});

        // Lazy copy key value pair to buffer only if insertion succeeded
        // (i.e. the key was not already present).
        if (res.second) {
            buf_index_t buf_index = buffer_accessor_->DeviceAllocate();
            void* key_ptr = buffer_accessor_->GetKeyPtr(buf_index);

            // Copy templated key to buffer
            *static_cast<Key*>(key_ptr) = key;

            // Copy/reset non-templated value in buffer: value j for input i
            // lives at offset value_dsizes_[j] * i in the j-th SoA array.
            for (size_t j = 0; j < n_values; ++j) {
                uint8_t* dst_value = static_cast<uint8_t*>(
                        buffer_accessor_->GetValuePtr(buf_index, j));
                const uint8_t* src_value =
                        static_cast<const uint8_t*>(input_values_soa[j]) +
                        this->value_dsizes_[j] * i;
                std::memcpy(dst_value, src_value, this->value_dsizes_[j]);
            }

            // Update from dummy 0 to the real slot index.
            res.first->second = buf_index;

            // Write to return variables
            output_buf_indices[i] = buf_index;
            output_masks[i] = true;
        }
    }
}

// Allocates the flat key/value buffer, its accessor, and the TBB map sized
// for `capacity` entries.
template <typename Key, typename Hash, typename Eq>
void TBBHashBackend<Key, Hash, Eq>::Allocate(int64_t capacity) {
    this->capacity_ = capacity;

    this->buffer_ = std::make_shared<HashBackendBuffer>(
            this->capacity_, this->key_dsize_, this->value_dsizes_,
            this->device_);

    buffer_accessor_ =
            std::make_shared<CPUHashBackendBufferAccessor>(*this->buffer_);

    impl_ = std::make_shared<
            tbb::concurrent_unordered_map<Key, buf_index_t, Hash, Eq>>(
            capacity, Hash(), Eq());
}

}  // namespace core
}  // namespace open3d
draw.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD RRRR AAA W W % % D D R R A A W W % % D D RRRR AAAAA W W W % % D D R RN A A WW WW % % DDDD R R A A W W % % % % % % MagickCore Image Drawing Methods % % % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon % rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion", % Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent % (www.appligent.com) contributed the dash pattern, linecap stroking % algorithm, and minor rendering improvements. % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Define declarations. */ #define BezierQuantum 200 #define PrimitiveExtentPad 4296.0 #define MaxBezierCoordinates 67108864 #define ThrowPointExpectedException(token,exception) \ { \ (void) ThrowMagickException(exception,GetMagickModule(),DrawError, \ "NonconformingDrawingPrimitiveDefinition","`%s'",token); \ status=MagickFalse; \ break; \ } /* Typedef declarations. 
*/

/* One y-monotonic polygon edge: its point list, bounding box, and the
   scanline bookkeeping (scanline, highwater) used during rasterization. */
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;

  double
    scanline;

  PointInfo
    *points;

  size_t
    number_points;

  ssize_t
    direction;

  MagickBooleanType
    ghostline;

  size_t
    highwater;
} EdgeInfo;

/* An ellipse element: center (cx,cy), axes (major,minor), rotation angle. */
typedef struct _ElementInfo
{
  double
    cx,
    cy,
    major,
    minor,
    angle;
} ElementInfo;

/* Mutable state threaded through MVG parsing: the growable primitive array,
   its allocated extent, the current write offset and point. */
typedef struct _MVGInfo
{
  PrimitiveInfo
    **primitive_info;

  size_t
    *extent;

  ssize_t
    offset;

  PointInfo
    point;

  ExceptionInfo
    *exception;
} MVGInfo;

/* A set of edges sorted for scanline rendering. */
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;

  size_t
    number_edges;
} PolygonInfo;

typedef enum
{
  MoveToCode,
  OpenCode,
  GhostlineCode,
  LineToCode,
  EndCode
} PathInfoCode;

/* A single vector-path vertex tagged with how it joins the path. */
typedef struct _PathInfo
{
  PointInfo
    point;

  PathInfoCode
    code;
} PathInfo;

/*
  Forward declarations.
*/
static Image
  *DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
    ExceptionInfo *);

static MagickBooleanType
  DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
    ExceptionInfo *),
  RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
  TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
  TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
    const double,const MagickBooleanType,const MagickBooleanType),
  TraceBezier(MVGInfo *,const size_t),
  TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
  TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
  TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
  TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
  TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
  TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);

static PrimitiveInfo
  *TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *,ExceptionInfo *);

static ssize_t
  TracePath(MVGInfo *,const char *,ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e D r a w I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
%  The format of the AcquireDrawInfo method is:
%
%      DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *draw_info;

  /* AcquireCriticalMemory() aborts on failure, so no NULL check is needed. */
  draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
  GetDrawInfo((ImageInfo *) NULL,draw_info);
  return(draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e D r a w I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneDrawInfo() makes a copy of the given draw_info structure.  If NULL
%  is specified, a new DrawInfo structure is created initialized to default
%  values.
%
%  The format of the CloneDrawInfo method is:
%
%      DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
%        const DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  /* Start from defaults, then deep-copy each field that owns memory. */
  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  exception=AcquireExceptionInfo();
  if (draw_info->id != (char *) NULL)
    (void) CloneString(&clone_info->id,draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  if (draw_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      ssize_t
        x;

      /* The dash pattern is a 0.0-terminated array; count its entries. */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      /* NOTE(review): 2*x+2 doubles are allocated but only x+1 copied; the
         extra zeroed tail is presumably headroom for dash cycling elsewhere
         — confirm before changing. */
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
        sizeof(*clone_info->dash_pattern));
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        /* NOTE(review): exception tag reused from the dash-pattern branch
           (copy-paste); the message text is misleading for gradient stops. */
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n v e r t P a t h T o P o l y g o n                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPathToPolygon() converts a path to the more efficient sorted
%  rendering form.
%
%  The format of the ConvertPathToPolygon method is:
%
%      PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o ConvertPathToPolygon() returns the path in a more efficient sorted
%      rendering form of type PolygonInfo.
%
%    o draw_info: Specifies a pointer to an DrawInfo structure.
%
%    o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/

/* Release every edge's point array, the edge array, and the PolygonInfo
   itself; always returns NULL so callers can write
   `polygon_info=DestroyPolygonInfo(polygon_info);`. */
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  ssize_t
    i;

  if (polygon_info->edges != (EdgeInfo *) NULL)
    {
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
        if (polygon_info->edges[i].points != (PointInfo *) NULL)
          polygon_info->edges[i].points=(PointInfo *)
            RelinquishMagickMemory(polygon_info->edges[i].points);
      polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
        polygon_info->edges);
    }
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/* qsort comparator over EdgeInfo.  The helper macro returns from the
   ENCLOSING function on the first unequal comparison; note it evaluates its
   arguments twice, so only pass simple expressions. */
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
#define DrawCompareEdge(p,q) \
{ \
  if (((p)-(q)) < 0.0) \
    return(-1); \
  if (((p)-(q)) > 0.0) \
    return(1); \
}

  const PointInfo
    *p,
    *q;

  /*
    Edge sorting for right-handed coordinate system.
*/
  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  /* Order by first point (y, then x), then by slope cross-product, then by
     second point (y, then x). */
  DrawCompareEdge(p[0].y,q[0].y);
  DrawCompareEdge(p[0].x,q[0].x);
  DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
    (q[1].x-q[0].x));
  DrawCompareEdge(p[1].y,q[1].y);
  DrawCompareEdge(p[1].x,q[1].x);
  return(0);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

/* Dump every edge of the polygon to the debug log. */
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
  EdgeInfo
    *p;

  ssize_t
    i,
    j;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
  p=polygon_info->edges;
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
      (double) i);
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
      p->direction != MagickFalse ? "down" : "up");
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
      p->ghostline != MagickFalse ? "transparent" : "opaque");
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
      p->bounds.x2,p->bounds.y2);
    for (j=0; j < (ssize_t) p->number_points; j++)
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
        p->points[j].x,p->points[j].y);
    p++;
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}

/* Reverse `points` in place. */
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  PointInfo
    point;

  ssize_t
    i;

  for (i=0; i < (ssize_t) (number_points >> 1); i++)
  {
    point=points[i];
    points[i]=points[number_points-(i+1)];
    points[number_points-(i+1)]=point;
  }
}

/* Split a move/line path into y-monotonic edges (each stored low-y first),
   compute per-edge bounds, and sort the edges for scanline rendering.
   Returns NULL after raising an exception on allocation failure. */
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
  ExceptionInfo *exception)
{
  long
    direction,
    next_direction;

  PointInfo
    point,
    *points;

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;

  ssize_t
    i,
    n;

  MagickBooleanType
    ghostline;

  size_t
    edge,
    number_edges,
    number_points;

  /*
    Convert a path to the more efficient sorted rendering form.
*/
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PolygonInfo *) NULL);
    }
  /* Edge and point arrays grow by doubling; start small. */
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(DestroyPolygonInfo(polygon_info));
    }
  (void) memset(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) memset(&point,0,sizeof(point));
  (void) memset(&bounds,0,sizeof(bounds));
  polygon_info->edges[edge].number_points=(size_t) n;
  polygon_info->edges[edge].scanline=0.0;
  polygon_info->edges[edge].highwater=0;
  polygon_info->edges[edge].ghostline=ghostline;
  polygon_info->edges[edge].direction=(ssize_t) direction;
  polygon_info->edges[edge].points=points;
  polygon_info->edges[edge].bounds=bounds;
  polygon_info->number_edges=0;
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to: flush the edge accumulated so far, then start a new
          point list at this vertex.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            if (edge == number_edges)
              {
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  {
                    (void) ThrowMagickException(exception,GetMagickModule(),
                      ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                    points=(PointInfo *) RelinquishMagickMemory(points);
                    return(DestroyPolygonInfo(polygon_info));
                  }
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            if (direction < 0)
              ReversePoints(points,(size_t) n);
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;
            ghostline=MagickFalse;
            edge++;
            polygon_info->number_edges=edge;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                return(DestroyPolygonInfo(polygon_info));
              }
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to: +1 means this segment heads down-right, -1 up-left.
    */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge: the path changed vertical direction, so close the current
          y-monotonic edge and begin another at the shared vertex.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                points=(PointInfo *) RelinquishMagickMemory(points);
                return(DestroyPolygonInfo(polygon_info));
              }
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        polygon_info->number_edges=edge+1;
        points=(PointInfo *) NULL;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'","");
            return(DestroyPolygonInfo(polygon_info));
          }
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'","");
            return(DestroyPolygonInfo(polygon_info));
          }
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      /* Flush the final pending edge (a single stray point is dropped). */
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                {
                  (void) ThrowMagickException(exception,GetMagickModule(),
                    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                  return(DestroyPolygonInfo(polygon_info));
                }
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          points=(PointInfo *) NULL;
          ghostline=MagickFalse;
          edge++;
          polygon_info->number_edges=edge;
        }
    }
  /* Trim the edge array and each point array to their final sizes, then sort
     edges for the scanline renderer. */
  polygon_info->number_edges=edge;
  polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(polygon_info->edges,
    polygon_info->number_edges,sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(DestroyPolygonInfo(polygon_info));
    }
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    EdgeInfo
      *edge_info;

    edge_info=polygon_info->edges+i;
    edge_info->points=(PointInfo *) ResizeQuantumMemory(edge_info->points,
      edge_info->number_points,sizeof(*edge_info->points));
    if (edge_info->points == (PointInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'","");
        return(DestroyPolygonInfo(polygon_info));
      }
  }
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}

/*
  LogPathInfo() writes the decoded vector path to the draw event log, one
  "code point" entry per element, for debugging the rasterizer.
*/
static void LogPathInfo(const PathInfo *path_info)
{
  const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
      "moveto ghostline" : p->code == OpenCode ? "moveto open" :
      p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
      "?");
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}

/*
  ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
  path structure (an array of PathInfo terminated by EndCode).

  The format of the ConvertPrimitiveToPath method is:

      PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info,
        ExceptionInfo *exception)

  A description of each parameter follows:

    o primitive_info: Specifies a pointer to an PrimitiveInfo structure.

    o exception: return any errors or warnings in this structure.

  Returns NULL for primitives that have no path representation (alpha,
  color, image, point, text) or on memory-allocation failure.
*/
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,  /* first point of the current subpath */
    q;  /* most recently emitted point (for duplicate elimination) */

  ssize_t
    i,
    n;

  ssize_t
    coordinates,
    start;

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /*
    Worst case: 3 path entries per primitive point (point + ghostline pair)
    plus the EndCode terminator.
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PathInfo *) NULL);
    }
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed; append a
      ghostline back to the subpath origin so the rasterizer sees a closed
      contour without stroking it.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /*
    Trim the allocation to the number of entries actually used; NOTE(review):
    a failed shrink returns NULL here and the path is lost — callers must
    handle a NULL return.
  */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}

/*
  DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.

  The format of the DestroyDrawInfo method is:

      DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)

  A description of each parameter follows:

    o draw_info: the draw info.

  Always returns NULL so callers can write draw_info=DestroyDrawInfo(...).
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  /* Release every owned member; each setter nulls the pointer. */
  if (draw_info->id != (char *) NULL)
    draw_info->id=DestroyString(draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /* Invalidate the signature to catch use-after-free of the struct. */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}

/*
  AffineEdge() clips the scanline span `edge` at row y against the affine
  image of the source rectangle, returning the clipped span.  If the span is
  empty the returned segment has x2 < x1.
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      /* sx ~ 0: the whole row maps to column z; reject if out of bounds. */
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}

/*
  InverseAffineMatrix() returns the inverse of the given 2x3 affine matrix.
  A singular matrix is not rejected: PerceptibleReciprocal() clamps the
  determinant reciprocal instead of dividing by zero.
*/
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    determinant;

  determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  inverse_affine.sx=determinant*affine->sy;
  inverse_affine.rx=determinant*(-affine->rx);
  inverse_affine.ry=determinant*(-affine->ry);
  inverse_affine.sy=determinant*affine->sx;
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}

/*
  DrawAffineImage() composites the source over the destination image as
  dictated by the affine transform.

  The format of the DrawAffineImage method is:

      MagickBooleanType DrawAffineImage(Image *image,const Image *source,
        const AffineMatrix *affine,ExceptionInfo *exception)

  A description of each parameter follows:

    o image: the image.

    o source: the source image.

    o affine: the affine transform.

    o exception: return any errors or warnings in this structure.
*/
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],
    min,
    max;

  ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box: forward-map the source corners, then take the
    axis-aligned hull.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image: for each destination row in the clipped hull,
    inverse-map each pixel back into the source and composite "over".
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=CastDoubleToLong(ceil(edge.y1-0.5));
  stop=CastDoubleToLong(floor(edge.y2+0.5));
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    ssize_t
      x_offset;

    if (status == MagickFalse)
      continue;  /* another thread failed; skip remaining rows */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;  /* empty span on this row */
    q=GetCacheViewAuthenticPixels(image_view,CastDoubleToLong(
      ceil(inverse_edge.x1-0.5)),y,(size_t) CastDoubleToLong(floor(
      inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    x_offset=0;
    for (x=CastDoubleToLong(ceil(inverse_edge.x1-0.5)); x <=
         CastDoubleToLong(floor(inverse_edge.x2+0.5)); x++)
    {
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      x_offset++;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
  DrawBoundingRectangles() draws the bounding rectangles on the image.  This
  is only useful for developers debugging the rendering algorithm.

  The format of the DrawBoundingRectangles method is:

      MagickBooleanType DrawBoundingRectangles(Image *image,
        const DrawInfo *draw_info,const PolygonInfo *polygon_info,
        ExceptionInfo *exception)

  A description of each parameter follows:

    o image: the image.

    o draw_info: the draw info.

    o polygon_info: Specifies a pointer to a PolygonInfo structure; may be
      NULL, in which case only the overall bounding box is drawn.

    o exception: return any errors or warnings in this structure.
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  double
    mid;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /* Default 96 DPI unless the draw info carries a density geometry. */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      if ((flags & RhoValue) != 0)
        resolution.x=geometry_info.rho;
      resolution.y=resolution.x;
      if ((flags & SigmaValue) != 0)
        resolution.y=geometry_info.sigma;
    }
  /* Half the stroke width in device space: rectangles are inflated by it. */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    clone_info->stroke_width/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /*
        Union of all edge bounding boxes, inflated by half the stroke width
        and clamped to the image extent.
      */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      /*
        One rectangle per edge: red for rising edges, green for falling.
      */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,
            &clone_info->stroke,exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,
            &clone_info->stroke,exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      if (i < (ssize_t) polygon_info->number_edges)
        {
          /* Early break above: propagate the failure. */
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /*
    Overall bounding box in blue.
  */
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
  DrawClipPath() draws the clip path on the image mask.

  The format of the DrawClipPath method is:

      MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
        const char *id,ExceptionInfo *exception)

  A description of each parameter follows:

    o image: the image.

    o draw_info: the draw info.

    o id: the clip path id (looked up as an image artifact).

    o exception: return any errors or warnings in this structure.
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  const char
    *clip_path;

  Image
    *clipping_mask;

  MagickBooleanType
    status;

  clip_path=GetImageArtifact(image,id);
  if (clip_path == (const char *) NULL)
    return(MagickFalse);
  clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,
    clip_path,exception);
  if (clipping_mask == (Image *) NULL)
    return(MagickFalse);
  status=SetImageMask(image,WritePixelMask,clipping_mask,exception);
  clipping_mask=DestroyImage(clipping_mask);
  return(status);
}

/*
  DrawClippingMask() draws the clip path and returns it as an image clipping
  mask.

  The format of the DrawClippingMask method is:

      Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
        const char *id,const char *clip_path,ExceptionInfo *exception)

  A description of each parameter follows:

    o image: the image.

    o draw_info: the draw info.

    o id: the clip path id.

    o clip_path: the clip path (MVG content).

    o exception: return any errors or warnings in this structure.

  Returns NULL on failure.
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask,
    *separate_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path: render the MVG onto a transparent canvas with a white
    fill, then extract and invert the alpha channel as the mask.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  /*
    NOTE(review): the intermediate status results below are overwritten by
    each subsequent assignment; only the later ones take effect.
  */
  status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  status=SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask == (Image *) NULL)
    status=MagickFalse;
  else
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status&=NegateImage(clip_mask,MagickFalse,exception);
    }
  if (status == MagickFalse)
clip_mask=DestroyImage(clip_mask); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path"); return(clip_mask); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w C o m p o s i t e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawCompositeMask() draws the mask path and returns it as an image mask. % % The format of the DrawCompositeMask method is: % % Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info, % const char *id,const char *mask_path,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o id: the mask path id. % % o mask_path: the mask path. % % o exception: return any errors or warnings in this structure. % */ static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info, const char *id,const char *mask_path,ExceptionInfo *exception) { Image *composite_mask, *separate_mask; DrawInfo *clone_info; MagickStatusType status; /* Draw a mask path. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); composite_mask=AcquireImage((const ImageInfo *) NULL,exception); status=SetImageExtent(composite_mask,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImage(composite_mask)); status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL, exception); status=QueryColorCompliance("#0000",AllCompliance, &composite_mask->background_color,exception); composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha; composite_mask->background_color.alpha_trait=BlendPixelTrait; (void) SetImageBackgroundColor(composite_mask,exception); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s", id); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->primitive,mask_path); status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill, exception); status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke, exception); clone_info->stroke_width=0.0; clone_info->alpha=OpaqueAlpha; status=RenderMVGContent(composite_mask,clone_info,0,exception); clone_info=DestroyDrawInfo(clone_info); separate_mask=SeparateImage(composite_mask,AlphaChannel,exception); if (separate_mask != (Image *) NULL) { composite_mask=DestroyImage(composite_mask); composite_mask=separate_mask; status=NegateImage(composite_mask,MagickFalse,exception); if (status == MagickFalse) composite_mask=DestroyImage(composite_mask); } if (status == MagickFalse) composite_mask=DestroyImage(composite_mask); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path"); return(composite_mask); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w D a s h 
P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the % image while respecting the dash offset and dash pattern attributes. % % The format of the DrawDashPolygon method is: % % MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info, % const PrimitiveInfo *primitive_info,Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info, const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception) { double length, maximum_length, offset, scale, total_length; DrawInfo *clone_info; MagickStatusType status; PrimitiveInfo *dash_polygon; double dx, dy; ssize_t i; size_t number_vertices; ssize_t j, n; assert(draw_info != (const DrawInfo *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash"); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ; number_vertices=(size_t) i; dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (2UL*number_vertices+32UL),sizeof(*dash_polygon)); if (dash_polygon == (PrimitiveInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(MagickFalse); } (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)* sizeof(*dash_polygon)); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->miterlimit=0; dash_polygon[0]=primitive_info[0]; scale=ExpandAffine(&draw_info->affine); length=scale*draw_info->dash_pattern[0]; offset=fabs(draw_info->dash_offset) >= MagickEpsilon ? 
scale*draw_info->dash_offset : 0.0; j=1; for (n=0; offset > 0.0; j=0) { if (draw_info->dash_pattern[n] <= 0.0) break; length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5)); if (offset > length) { offset-=length; n++; length=scale*draw_info->dash_pattern[n]; continue; } if (offset < length) { length-=offset; offset=0.0; break; } offset=0.0; n++; } status=MagickTrue; maximum_length=0.0; total_length=0.0; for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++) { dx=primitive_info[i].point.x-primitive_info[i-1].point.x; dy=primitive_info[i].point.y-primitive_info[i-1].point.y; maximum_length=hypot(dx,dy); if (maximum_length > (double) (MaxBezierCoordinates >> 2)) continue; if (fabs(length) < MagickEpsilon) { if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon) n++; if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon) n=0; length=scale*draw_info->dash_pattern[n]; } for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); ) { total_length+=length; if ((n & 0x01) != 0) { dash_polygon[0]=primitive_info[0]; dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx* total_length*PerceptibleReciprocal(maximum_length)); dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy* total_length*PerceptibleReciprocal(maximum_length)); j=1; } else { if ((j+1) > (ssize_t) number_vertices) break; dash_polygon[j]=primitive_info[i-1]; dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx* total_length*PerceptibleReciprocal(maximum_length)); dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy* total_length*PerceptibleReciprocal(maximum_length)); dash_polygon[j].coordinates=1; j++; dash_polygon[0].coordinates=(size_t) j; dash_polygon[j].primitive=UndefinedPrimitive; status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception); if (status == MagickFalse) break; } if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon) n++; if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon) n=0; 
      length=scale*draw_info->dash_pattern[n];
    }
    /* Carry the unconsumed pattern length into the next segment. */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;  /* currently in a gap: nothing to append */
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  if ((status != MagickFalse) && (total_length < maximum_length) &&
      ((n & 0x01) == 0) && (j > 1))
    {
      /*
        Flush the final partial dash.
      */
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
  GetStopColorOffset() returns the raw gradient offset of pixel (x,y):
  for linear gradients, the (unnormalized) projection of the pixel onto
  the gradient vector; for radial gradients, the (possibly elliptical)
  distance from the gradient center.
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      double
        gamma,
        length,
        offset,
        scale;

      PointInfo
        p,
        q;

      const SegmentInfo
        *gradient_vector;

      gradient_vector=(&gradient->gradient_vector);
      p.x=gradient_vector->x2-gradient_vector->x1;
      p.y=gradient_vector->y2-gradient_vector->y1;
      q.x=(double) x-gradient_vector->x1;
      q.y=(double) y-gradient_vector->y1;
      length=sqrt(q.x*q.x+q.y*q.y);
      gamma=sqrt(p.x*p.x+p.y*p.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=p.x*q.x+p.y*q.y;
      offset=gamma*scale*length;
      return(offset);
    }
    case RadialGradient:
    {
      PointInfo
        v;

      if (gradient->spread == RepeatSpread)
        {
          /* Repeat spread uses the plain Euclidean distance. */
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      /* Rotate into the gradient frame and scale by the ellipse radii. */
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  }
  return(0.0);
}

/*
  StopInfoCompare() is the qsort() comparator ordering gradient stops by
  ascending offset; offsets within MagickEpsilon compare equal.
*/
static int StopInfoCompare(const void *x,const void *y)
{
  StopInfo
    *stop_1,
    *stop_2;

  stop_1=(StopInfo *) x;
  stop_2=(StopInfo *) y;
  if (stop_1->offset > stop_2->offset)
    return(1);
  if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
    return(0);
  return(-1);
}

/*
  DrawGradientImage() draws a linear or radial gradient on the image.

  The format of the DrawGradientImage method is:

      MagickBooleanType DrawGradientImage(Image *image,
        const DrawInfo *draw_info,ExceptionInfo *exception)

  A description of each parameter follows:

    o image: the image.

    o draw_info: the draw info.

    o exception: return any errors or warnings in this structure.
*/
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /* Stops must be sorted by offset for the interval search below. */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    double
      alpha,
      offset;

    PixelInfo
      composite,
      pixel;

    Quantum
      *magick_restrict q;

    ssize_t
      i,
      x;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);  /* normalize to [0,1] range */
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /* Clamp: outside the stop range, extend the end-stop colors. */
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /* Linear blend between the bracketing stops i-1 and i. */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /* Mirror: even periods run forward, odd periods run backward. */
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          double
            repeat;

          MagickBooleanType
            antialias;

          /* Tile the gradient; antialias the seam between periods. */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ?
                    MagickTrue : MagickFalse;
                  offset=repeat*PerceptibleReciprocal(gradient->radius);
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /* Blend first/last stops across the period seam. */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
  DrawImage() draws a graphic primitive on your image.  The primitive may be
  represented as a string or filename.  Precede the filename with an "at"
  sign (@) and the contents of the file are drawn on the image.  You can
  affect how text is drawn by setting one or more members of the draw info
  structure.

  The format of the DrawImage method is:

      MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
        ExceptionInfo *exception)

  A description of each parameter follows:

    o image: the image.

    o draw_info: the draw info.

    o exception: return any errors or warnings in this structure.
% */

/*
  CheckPrimitiveExtent() ensures the primitive-info array referenced by
  mvg_info can absorb `pad' more entries, growing it when necessary.
  Returns MagickTrue when sufficient storage is (now) available.  If the
  reallocation fails, a minimal array is allocated so callers can unwind
  safely and MagickFalse is returned.
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const double pad)
{
  double
    extent;

  size_t
    quantum;

  /*
    Check if there is enough storage for drawing primitives.
  */
  quantum=sizeof(**mvg_info->primitive_info);
  extent=(double) mvg_info->offset+pad+(PrimitiveExtentPad+1)*(double) quantum;
  if (extent < (double) *mvg_info->extent)
    return(MagickTrue);
  /* Reject extents not representable as ssize_t (or NaN). */
  if ((extent >= (double) MAGICK_SSIZE_MAX) || (IsNaN(extent) != 0))
    return(MagickFalse);
  *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
    *mvg_info->primitive_info,(size_t) (extent+1),quantum);
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    {
      ssize_t
        i;

      *mvg_info->extent=(size_t) extent;
      /* Mark the newly acquired tail entries as unused. */
      for (i=mvg_info->offset+1; i <= (ssize_t) extent; i++)
      {
        (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
        (*mvg_info->primitive_info)[i].text=(char *) NULL;
      }
      return(MagickTrue);
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory((size_t)
    (PrimitiveExtentPad+1)*quantum);
  (void) memset(*mvg_info->primitive_info,0,(size_t) ((PrimitiveExtentPad+1)*
    quantum));
  *mvg_info->extent=1;
  mvg_info->offset=0;
  return(MagickFalse);
}

/*
  GetDrawValue() parses a double from `string' in a locale-independent
  manner; on return *sentinal points past the last character consumed
  (strtod()-style semantics via InterpretLocaleValue()).
*/
static inline double GetDrawValue(const char *magick_restrict string,
  char **magick_restrict sentinal)
{
  char
    **magick_restrict q;

  double
    value;

  q=sentinal;
  value=InterpretLocaleValue(string,q);
  /* NOTE(review): assigns the local parameter copy only — a no-op for the
     caller; the caller's pointer is already updated through q above. */
  sentinal=q;
  return(value);
}

/*
  Splay-tree comparator: orders MVG macro names with strcmp().
*/
static int MVGMacroCompare(const void *target,const void *source)
{
  const char
    *p,
    *q;

  p=(const char *) target;
  q=(const char *) source;
  return(strcmp(p,q));
}

/*
  GetMVGMacros() scans an MVG stream for named `push ... pop' scopes
  (e.g. push graphic-context "wheel") and returns a splay-tree mapping
  each macro name to the MVG text between the push and its matching pop.
  Returns NULL when `primitive' is NULL.
*/
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *macro,
    *token;

  const char
    *q;

  size_t
    extent;

  SplayTreeInfo
    *macros;

  /*
    Scan graphic primitives for definitions and classes.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
    RelinquishMagickMemory);
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    if (GetNextToken(q,&q,extent,token) < 1)
      break;
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        const char
          *end,
          *start;

        (void) GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;

            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            (void) GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            n=1;
            /* Track nested push/pop pairs until this scope closes (n == 0). */
            for (p=q; *p != '\0'; )
            {
              if (GetNextToken(p,&p,extent,token) < 1)
                break;
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro.
                  */
                  (void) GetNextToken(p,&p,extent,token);
                  (void) CopyMagickString(macro,start,(size_t) (end-start));
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  macro=DestroyString(macro);
  return(macros);
}

/*
  IsPoint() returns MagickTrue when `point' parses as a numeric value
  (anything other than an unconsumed input that yields ~0).
*/
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *p;

  double
    value;

  value=GetDrawValue(point,&p);
  return((fabs(value) < MagickEpsilon) && (p == point) ?
    MagickFalse : MagickTrue);
}

/*
  TracePoint() records a single-coordinate point primitive.
*/
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  primitive_info->coordinates=1;
  primitive_info->closed_subpath=MagickFalse;
  primitive_info->point=point;
  return(MagickTrue);
}

/*
  RenderMVGContent() interprets the MVG primitive stream in draw_info and
  renders it onto `image'; `depth' limits recursion through macro/class
  expansion.  (Definition continues beyond this chunk.)
*/
static MagickBooleanType RenderMVGContent(Image *image,
  const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag  "Render/Image"

  AffineMatrix
    affine,
    current;

  char
    keyword[MagickPathExtent],
    geometry[MagickPathExtent],
    *next_token,
    pattern[MagickPathExtent],
    *primitive,
    *token;

  const char
    *q;

  double
    angle,
    coordinates,
    cursor,
    factor,
    primitive_extent;

  DrawInfo
    *clone_info,
    **graphic_context;

  MagickBooleanType
    proceed;

  MagickStatusType
    status;

  MVGInfo
    mvg_info;

  PointInfo
    point;

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  const char
    *p;

  ssize_t
    i,
    x;

  SegmentInfo
    bounds;

  size_t
    extent,
    number_points,
    number_stops;

  SplayTreeInfo
    *macros;

  ssize_t
    defsDepth,
    j,
    k,
    n,
    symbolDepth;

  StopInfo
    *stops;

  TypeMetric
    metrics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /* Guard against runaway recursion through macro/class expansion. */
  if (depth > MagickMaxRecursionDepth)
    ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
      image->filename);
  if ((draw_info->primitive == (char *) NULL) ||
      (*draw_info->primitive == '\0'))
    return(MagickFalse);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (image->alpha_trait == UndefinedPixelTrait)
    {
      status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      if (status == MagickFalse)
        return(MagickFalse);
    }
  if
((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) && (*(draw_info->primitive+1) != '-') && (depth == 0)) primitive=FileToString(draw_info->primitive+1,~0UL,exception); else primitive=AcquireString(draw_info->primitive); if (primitive == (char *) NULL) return(MagickFalse); primitive_extent=(double) strlen(primitive); (void) SetImageArtifact(image,"mvg:vector-graphics",primitive); n=0; number_stops=0; stops=(StopInfo *) NULL; /* Allocate primitive info memory. */ graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive=DestroyString(primitive); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } number_points=(size_t) PrimitiveExtentPad; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (number_points+1),sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(primitive_info,0,(size_t) (number_points+1)* sizeof(*primitive_info)); (void) memset(&mvg_info,0,sizeof(mvg_info)); mvg_info.primitive_info=(&primitive_info); mvg_info.extent=(&number_points); mvg_info.exception=exception; graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info); graphic_context[n]->viewbox=image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width=image->columns; graphic_context[n]->viewbox.height=image->rows; } token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; defsDepth=0; symbolDepth=0; cursor=0.0; macros=GetMVGMacros(primitive); status=MagickTrue; for (q=primitive; *q != '\0'; ) { /* Interpret graphic primitive. 
*/ if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1) break; if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. */ while ((*q != '\n') && (*q != '\0')) q++; continue; } p=q-strlen(keyword)-1; primitive_type=UndefinedPrimitive; current=graphic_context[n]->affine; GetAffineMatrix(&affine); *token='\0'; switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.rx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ry=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.tx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("alpha",keyword) == 0) { primitive_type=AlphaPrimitive; break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } if (LocaleCompare("border-color",keyword) == 0) { 
(void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->border_color,exception); break; } status=MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("class",keyword) == 0) { const char *mvg_class; (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } if (LocaleCompare(token,graphic_context[n]->id) == 0) break; mvg_class=(const char *) GetValueFromSplayTree(macros,token); if ((graphic_context[n]->render != MagickFalse) && (mvg_class != (const char *) NULL) && (p > primitive)) { char *elements; ssize_t offset; /* Inject class elements in stream. */ offset=(ssize_t) (p-primitive); elements=AcquireString(primitive); elements[offset]='\0'; (void) ConcatenateString(&elements,mvg_class); (void) ConcatenateString(&elements,"\n"); (void) ConcatenateString(&elements,q); primitive=DestroyString(primitive); primitive=elements; q=primitive+offset; } break; } if (LocaleCompare("clip-path",keyword) == 0) { const char *clip_path; /* Take a node from within the MVG document, and duplicate it here. 
*/ (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } (void) CloneString(&graphic_context[n]->clip_mask,token); clip_path=(const char *) GetValueFromSplayTree(macros,token); if (clip_path != (const char *) NULL) { if (graphic_context[n]->clipping_mask != (Image *) NULL) graphic_context[n]->clipping_mask= DestroyImage(graphic_context[n]->clipping_mask); graphic_context[n]->clipping_mask=DrawClippingMask(image, graphic_context[n],token,clip_path,exception); if (graphic_context[n]->compliance != SVGCompliance) { clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image, graphic_context[n]->clip_mask,clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } } break; } if (LocaleCompare("clip-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("clip-units",keyword) == 0) { ssize_t clip_units; (void) GetNextToken(q,&q,extent,token); clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse, token); if (clip_units == -1) { status=MagickFalse; break; } graphic_context[n]->clip_units=(ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx=draw_info->bounds.x2; affine.sy=draw_info->bounds.y2; affine.tx=draw_info->bounds.x1; affine.ty=draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } if (LocaleCompare("compliance",keyword) == 0) { /* MVG compliance associates a clipping mask with an image; SVG compliance associates a clipping mask with a graphics context. 
*/ (void) GetNextToken(q,&q,extent,token); graphic_context[n]->compliance=(ComplianceType) ParseCommandOption( MagickComplianceOptions,MagickFalse,token); break; } if (LocaleCompare("currentColor",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { ssize_t decorate; (void) GetNextToken(q,&q,extent,token); decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse, token); if (decorate == -1) { status=MagickFalse; break; } graphic_context[n]->decorate=(DecorationType) decorate; break; } if (LocaleCompare("density",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->density,token); break; } if (LocaleCompare("direction",keyword) == 0) { ssize_t direction; (void) GetNextToken(q,&q,extent,token); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, token); if (direction == -1) status=MagickFalse; else graphic_context[n]->direction=(DirectionType) direction; break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { primitive_type=EllipsePrimitive; break; } if (LocaleCompare("encoding",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->encoding,token); break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->fill_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->fill,exception); if (graphic_context[n]->fill_alpha != OpaqueAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; } break; } if 
(LocaleCompare("fill-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->fill_alpha*=opacity; else graphic_context[n]->fill_alpha=QuantumRange*opacity; if (graphic_context[n]->fill.alpha != TransparentAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; else graphic_context[n]->fill.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("fill-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("font",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->font,token); if (LocaleCompare("none",token) == 0) graphic_context[n]->font=(char *) RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->family,token); break; } if (LocaleCompare("font-size",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->pointsize=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("font-stretch",keyword) == 0) { ssize_t stretch; (void) GetNextToken(q,&q,extent,token); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token); if (stretch == -1) { status=MagickFalse; break; } graphic_context[n]->stretch=(StretchType) stretch; break; } if 
(LocaleCompare("font-style",keyword) == 0) { ssize_t style; (void) GetNextToken(q,&q,extent,token); style=ParseCommandOption(MagickStyleOptions,MagickFalse,token); if (style == -1) { status=MagickFalse; break; } graphic_context[n]->style=(StyleType) style; break; } if (LocaleCompare("font-weight",keyword) == 0) { ssize_t weight; (void) GetNextToken(q,&q,extent,token); weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight=(size_t) weight; break; } status=MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gravity",keyword) == 0) { ssize_t gravity; (void) GetNextToken(q,&q,extent,token); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token); if (gravity == -1) { status=MagickFalse; break; } graphic_context[n]->gravity=(GravityType) gravity; break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { ssize_t compose; primitive_type=ImagePrimitive; (void) GetNextToken(q,&q,extent,token); compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token); if (compose == -1) { status=MagickFalse; break; } graphic_context[n]->compose=(CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interline_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("interword-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); 
graphic_context[n]->kerning=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("letter-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (IsPoint(token) == MagickFalse) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); clone_info->text=AcquireString(" "); status&=GetTypeMetrics(image,clone_info,&metrics,exception); graphic_context[n]->kerning=metrics.width* GetDrawValue(token,&next_token); clone_info=DestroyDrawInfo(clone_info); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("line",keyword) == 0) { primitive_type=LinePrimitive; break; } status=MagickFalse; break; } case 'm': case 'M': { if (LocaleCompare("mask",keyword) == 0) { const char *mask_path; /* Take a node from within the MVG document, and duplicate it here. */ (void) GetNextToken(q,&q,extent,token); mask_path=(const char *) GetValueFromSplayTree(macros,token); if (mask_path != (const char *) NULL) { if (graphic_context[n]->composite_mask != (Image *) NULL) graphic_context[n]->composite_mask= DestroyImage(graphic_context[n]->composite_mask); graphic_context[n]->composite_mask=DrawCompositeMask(image, graphic_context[n],token,mask_path,exception); if (graphic_context[n]->compliance != SVGCompliance) status=SetImageMask(image,CompositePixelMask, graphic_context[n]->composite_mask,exception); } break; } status=MagickFalse; break; } case 'o': case 'O': { if (LocaleCompare("offset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) { graphic_context[n]->fill_alpha*=opacity; graphic_context[n]->stroke_alpha*=opacity; } else { graphic_context[n]->fill_alpha=QuantumRange*opacity; graphic_context[n]->stroke_alpha=QuantumRange*opacity; } break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if (LocaleCompare("pop",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) break; if (LocaleCompare("clip-path",token) == 0) break; if (LocaleCompare("defs",token) == 0) { defsDepth--; graphic_context[n]->render=defsDepth > 0 ? MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) break; if (LocaleCompare("graphic-context",token) == 0) { if (n <= 0) { (void) ThrowMagickException(exception,GetMagickModule(), DrawError,"UnbalancedGraphicContextPushPop","`%s'",token); status=MagickFalse; n=0; break; } if ((graphic_context[n]->clip_mask != (char *) NULL) && (graphic_context[n]->compliance != SVGCompliance)) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0) status=SetImageMask(image,WritePixelMask,(Image *) NULL, exception); graphic_context[n]=DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("mask",token) == 0) break; if (LocaleCompare("pattern",token) == 0) break; if (LocaleCompare("symbol",token) == 0) { symbolDepth--; graphic_context[n]->render=symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } if (LocaleCompare("push",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) { /* Class context. */ for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"class") != 0) continue; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("clip-path",token) == 0) { (void) GetNextToken(q,&q,extent,token); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"clip-path") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("defs",token) == 0) { defsDepth++; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent], type[MagickPathExtent]; SegmentInfo segment; (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); segment.x1=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y1=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.x2=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y2=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (LocaleCompare(type,"radial") == 0) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); } for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"gradient") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); bounds.x1=graphic_context[n]->affine.sx*segment.x1+ graphic_context[n]->affine.ry*segment.y1+ graphic_context[n]->affine.tx; bounds.y1=graphic_context[n]->affine.rx*segment.x1+ graphic_context[n]->affine.sy*segment.y1+ graphic_context[n]->affine.ty; bounds.x2=graphic_context[n]->affine.sx*segment.x2+ 
graphic_context[n]->affine.ry*segment.y2+ graphic_context[n]->affine.tx; bounds.y2=graphic_context[n]->affine.rx*segment.x2+ graphic_context[n]->affine.sy*segment.y2+ graphic_context[n]->affine.ty; (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name); (void) SetImageArtifact(image,key,type); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0), MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0), bounds.x1,bounds.y1); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; graphic_context=(DrawInfo **) ResizeQuantumMemory( graphic_context,(size_t) (n+1),sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL, graphic_context[n-1]); if (*q == '"') { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->id,token); } break; } if (LocaleCompare("mask",token) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("pattern",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent]; RectangleInfo region; (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); region.x=CastDoubleToLong(ceil(GetDrawValue(token, &next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); region.y=CastDoubleToLong(ceil(GetDrawValue(token, &next_token)-0.5)); if (token == next_token) 
ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); region.width=(size_t) CastDoubleToLong(floor(GetDrawValue( token,&next_token)+0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); region.height=(size_t) floor(GetDrawValue(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(token,exception); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"pattern") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) region.width,(double) region.height,(double) region.x,(double) region.y); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("symbol",token) == 0) { symbolDepth++; graphic_context[n]->render=symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } status=MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0))); affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0))); break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("skewX",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.ry=sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.rx=(-tan(DegreesToRadians(angle)/2.0)); break; } if (LocaleCompare("stop-color",keyword) == 0) { PixelInfo stop_color; number_stops++; if (number_stops == 1) stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops)); else if (number_stops > 2) stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops, sizeof(*stops)); if (stops 
== (StopInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance,&stop_color, exception); stops[number_stops-1].color=stop_color; (void) GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; stops[number_stops-1].offset=factor*GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->stroke_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->stroke,exception); if (graphic_context[n]->stroke_alpha != OpaqueAlpha) graphic_context[n]->stroke.alpha= graphic_context[n]->stroke_alpha; } break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ? 
MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *) NULL) graphic_context[n]->dash_pattern=(double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *r; r=q; (void) GetNextToken(r,&r,extent,token); if (*token == ',') (void) GetNextToken(r,&r,extent,token); for (x=0; IsPoint(token) != MagickFalse; x++) { (void) GetNextToken(r,&r,extent,token); if (*token == ',') (void) GetNextToken(r,&r,extent,token); } graphic_context[n]->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); status=MagickFalse; break; } (void) memset(graphic_context[n]->dash_pattern,0,(size_t) (2*x+2)*sizeof(*graphic_context[n]->dash_pattern)); for (j=0; j < x; j++) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->dash_pattern[j]=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->dash_pattern[j] < 0.0) status=MagickFalse; } if ((x & 0x01) != 0) for ( ; j < (2*x); j++) graphic_context[n]->dash_pattern[j]= graphic_context[n]->dash_pattern[j-x]; graphic_context[n]->dash_pattern[j]=0.0; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke-dashoffset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->dash_offset=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { ssize_t linecap; (void) GetNextToken(q,&q,extent,token); linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token); if (linecap == -1) { 
status=MagickFalse; break; } graphic_context[n]->linecap=(LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { ssize_t linejoin; (void) GetNextToken(q,&q,extent,token); linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse, token); if (linejoin == -1) { status=MagickFalse; break; } graphic_context[n]->linejoin=(LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->miterlimit=StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->stroke_alpha*=opacity; else graphic_context[n]->stroke_alpha=QuantumRange*opacity; if (graphic_context[n]->stroke.alpha != TransparentAlpha) graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha; else graphic_context[n]->stroke.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("stroke-width",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; graphic_context[n]->stroke_width=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; cursor=0.0; break; } if (LocaleCompare("text-align",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; 
break; } if (LocaleCompare("text-anchor",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->text_antialias=StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->undercolor,exception); break; } if (LocaleCompare("translate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.tx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); cursor=0.0; break; } status=MagickFalse; break; } case 'u': case 'U': { if (LocaleCompare("use",keyword) == 0) { const char *use; /* Get a macro from the MVG document, and "use" it here. 
*/ (void) GetNextToken(q,&q,extent,token); use=(const char *) GetValueFromSplayTree(macros,token); if (use != (const char *) NULL) { clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); (void) CloneString(&clone_info->primitive,use); status=RenderMVGContent(image,clone_info,depth+1,exception); clone_info=DestroyDrawInfo(clone_info); } break; } status=MagickFalse; break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.x=CastDoubleToLong(ceil( GetDrawValue(token,&next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.y=CastDoubleToLong(ceil( GetDrawValue(token,&next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.width=(size_t) CastDoubleToLong( floor(GetDrawValue(token,&next_token)+0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.height=(size_t) CastDoubleToLong( floor(GetDrawValue(token,&next_token)+0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'w': case 'W': { if (LocaleCompare("word-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx-1.0) >= MagickEpsilon) || (fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) 
|| (fabs(affine.sy-1.0) >= MagickEpsilon) || (fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon)) { graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx; graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx; graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy; graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy; graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+ current.tx; graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+ current.ty; } if (primitive_type == UndefinedPrimitive) { if (*q == '\0') { if (number_stops > 1) { GradientType type; type=LinearGradient; if (draw_info->gradient.type == RadialGradient) type=RadialGradient; (void) GradientImage(image,type,PadSpread,stops,number_stops, exception); } if (number_stops > 0) stops=(StopInfo *) RelinquishMagickMemory(stops); } if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),p); continue; } /* Parse the primitive attributes. */ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *) NULL) primitive_info[i].text=DestroyString(primitive_info[i].text); i=0; mvg_info.offset=i; j=0; primitive_info[0].point.x=0.0; primitive_info[0].point.y=0.0; primitive_info[0].coordinates=0; primitive_info[0].method=FloodfillMethod; primitive_info[0].closed_subpath=MagickFalse; for (x=0; *q != '\0'; x++) { /* Define points. 
*/ if (IsPoint(q) == MagickFalse) break; (void) GetNextToken(q,&q,extent,token); point.x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); point.y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; primitive_info[i].closed_subpath=MagickFalse; i++; mvg_info.offset=i; if (i < (ssize_t) number_points) continue; status&=CheckPrimitiveExtent(&mvg_info,(double) number_points); } if (status == MagickFalse) break; if ((primitive_info[j].primitive == TextPrimitive) || (primitive_info[j].primitive == ImagePrimitive)) if (primitive_info[j].text != (char *) NULL) primitive_info[j].text=DestroyString(primitive_info[j].text); primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; primitive_info[j].method=FloodfillMethod; primitive_info[j].closed_subpath=MagickFalse; /* Circumscribe primitive within a circle. */ bounds.x1=primitive_info[j].point.x; bounds.y1=primitive_info[j].point.y; bounds.x2=primitive_info[j].point.x; bounds.y2=primitive_info[j].point.y; for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++) { point=primitive_info[j+k].point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.y < bounds.y1) bounds.y1=point.y; if (point.x > bounds.x2) bounds.x2=point.x; if (point.y > bounds.y2) bounds.y2=point.y; } /* Speculate how many points our primitive might consume. 
*/ coordinates=(double) primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { coordinates*=5.0; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot(alpha,beta); coordinates*=5.0; coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0* BezierQuantum+360.0; break; } case BezierPrimitive: { coordinates=(BezierQuantum*(double) primitive_info[j].coordinates); break; } case PathPrimitive: { char *s, *t; (void) GetNextToken(q,&q,extent,token); coordinates=1.0; t=token; for (s=token; *s != '\0'; s=t) { double value; value=GetDrawValue(s,&t); (void) value; if (s == t) { t++; continue; } coordinates++; } for (s=token; *s != '\0'; s++) if (strspn(s,"AaCcQqSsTt") != 0) coordinates+=(20.0*BezierQuantum)+360.0; break; } default: break; } if (status == MagickFalse) break; if (((size_t) (i+coordinates)) >= number_points) { /* Resize based on speculative points required by primitive. */ number_points+=coordinates+1; if (number_points < (size_t) coordinates) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } mvg_info.offset=i; status&=CheckPrimitiveExtent(&mvg_info,(double) number_points); } status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad); if (status == MagickFalse) break; mvg_info.offset=j; switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } status&=TracePoint(primitive_info+j,primitive_info[j].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case LinePrimitive: { double dx, dy, maximum_length; if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } dx=primitive_info[i].point.x-primitive_info[i-1].point.x; dy=primitive_info[i].point.y-primitive_info[i-1].point.y; maximum_length=hypot(dx,dy); if (maximum_length > (MaxBezierCoordinates/100.0)) 
ThrowPointExpectedException(keyword,exception); status&=TraceLine(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+2].point.x < 0.0) || (primitive_info[j+2].point.y < 0.0)) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0) { status=MagickFalse; break; } if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0) { status=MagickFalse; break; } status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } status&=TraceArc(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x < 0.0) || (primitive_info[j+1].point.y < 0.0)) { status=MagickFalse; break; } status&=TraceEllipse(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceCircle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PolylinePrimitive: { if (primitive_info[j].coordinates < 1) { status=MagickFalse; break; } break; } case 
PolygonPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; primitive_info[j].closed_subpath=MagickTrue; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } status&=TraceBezier(&mvg_info,primitive_info[j].coordinates); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PathPrimitive: { coordinates=(double) TracePath(&mvg_info,token,exception); if (coordinates < 0.0) { status=MagickFalse; break; } i=(ssize_t) (j+coordinates); break; } case AlphaPrimitive: case ColorPrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); method=ParseCommandOption(MagickMethodOptions,MagickFalse,token); if (method == -1) { status=MagickFalse; break; } primitive_info[j].method=(PaintMethod) method; break; } case TextPrimitive: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } if (*token != ',') (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); /* Compute text cursor offset. 
*/ clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) && (fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon)) { mvg_info.point=primitive_info->point; primitive_info->point.x+=cursor; } else { mvg_info.point=primitive_info->point; cursor=0.0; } clone_info->render=MagickFalse; clone_info->text=AcquireString(token); status&=GetTypeMetrics(image,clone_info,&metrics,exception); clone_info=DestroyDrawInfo(clone_info); cursor+=metrics.width; if (graphic_context[n]->compliance != SVGCompliance) cursor=0.0; break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); break; } } mvg_info.offset=i; if (status == 0) break; primitive_info[i].primitive=UndefinedPrimitive; if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1), p); /* Sanity check. */ status&=CheckPrimitiveExtent(&mvg_info,ExpandAffine( &graphic_context[n]->affine)); if (status == 0) break; status&=CheckPrimitiveExtent(&mvg_info,(double) graphic_context[n]->stroke_width); if (status == 0) break; if (i == 0) continue; /* Transform points. 
*/ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+ graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx; primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+ graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty; point=primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1=point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1=point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2=point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2=point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) && (graphic_context[n]->clip_mask != (char *) NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0)) { const char *clip_path; clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image,graphic_context[n]->clip_mask, clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } status&=DrawPrimitive(image,graphic_context[n],primitive_info, exception); } proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image"); /* Relinquish resources. 
*/
  macros=DestroySplayTree(macros);
  token=DestroyString(token);
  if (primitive_info != (PrimitiveInfo *) NULL)
    {
      /*
        Release any text payloads still attached to text/image primitives
        before freeing the primitive array itself.
      */
      for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
        if ((primitive_info[i].primitive == TextPrimitive) ||
            (primitive_info[i].primitive == ImagePrimitive))
          if (primitive_info[i].text != (char *) NULL)
            primitive_info[i].text=DestroyString(primitive_info[i].text);
      primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
    }
  primitive=DestroyString(primitive);
  if (stops != (StopInfo *) NULL)
    stops=(StopInfo *) RelinquishMagickMemory(stops);
  /*
    Pop every graphic context still on the stack (unbalanced pushes leave
    n > 0 here).
  */
  for ( ; n >= 0; n--)
    graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
  graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
  if (status == MagickFalse)
    ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
      keyword);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
  DrawImage() renders the MVG primitives held in draw_info->primitive onto
  image.  It simply starts RenderMVGContent() at recursion depth 0; any
  errors are reported in exception and via the MagickFalse return.
*/
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
  ExceptionInfo *exception)
{
  return(RenderMVGContent(image,draw_info,0,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P a t t e r n P a t h                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPatternPath() draws a pattern.
%
%  The format of the DrawPatternPath method is:
%
%      MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
%        const char *name,Image **pattern,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o name: the pattern name.
%
%    o pattern: on success, receives a newly rendered pattern image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    property[MagickPathExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  /*
    The MVG "push pattern" handler stored the pattern's MVG text under the
    pattern name and its canvas size under "<name>-geometry"; both artifacts
    must be present before we can render.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,property);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,property);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  /* Replace any previously rendered pattern image. */
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  /* Render onto a fully transparent canvas. */
  (void) QueryColorCompliance("#00000000",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  /*
    Clone the draw info, dropping any fill/stroke pattern images so the
    pattern does not recursively reference itself.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=DestroyImage(clone_info->stroke_pattern);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
  type=GetImageArtifact(image,property);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
  status=RenderMVGContent(*pattern,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w P o l y g o n P r i m i t i v e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPolygonPrimitive() draws a polygon on the image.
%
%  The format of the DrawPolygonPrimitive method is:
%
%      MagickBooleanType DrawPolygonPrimitive(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Destroy one polygon rasterizer state per worker thread.  Safe to call on a
  partially initialized set: NULL slots are skipped.  Returns NULL.
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  ssize_t
    i;

  assert(polygon_info != (PolygonInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (polygon_info[i] != (PolygonInfo *) NULL)
      polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}

/*
  Build one private PolygonInfo per worker thread from primitive_info: slot 0
  is converted from the primitive path, and slots 1..n are deep copies so each
  thread can mutate its own scanline state.  Returns NULL on failure.
*/
static PolygonInfo **AcquirePolygonThreadSet(
  const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
  PathInfo
    *magick_restrict path_info;

  PolygonInfo
    **polygon_info;

  ssize_t
    i;

  size_t
    number_threads;

  /* One rasterizer state per thread allowed by the thread resource limit. */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PolygonInfo **) NULL);
    }
  (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info,exception); if (path_info == (PathInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); polygon_info[0]=ConvertPathToPolygon(path_info,exception); if (polygon_info[0] == (PolygonInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonThreadSet(polygon_info)); } for (i=1; i < (ssize_t) number_threads; i++) { EdgeInfo *edge_info; ssize_t j; polygon_info[i]=(PolygonInfo *) AcquireMagickMemory( sizeof(*polygon_info[i])); if (polygon_info[i] == (PolygonInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonThreadSet(polygon_info)); } polygon_info[i]->number_edges=0; edge_info=polygon_info[0]->edges; polygon_info[i]->edges=(EdgeInfo *) AcquireQuantumMemory( polygon_info[0]->number_edges,sizeof(*edge_info)); if (polygon_info[i]->edges == (EdgeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonThreadSet(polygon_info)); } (void) memcpy(polygon_info[i]->edges,edge_info, polygon_info[0]->number_edges*sizeof(*edge_info)); for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++) polygon_info[i]->edges[j].points=(PointInfo *) NULL; polygon_info[i]->number_edges=polygon_info[0]->number_edges; for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++) { edge_info=polygon_info[0]->edges+j; polygon_info[i]->edges[j].points=(PointInfo *) AcquireQuantumMemory( edge_info->number_points,sizeof(*edge_info)); if (polygon_info[i]->edges[j].points == (PointInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonThreadSet(polygon_info)); } (void) memcpy(polygon_info[i]->edges[j].points,edge_info->points, edge_info->number_points*sizeof(*edge_info->points)); } 
} path_info=(PathInfo *) RelinquishMagickMemory(path_info); return(polygon_info); } static size_t DestroyEdge(PolygonInfo *polygon_info,const ssize_t edge) { assert(edge < (ssize_t) polygon_info->number_edges); polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory( polygon_info->edges[edge].points); polygon_info->number_edges--; if (edge < (ssize_t) polygon_info->number_edges) (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1, (size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges)); return(polygon_info->number_edges); } static double GetFillAlpha(PolygonInfo *polygon_info,const double mid, const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x, const ssize_t y,double *stroke_alpha) { double alpha, beta, distance, subpath_alpha; PointInfo delta; const PointInfo *q; EdgeInfo *p; ssize_t i; ssize_t j, winding_number; /* Compute fill & stroke opacity for this (x,y) point. */ *stroke_alpha=0.0; subpath_alpha=0.0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= (p->bounds.y1-mid-0.5)) break; if ((double) y > (p->bounds.y2+mid+0.5)) { (void) DestroyEdge(polygon_info,j); continue; } if (((double) x <= (p->bounds.x1-mid-0.5)) || ((double) x > (p->bounds.x2+mid+0.5))) continue; i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) p->number_points; i++) { if ((double) y <= (p->points[i-1].y-mid-0.5)) break; if ((double) y > (p->points[i].y+mid+0.5)) continue; if (p->scanline != (double) y) { p->scanline=(double) y; p->highwater=(size_t) i; } /* Compute distance between a point and an edge. 
*/
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      /* beta is the projection of (x,y)-q onto the segment direction. */
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          /* Nearest feature is the segment's first endpoint. */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              /* Nearest feature is the segment's second endpoint. */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* Perpendicular (squared) distance to the segment interior. */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x)+MagickEpsilon;
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          /* mid is half the stroke width in device space. */
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;
              else
                {
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) ||
          (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      if (distance > 1.0)
        continue;
      if (fabs(beta) < MagickEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      /* Anti-aliased edge coverage for the fill. */
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        /* Point is entirely right of this edge's box: whole edge crosses. */
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* Sign of the cross product decides which side of the segment we are on. */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* Even-odd rule: inside when the winding number is odd. */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_alpha);
}

static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *artifact;

  MagickBooleanType
    fill,
    status;

  double
    mid;

  PolygonInfo
    **magick_restrict polygon_info;

  EdgeInfo
    *p;

  ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  /* A single coordinate cannot form a polygon; nothing to draw. */
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonThreadSet(primitive_info,exception);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin draw-polygon");
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ?
MagickTrue : MagickFalse; mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; bounds=polygon_info[0]->edges[0].bounds; artifact=GetImageArtifact(image,"draw:render-bounding-rectangles"); if (IsStringTrue(artifact) != MagickFalse) (void) DrawBoundingRectangles(image,draw_info,polygon_info[0],exception); for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++) { p=polygon_info[0]->edges+i; if (p->bounds.x1 < bounds.x1) bounds.x1=p->bounds.x1; if (p->bounds.y1 < bounds.y1) bounds.y1=p->bounds.y1; if (p->bounds.x2 > bounds.x2) bounds.x2=p->bounds.x2; if (p->bounds.y2 > bounds.y2) bounds.y2=p->bounds.y2; } bounds.x1-=(mid+1.0); bounds.y1-=(mid+1.0); bounds.x2+=(mid+1.0); bounds.y2+=(mid+1.0); if ((bounds.x1 >= (double) image->columns) || (bounds.y1 >= (double) image->rows) || (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0)) { polygon_info=DestroyPolygonThreadSet(polygon_info); return(MagickTrue); /* virtual polygon */ } bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ? (double) image->columns-1.0 : bounds.x1; bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ? (double) image->rows-1.0 : bounds.y1; bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ? (double) image->columns-1.0 : bounds.x2; bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ? (double) image->rows-1.0 : bounds.y2; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); if ((primitive_info->coordinates == 1) || (polygon_info[0]->number_edges == 0)) { /* Draw point. 
*/ start_y=CastDoubleToLong(ceil(bounds.y1-0.5)); stop_y=CastDoubleToLong(floor(bounds.y2+0.5)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,stop_y-start_y+1,1) #endif for (y=start_y; y <= stop_y; y++) { MagickBooleanType sync; PixelInfo pixel; ssize_t x; Quantum *magick_restrict q; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=CastDoubleToLong(ceil(bounds.x1-0.5)); stop_x=CastDoubleToLong(floor(bounds.x2+0.5)); x=start_x; q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for ( ; x <= stop_x; x++) { if ((x == CastDoubleToLong(ceil(primitive_info->point.x-0.5))) && (y == CastDoubleToLong(ceil(primitive_info->point.y-0.5)))) { GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); } q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " end draw-polygon"); return(status); } /* Draw polygon or line. 
*/ start_y=CastDoubleToLong(ceil(bounds.y1-0.5)); stop_y=CastDoubleToLong(floor(bounds.y2+0.5)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,stop_y-start_y+1,1) #endif for (y=start_y; y <= stop_y; y++) { const int id = GetOpenMPThreadId(); Quantum *magick_restrict q; ssize_t x; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=CastDoubleToLong(ceil(bounds.x1-0.5)); stop_x=CastDoubleToLong(floor(bounds.x2+0.5)); q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+ 1),1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=start_x; x <= stop_x; x++) { double fill_alpha, stroke_alpha; PixelInfo fill_color, stroke_color; /* Fill and/or stroke. */ fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule, x,y,&stroke_alpha); if (draw_info->stroke_antialias == MagickFalse) { fill_alpha=fill_alpha > 0.5 ? 1.0 : 0.0; stroke_alpha=stroke_alpha > 0.5 ? 
1.0 : 0.0; } GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception); CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q, (double) GetPixelAlpha(image,q),q); GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception); CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q, (double) GetPixelAlpha(image,q),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image. % % The format of the DrawPrimitive method is: % % MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info, % PrimitiveInfo *primitive_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. % */ static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info) { const char *methods[] = { "point", "replace", "floodfill", "filltoborder", "reset", "?" 
}; PointInfo p, point, q; ssize_t i, x; ssize_t coordinates, y; x=CastDoubleToLong(ceil(primitive_info->point.x-0.5)); y=CastDoubleToLong(ceil(primitive_info->point.y-0.5)); switch (primitive_info->primitive) { case AlphaPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ColorPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ImagePrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ImagePrimitive %.20g,%.20g",(double) x,(double) y); return; } case PointPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "PointPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case TextPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "TextPrimitive %.20g,%.20g",(double) x,(double) y); return; } default: break; } coordinates=0; p=primitive_info[0].point; q.x=(-1.0); q.y=(-1.0); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; if (coordinates <= 0) { coordinates=(ssize_t) primitive_info[i].coordinates; (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin open (%.20g)",(double) coordinates); p=point; } point=primitive_info[i].point; if ((fabs(q.x-point.x) >= MagickEpsilon) || (fabs(q.y-point.y) >= MagickEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y); else (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y); q=point; coordinates--; if (coordinates > 0) continue; if ((fabs(p.x-point.x) >= MagickEpsilon) || (fabs(p.y-point.y) >= MagickEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)", (double) coordinates); else (void) 
LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)", (double) coordinates); } } MagickExport MagickBooleanType DrawPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { CacheView *image_view; MagickStatusType status; ssize_t i, x; ssize_t y; if (image->debug != MagickFalse) { (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-primitive"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx, draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy, draw_info->affine.tx,draw_info->affine.ty); } status=MagickTrue; if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) || (IsPixelInfoGray(&draw_info->stroke) == MagickFalse))) status&=SetImageColorspace(image,sRGBColorspace,exception); if (draw_info->compliance == SVGCompliance) { status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask, exception); status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask, exception); } x=CastDoubleToLong(ceil(primitive_info->point.x-0.5)); y=CastDoubleToLong(ceil(primitive_info->point.y-0.5)); image_view=AcquireAuthenticCacheView(image,exception); switch (primitive_info->primitive) { case AlphaPrimitive: { if (image->alpha_trait == UndefinedPixelTrait) status&=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; Quantum *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { PixelInfo pixel, target; status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { Quantum 
*magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { ChannelType channel_mask; PixelInfo target; status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } channel_mask=SetImageChannelMask(image,AlphaChannel); status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? 
MagickFalse : MagickTrue,exception); (void) SetImageChannelMask(image,channel_mask); break; } case ResetMethod: { PixelInfo pixel; for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } } break; } case ColorPrimitive: { switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; Quantum *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetPixelInfo(image,&pixel); GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { PixelInfo pixel, target; status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { PixelInfo target; status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; target.green=(double) 
draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue,exception); break; } case ResetMethod: { PixelInfo pixel; GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } } break; } case ImagePrimitive: { AffineMatrix affine; char composite_geometry[MagickPathExtent]; Image *composite_image, *composite_images; ImageInfo *clone_info; RectangleInfo geometry; ssize_t x1, y1; if (primitive_info->text == (char *) NULL) break; clone_info=AcquireImageInfo(); composite_images=(Image *) NULL; if (LocaleNCompare(primitive_info->text,"data:",5) == 0) composite_images=ReadInlineImage(clone_info,primitive_info->text, exception); else if (*primitive_info->text != '\0') { MagickBooleanType path_status; struct stat attributes; /* Read composite image. 
*/ (void) CopyMagickString(clone_info->filename,primitive_info->text, MagickPathExtent); (void) SetImageInfo(clone_info,1,exception); (void) CopyMagickString(clone_info->filename,primitive_info->text, MagickPathExtent); if (clone_info->size != (char *) NULL) clone_info->size=DestroyString(clone_info->size); if (clone_info->extract != (char *) NULL) clone_info->extract=DestroyString(clone_info->extract); path_status=GetPathAttributes(clone_info->filename,&attributes); if (path_status != MagickFalse) { if (S_ISCHR(attributes.st_mode) == 0) composite_images=ReadImage(clone_info,exception); else (void) ThrowMagickException(exception,GetMagickModule(), FileOpenError,"UnableToOpenFile","`%s'", clone_info->filename); } else if ((LocaleCompare(clone_info->magick,"ftp") != 0) && (LocaleCompare(clone_info->magick,"http") != 0) && (LocaleCompare(clone_info->magick,"https") != 0)) composite_images=ReadImage(clone_info,exception); else (void) ThrowMagickException(exception,GetMagickModule(), FileOpenError,"UnableToOpenFile","`%s'",clone_info->filename); } clone_info=DestroyImageInfo(clone_info); if (composite_images == (Image *) NULL) { status=MagickFalse; break; } composite_image=RemoveFirstImageFromList(&composite_images); composite_images=DestroyImageList(composite_images); (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor) NULL,(void *) NULL); x1=CastDoubleToLong(ceil(primitive_info[1].point.x-0.5)); y1=CastDoubleToLong(ceil(primitive_info[1].point.y-0.5)); if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) || ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows))) { /* Resize image. 
*/ (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y); composite_image->filter=image->filter; status&=TransformImage(&composite_image,(char *) NULL, composite_geometry,exception); } if (composite_image->alpha_trait == UndefinedPixelTrait) status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel, exception); if (draw_info->alpha != OpaqueAlpha) status&=SetImageAlpha(composite_image,draw_info->alpha,exception); SetGeometry(image,&geometry); image->gravity=draw_info->gravity; geometry.x=x; geometry.y=y; (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double) composite_image->rows,(double) geometry.x,(double) geometry.y); (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception); affine=draw_info->affine; affine.tx=(double) geometry.x; affine.ty=(double) geometry.y; composite_image->interpolate=image->interpolate; if ((draw_info->compose == OverCompositeOp) || (draw_info->compose == SrcOverCompositeOp)) status&=DrawAffineImage(image,composite_image,&affine,exception); else status&=CompositeImage(image,composite_image,draw_info->compose, MagickTrue,geometry.x,geometry.y,exception); composite_image=DestroyImage(composite_image); break; } case PointPrimitive: { PixelInfo fill_color; Quantum *q; if ((y < 0) || (y >= (ssize_t) image->rows)) break; if ((x < 0) || (x >= (ssize_t) image->columns)) break; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&fill_color,exception); CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,(double) GetPixelAlpha(image,q),q); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case TextPrimitive: { char geometry[MagickPathExtent]; DrawInfo *clone_info; if (primitive_info->text == (char *) NULL) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) 
CloneString(&clone_info->text,primitive_info->text); (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); (void) CloneString(&clone_info->geometry,geometry); status&=AnnotateImage(image,clone_info,exception); clone_info=DestroyDrawInfo(clone_info); break; } default: { double mid, scale; DrawInfo *clone_info; if (IsEventLogging() != MagickFalse) LogPrimitiveInfo(primitive_info); scale=ExpandAffine(&draw_info->affine); if ((draw_info->dash_pattern != (double *) NULL) && (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) && (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) && (draw_info->stroke.alpha != (Quantum) TransparentAlpha)) { /* Draw dash polygon. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); if (status != MagickFalse) status&=DrawDashPolygon(draw_info,primitive_info,image,exception); break; } mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; if ((mid > 1.0) && ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) || (draw_info->stroke_pattern != (Image *) NULL))) { double point_x, point_y; MagickBooleanType closed_path; /* Draw strokes while respecting line cap/join attributes. 
*/ closed_path=primitive_info[0].closed_subpath; i=(ssize_t) primitive_info[0].coordinates; point_x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x); point_y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y); if ((point_x < MagickEpsilon) && (point_y < MagickEpsilon)) closed_path=MagickTrue; if ((((draw_info->linecap == RoundCap) || (closed_path != MagickFalse)) && (draw_info->linejoin == RoundJoin)) || (primitive_info[i].primitive != UndefinedPrimitive)) { status&=DrawPolygonPrimitive(image,draw_info,primitive_info, exception); break; } clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); if (status != MagickFalse) status&=DrawStrokePolygon(image,draw_info,primitive_info,exception); break; } status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception); break; } } image_view=DestroyCacheView(image_view); if (draw_info->compliance == SVGCompliance) { status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception); status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception); } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w S t r o k e P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on % the image while respecting the line cap and join attributes. 
%
%  The format of the DrawStrokePolygon method is:
%
%      MagickBooleanType DrawStrokePolygon(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/

/*
  DrawRoundLinecap() approximates a round cap at the endpoint held in
  primitive_info: it builds a degenerate 4-point polygon around the point,
  each corner perturbed by 2*MagickEpsilon, and renders it with the current
  draw settings.  The 5th element terminates the primitive list.
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  PrimitiveInfo
    linecap[5];

  ssize_t
    i;

  /* clone the endpoint four times, then nudge corners to form a tiny quad */
  for (i=0; i < 4; i++)
    linecap[i]=(*primitive_info);
  linecap[0].coordinates=4;
  linecap[1].point.x+=2.0*MagickEpsilon;
  linecap[2].point.x+=2.0*MagickEpsilon;
  linecap[2].point.y+=2.0*MagickEpsilon;
  linecap[3].point.y+=2.0*MagickEpsilon;
  linecap[4].primitive=UndefinedPrimitive;
  return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}

/*
  DrawStrokePolygon() renders the stroke of each subpath: it clones the draw
  info with fill taken from the stroke settings, converts each subpath to a
  filled stroke outline, and adds round line caps for open subpaths.
*/
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon.
*/
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  /* render the stroke by FILLING its outline: fill <- stroke settings */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  /* each subpath advances by its own coordinate count */
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;
    stroke_polygon=TraceStrokePolygon(draw_info,p,exception);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    /* q is the subpath's last point; caps go on both open ends */
    q=p+p->coordinates-1;
    closed_path=p->closed_subpath;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        status&=DrawRoundLinecap(image,draw_info,p,exception);
        status&=DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A f f i n e M a t r i x                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAffineMatrix() returns an AffineMatrix initialized to the identity
%  matrix.
%
%  The format of the GetAffineMatrix method is:
%
%      void GetAffineMatrix(AffineMatrix *affine_matrix)
%
%  A description of each parameter follows:
%
%    o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  /* zero everything, then set the diagonal: the identity transform */
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sx=1.0;
  affine_matrix->sy=1.0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t D r a w I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetDrawInfo() initializes draw_info to default values from image_info.
%
%  The format of the GetDrawInfo method is:
%
%      void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info..
%
%    o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* defaults: opaque black fill, fully transparent white stroke */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /* remaining defaults come from image options; continues below */
  if
(clone_info->font != (char *) NULL) draw_info->font=AcquireString(clone_info->font); if (clone_info->density != (char *) NULL) draw_info->density=AcquireString(clone_info->density); draw_info->text_antialias=clone_info->antialias; if (fabs(clone_info->pointsize) >= MagickEpsilon) draw_info->pointsize=clone_info->pointsize; draw_info->border_color=clone_info->border_color; if (clone_info->server_name != (char *) NULL) draw_info->server_name=AcquireString(clone_info->server_name); option=GetImageOption(clone_info,"direction"); if (option != (const char *) NULL) draw_info->direction=(DirectionType) ParseCommandOption( MagickDirectionOptions,MagickFalse,option); else draw_info->direction=UndefinedDirection; option=GetImageOption(clone_info,"encoding"); if (option != (const char *) NULL) (void) CloneString(&draw_info->encoding,option); option=GetImageOption(clone_info,"family"); if (option != (const char *) NULL) (void) CloneString(&draw_info->family,option); option=GetImageOption(clone_info,"fill"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill, exception); option=GetImageOption(clone_info,"gravity"); if (option != (const char *) NULL) draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse,option); option=GetImageOption(clone_info,"interline-spacing"); if (option != (const char *) NULL) draw_info->interline_spacing=GetDrawValue(option,&next_token); option=GetImageOption(clone_info,"interword-spacing"); if (option != (const char *) NULL) draw_info->interword_spacing=GetDrawValue(option,&next_token); option=GetImageOption(clone_info,"kerning"); if (option != (const char *) NULL) draw_info->kerning=GetDrawValue(option,&next_token); option=GetImageOption(clone_info,"stroke"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke, exception); option=GetImageOption(clone_info,"strokewidth"); if (option != (const char *) NULL) 
draw_info->stroke_width=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* accept either a named weight or a bare numeric value */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r m u t a t e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the permutation of the (n,k).
%
%  The format of the Permutate method is:
%
%      void Permutate(ssize_t n,ssize_t k)
%
%  A description of each parameter follows:
%
%    o n:
%
%    o k:
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    r;

  ssize_t
    i;

  /*
    r = ((k+1)*...*n) / (1*...*(n-k)) = n!/(k!(n-k)!): despite the name this
    computes the binomial coefficient C(n,k), accumulated in floating point.
  */
  r=1.0;
  for (i=k+1; i <= n; i++)
    r*=i;
  for (i=1; i <= (n-k); i++)
    r/=i;
  return(r);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a c e P r i m i t i v e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TracePrimitive is a collection of methods for generating graphic
%  primitives such as arcs, ellipses, paths, etc.
%
*/

/*
  TraceArc() traces the arc inscribed in the rectangle spanned by `start`
  and `end`: the ellipse center is the rectangle midpoint and the radii are
  half its extents.  `degrees` is forwarded to TraceEllipse() — presumably
  the swept start/stop angles; confirm against TraceEllipse.
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    center,
    radius;

  center.x=0.5*(end.x+start.x);
  center.y=0.5*(end.y+start.y);
  radius.x=fabs(center.x-start.x);
  radius.y=fabs(center.y-start.y);
  return(TraceEllipse(mvg_info,center,radius,degrees));
}

/*
  TraceArcPath() traces an SVG-style elliptical arc from `start` to `end`
  with radii `arc`, x-axis rotation `angle`, and the large-arc/sweep flags,
  by converting the endpoint parameterization to a center parameterization
  and emitting Bezier segments.
*/
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  PrimitiveInfo
    *p;

  ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* degenerate cases: zero-length arc -> point; zero radius -> line */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  /* rotate the chord into the ellipse's axis-aligned frame */
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  if (delta > 1.0)
    {
      /* radii too small to span the endpoints: scale them up uniformly */
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  /* continues: locate the arc center and emit Bezier segments */
  if
(fabs(alpha*alpha+beta*beta) < MagickEpsilon) return(TraceLine(primitive_info,start,end)); factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25; if (factor <= 0.0) factor=0.0; else { factor=sqrt((double) factor); if (sweep == large_arc) factor=(-factor); } center.x=(double) ((points[0].x+points[1].x)/2-factor*beta); center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha); alpha=atan2(points[0].y-center.y,points[0].x-center.x); theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha; if ((theta < 0.0) && (sweep != MagickFalse)) theta+=2.0*MagickPI; else if ((theta > 0.0) && (sweep == MagickFalse)) theta-=2.0*MagickPI; arc_segments=(size_t) CastDoubleToLong(ceil(fabs((double) (theta/(0.5* MagickPI+MagickEpsilon))))); status=MagickTrue; p=primitive_info; for (i=0; i < (ssize_t) arc_segments; i++) { beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments)); gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))* sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/ sin(fmod((double) beta,DegreesToRadians(360.0))); points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+ (double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+ (double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); p->point.x=(p == 
primitive_info) ? start.x : (p-1)->point.x; p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y; (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y* points[0].y); (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y* points[0].y); (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y* points[1].y); (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y* points[1].y); (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y* points[2].y); (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y* points[2].y); if (i == (ssize_t) (arc_segments-1)) (p+3)->point=end; status&=TraceBezier(mvg_info,4); if (status == 0) break; p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; p+=p->coordinates; } if (status == 0) return(MagickFalse); mvg_info->offset=offset; primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceBezier(MVGInfo *mvg_info, const size_t number_coordinates) { double alpha, *coefficients, weight; PointInfo end, point, *points; PrimitiveInfo *primitive_info; PrimitiveInfo *p; ssize_t i, j; size_t control_points, quantum; /* Allocate coefficients. 
*/
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  /*
    Size the tessellation quantum by the largest coordinate span of the
    control polygon; spans beyond MAGICK_SSIZE_MAX are rejected as a
    resource-limit error.
  */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) MAGICK_SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) MAGICK_SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
  coefficients=(double *) AcquireQuantumMemory(number_coordinates,
    sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    {
      if (points != (PointInfo *) NULL)
        points=(PointInfo *) RelinquishMagickMemory(points);
      if (coefficients != (double *) NULL)
        coefficients=(double *) RelinquishMagickMemory(coefficients);
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  control_points=quantum*number_coordinates;
  if (CheckPrimitiveExtent(mvg_info,(double) control_points+1) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  /*
    CheckPrimitiveExtent may relocate the primitive buffer; re-fetch.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  /*
    Compute bezier points: evaluate the Bernstein-form polynomial at
    control_points evenly spaced parameter values in [0,1).  coefficients[]
    are the binomial coefficients C(n-1,j) from Permutate().
  */
  end=primitive_info[number_coordinates-1].point;
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      /* step alpha from (1-w)^(n-1-j) w^j to the next Bernstein term */
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    if (TracePoint(p,points[i]) == MagickFalse)
      {
        points=(PointInfo *) RelinquishMagickMemory(points);
        coefficients=(double *) RelinquishMagickMemory(coefficients);
        return(MagickFalse);
      }
    p+=p->coordinates;
  }
  /* emit the exact curve endpoint last (weight never reaches 1.0 above) */
  if (TracePoint(p,end) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
  return(MagickTrue);
}

/*
  TraceCircle() renders a full circle centered at start whose radius is the
  distance from start to end, as a 0..360 degree ellipse with equal radii.
*/
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  double
    alpha,
    beta,
    radius;

  PointInfo
    offset,
    degrees;

  alpha=end.x-start.x;
  beta=end.y-start.y;
  radius=hypot((double) alpha,(double) beta);
  offset.x=(double) radius;
  offset.y=(double) radius;
  degrees.x=0.0;
  degrees.y=360.0;
  return(TraceEllipse(mvg_info,start,offset,degrees));
}

/*
  TraceEllipse() renders the arc of an ellipse (center, radii) spanning
  arc.x..arc.y degrees as a segmented polyline appended at mvg_info->offset.
*/
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  double
    coordinates,
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
*primitive_info;

  PrimitiveInfo
    *p;

  ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(MagickTrue);  /* degenerate ellipse: draw nothing */
  /*
    Pick an angular step: default pi/8, refined for large radii so segment
    length stays roughly constant.
  */
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
  angle.x=DegreesToRadians(arc.x);
  y=arc.y;
  while (y < arc.x)
    y+=360.0;  /* normalize so the sweep is non-negative */
  angle.y=DegreesToRadians(y);
  coordinates=ceil((angle.y-angle.x)/step+1.0);
  if (CheckPrimitiveExtent(mvg_info,coordinates) == MagickFalse)
    return(MagickFalse);
  /* re-fetch: CheckPrimitiveExtent may relocate the buffer */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    if (TracePoint(p,point) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  /* emit the exact terminal angle so the arc ends precisely at arc.y */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* a full 360-degree sweep lands back on the first point: closed subpath */
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  TraceLine() renders the segment start->end into primitive_info (which the
  caller has already sized); coincident endpoints degrade to a single point.
*/
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    {
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  if (TracePoint(primitive_info+1,end) == MagickFalse)
    return(MagickFalse);
  (primitive_info+1)->primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}

/*
  TracePath() parses an SVG-style path string (M, L, C, Q, A, Z, etc. --
  uppercase absolute, lowercase relative) and appends the traced subpaths
  at mvg_info->offset.  Returns the total number of coordinates emitted,
  or -1 on a parse or resource failure.
*/
static ssize_t TracePath(MVGInfo *mvg_info,const char *path,
  ExceptionInfo *exception)
{
  char
    *next_token,
    token[MagickPathExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickBooleanType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  PrimitiveInfo
    *q;

  ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc.
*/
        /*
          Parse: rx ry x-axis-rotation large-arc-flag sweep-flag x y,
          repeated while more points follow (implicit command repetition).
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          angle=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          /* lowercase command: coordinates are relative to current point */
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) ==
            MagickFalse)
            return(-1);
          /* tracing may relocate the primitive buffer; re-fetch q */
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bezier curve: two control points plus endpoint; current
          point is the first control polygon vertex.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /*
          Horizontal line to: only an x coordinate is supplied.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to.
*/
        /*
          A move-to finalizes any in-progress subpath before starting a
          new one; subsequent coordinate pairs are implicit line-tos.
        */
        if (mvg_info->offset != subpath_offset)
          {
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          if (i == 0)
            start=point;  /* remember subpath start for a later Z/z */
          i++;
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bezier curve: one control point plus endpoint.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Smooth cubic Bezier curve: the first control point is the
          reflection of the previous curve's second control point about the
          current point (or the current point itself if the previous command
          was not a cubic).
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Smooth quadratic Bezier curve: control point reflected from the
          previous quadratic (or the current point if none).
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Vertical line to: only a y coordinate is supplied.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path: line back to the subpath start, mark the subpath
          closed, and begin a fresh subpath.
        */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
          return(-1);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        if (TracePoint(q,point) == MagickFalse)
          return(-1);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        ThrowPointExpectedException(token,exception);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(-1);
  /*
    Finalize the last subpath and stamp every emitted coordinate with the
    path's primitive type; multiple closed subpaths switch the fill method
    so holes render correctly.
  */
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;
  return((ssize_t) number_coordinates);
}

/*
  TraceRectangle() renders the axis-aligned rectangle with opposite corners
  start and end as a closed 5-point polyline into primitive_info (which the
  caller has already sized).
*/
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  PointInfo
    point;

  PrimitiveInfo
    *p;

  ssize_t
    i;

  p=primitive_info;
  if (TracePoint(p,start) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  point.x=start.x;
  point.y=end.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  if (TracePoint(p,end) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  point.x=end.x;
  point.y=start.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  /* repeat the start point to close the rectangle */
  if (TracePoint(p,start) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  TraceRoundRectangle() renders a rectangle with corners rounded by the
  radii in arc, built from four quarter-ellipse corner arcs joined in order
  and closed back to the first point.
*/
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  PointInfo
degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  PrimitiveInfo
    *p;

  ssize_t
    i;

  ssize_t
    offset;

  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      /* degenerate rectangle: emit an empty primitive */
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  /*
    Clamp corner radii so opposite corners cannot overlap.
  */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /*
    Trace the four corner arcs in order: top-right (270-360 degrees),
    bottom-right (0-90), bottom-left (90-180), top-left (180-270).  After
    each TraceEllipse() the buffer may have been relocated, so p is
    re-fetched before advancing the offset.
  */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  /* close the outline back onto the first traced point */
  if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  /*
    Merge the four arcs plus closing point into one closed subpath and stamp
    the primitive type on every coordinate (walking backwards from p).
  */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
return(MagickTrue); } static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info, const size_t number_vertices,const double offset) { double distance; double dx, dy; ssize_t i; ssize_t j; dx=0.0; dy=0.0; for (i=1; i < (ssize_t) number_vertices; i++) { dx=primitive_info[0].point.x-primitive_info[i].point.x; dy=primitive_info[0].point.y-primitive_info[i].point.y; if ((fabs((double) dx) >= MagickEpsilon) || (fabs((double) dy) >= MagickEpsilon)) break; } if (i == (ssize_t) number_vertices) i=(ssize_t) number_vertices-1L; distance=hypot((double) dx,(double) dy); primitive_info[0].point.x=(double) (primitive_info[i].point.x+ dx*(distance+offset)/distance); primitive_info[0].point.y=(double) (primitive_info[i].point.y+ dy*(distance+offset)/distance); for (j=(ssize_t) number_vertices-2; j >= 0; j--) { dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x; dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y; if ((fabs((double) dx) >= MagickEpsilon) || (fabs((double) dy) >= MagickEpsilon)) break; } distance=hypot((double) dx,(double) dy); primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+ dx*(distance+offset)/distance); primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+ dy*(distance+offset)/distance); return(MagickTrue); } static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info, const PrimitiveInfo *primitive_info,ExceptionInfo *exception) { #define MaxStrokePad (6*BezierQuantum+360) #define CheckPathExtent(pad_p,pad_q) \ { \ if ((pad_p) > MaxBezierCoordinates) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ else \ if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \ { \ if (~extent_p < (pad_p)) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ else \ { \ extent_p+=(pad_p); \ stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \ MaxStrokePad,sizeof(*stroke_p)); \ } \ } \ if ((pad_q) > MaxBezierCoordinates) \ stroke_q=(PointInfo 
*) RelinquishMagickMemory(stroke_q); \ else \ if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \ { \ if (~extent_q < (pad_q)) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ else \ { \ extent_q+=(pad_q); \ stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \ MaxStrokePad,sizeof(*stroke_q)); \ } \ } \ if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \ { \ if (stroke_p != (PointInfo *) NULL) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ if (stroke_q != (PointInfo *) NULL) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ polygon_primitive=(PrimitiveInfo *) \ RelinquishMagickMemory(polygon_primitive); \ (void) ThrowMagickException(exception,GetMagickModule(), \ ResourceLimitError,"MemoryAllocationFailed","`%s'",""); \ return((PrimitiveInfo *) NULL); \ } \ } typedef struct _StrokeSegment { double p, q; } StrokeSegment; double delta_theta, dot_product, mid, miterlimit; MagickBooleanType closed_path; PointInfo box_p[5], box_q[5], center, offset, *stroke_p, *stroke_q; PrimitiveInfo *polygon_primitive, *stroke_polygon; ssize_t i; size_t arc_segments, extent_p, extent_q, number_vertices; ssize_t j, n, p, q; StrokeSegment dx = {0.0, 0.0}, dy = {0.0, 0.0}, inverse_slope = {0.0, 0.0}, slope = {0.0, 0.0}, theta = {0.0, 0.0}; /* Allocate paths. 
*/ number_vertices=primitive_info->coordinates; polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_vertices+2UL,sizeof(*polygon_primitive)); if (polygon_primitive == (PrimitiveInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return((PrimitiveInfo *) NULL); } (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices* sizeof(*polygon_primitive)); offset.x=primitive_info[number_vertices-1].point.x-primitive_info[0].point.x; offset.y=primitive_info[number_vertices-1].point.y-primitive_info[0].point.y; closed_path=(fabs(offset.x) < MagickEpsilon) && (fabs(offset.y) < MagickEpsilon) ? MagickTrue : MagickFalse; if (((draw_info->linejoin == RoundJoin) || (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse)) { polygon_primitive[number_vertices]=primitive_info[1]; number_vertices++; } polygon_primitive[number_vertices].primitive=UndefinedPrimitive; /* Compute the slope for the first line segment, p. */ dx.p=0.0; dy.p=0.0; for (n=1; n < (ssize_t) number_vertices; n++) { dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x; dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y; if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon)) break; } if (n == (ssize_t) number_vertices) { if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse)) { /* Zero length subpath. 
*/ stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory( sizeof(*stroke_polygon)); stroke_polygon[0]=polygon_primitive[0]; stroke_polygon[0].coordinates=0; polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return(stroke_polygon); } n=(ssize_t) number_vertices-1L; } extent_p=2*number_vertices; extent_q=2*number_vertices; stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad, sizeof(*stroke_p)); stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad, sizeof(*stroke_q)); if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) { if (stroke_p != (PointInfo *) NULL) stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); if (stroke_q != (PointInfo *) NULL) stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return((PrimitiveInfo *) NULL); } slope.p=0.0; inverse_slope.p=0.0; if (fabs(dx.p) < MagickEpsilon) { if (dx.p >= 0.0) slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.p) < MagickEpsilon) { if (dy.p >= 0.0) inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.p=dx.p < 0.0 ? 
1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.p=dy.p/dx.p; inverse_slope.p=(-1.0/slope.p); } mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid); if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse)) (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid); offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0))); offset.y=(double) (offset.x*inverse_slope.p); if ((dy.p*offset.x-dx.p*offset.y) > 0.0) { box_p[0].x=polygon_primitive[0].point.x-offset.x; box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p; box_p[1].x=polygon_primitive[n].point.x-offset.x; box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p; box_q[0].x=polygon_primitive[0].point.x+offset.x; box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p; box_q[1].x=polygon_primitive[n].point.x+offset.x; box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p; } else { box_p[0].x=polygon_primitive[0].point.x+offset.x; box_p[0].y=polygon_primitive[0].point.y+offset.y; box_p[1].x=polygon_primitive[n].point.x+offset.x; box_p[1].y=polygon_primitive[n].point.y+offset.y; box_q[0].x=polygon_primitive[0].point.x-offset.x; box_q[0].y=polygon_primitive[0].point.y-offset.y; box_q[1].x=polygon_primitive[n].point.x-offset.x; box_q[1].y=polygon_primitive[n].point.y-offset.y; } /* Create strokes for the line join attribute: bevel, miter, round. */ p=0; q=0; stroke_q[p++]=box_q[0]; stroke_p[q++]=box_p[0]; for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++) { /* Compute the slope for this line segment, q. */ dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x; dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y; dot_product=dx.q*dx.q+dy.q*dy.q; if (dot_product < 0.25) continue; slope.q=0.0; inverse_slope.q=0.0; if (fabs(dx.q) < MagickEpsilon) { if (dx.q >= 0.0) slope.q=dy.q < 0.0 ? 
-1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.q) < MagickEpsilon) { if (dy.q >= 0.0) inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.q=dy.q/dx.q; inverse_slope.q=(-1.0/slope.q); } offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0))); offset.y=(double) (offset.x*inverse_slope.q); dot_product=dy.q*offset.x-dx.q*offset.y; if (dot_product > 0.0) { box_p[2].x=polygon_primitive[n].point.x-offset.x; box_p[2].y=polygon_primitive[n].point.y-offset.y; box_p[3].x=polygon_primitive[i].point.x-offset.x; box_p[3].y=polygon_primitive[i].point.y-offset.y; box_q[2].x=polygon_primitive[n].point.x+offset.x; box_q[2].y=polygon_primitive[n].point.y+offset.y; box_q[3].x=polygon_primitive[i].point.x+offset.x; box_q[3].y=polygon_primitive[i].point.y+offset.y; } else { box_p[2].x=polygon_primitive[n].point.x+offset.x; box_p[2].y=polygon_primitive[n].point.y+offset.y; box_p[3].x=polygon_primitive[i].point.x+offset.x; box_p[3].y=polygon_primitive[i].point.y+offset.y; box_q[2].x=polygon_primitive[n].point.x-offset.x; box_q[2].y=polygon_primitive[n].point.y-offset.y; box_q[3].x=polygon_primitive[i].point.x-offset.x; box_q[3].y=polygon_primitive[i].point.y-offset.y; } if (fabs((double) (slope.p-slope.q)) < MagickEpsilon) { box_p[4]=box_p[1]; box_q[4]=box_q[1]; } else { box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+ box_p[3].y)/(slope.p-slope.q)); box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y); box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+ box_q[3].y)/(slope.p-slope.q)); box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y); } DisableMSCWarning(4127) CheckPathExtent(MaxStrokePad,MaxStrokePad); RestoreMSCWarning dot_product=dx.q*dy.p-dx.p*dy.q; if (dot_product <= 0.0) switch (draw_info->linejoin) { case BevelJoin: 
{ stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++]=box_p[4]; else { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++]=box_q[4]; stroke_p[p++]=box_p[4]; } else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++]=box_p[4]; else { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x); theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x); if (theta.q < theta.p) theta.q+=2.0*MagickPI; arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta. 
q-theta.p)/(2.0*sqrt(PerceptibleReciprocal(mid)))))); DisableMSCWarning(4127) CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad); RestoreMSCWarning stroke_q[q].x=box_q[1].x; stroke_q[q].y=box_q[1].y; q++; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); stroke_q[q].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); stroke_q[q].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); q++; } stroke_q[q++]=box_q[2]; break; } default: break; } else switch (draw_info->linejoin) { case BevelJoin: { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_q[q++]=box_q[4]; else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++]=box_q[4]; stroke_p[p++]=box_p[4]; } else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_q[q++]=box_q[4]; else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x); theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x); if (theta.p < theta.q) theta.p+=2.0*MagickPI; arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta.p- theta.q)/(2.0*sqrt((double) (PerceptibleReciprocal(mid))))))); DisableMSCWarning(4127) CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad); RestoreMSCWarning stroke_p[p++]=box_p[1]; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) 
(j*(theta.q-theta.p)/arc_segments); stroke_p[p].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); stroke_p[p].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); p++; } stroke_p[p++]=box_p[2]; break; } default: break; } slope.p=slope.q; inverse_slope.p=inverse_slope.q; box_p[0]=box_p[2]; box_p[1]=box_p[3]; box_q[0]=box_q[2]; box_q[1]=box_q[3]; dx.p=dx.q; dy.p=dy.q; n=i; } stroke_p[p++]=box_p[1]; stroke_q[q++]=box_q[1]; /* Trace stroked polygon. */ stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon)); if (stroke_polygon == (PrimitiveInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return(stroke_polygon); } for (i=0; i < (ssize_t) p; i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; } for ( ; i < (ssize_t) (p+q+closed_path); i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[p+closed_path].point; i++; } stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; stroke_polygon[i].primitive=UndefinedPrimitive; stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1); stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return(stroke_polygon); }
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 8; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,16);t1++) { lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32)); ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(32*t2-Nz-4,8)),2*t1);t3<=min(min(min(floord(Nt+Ny-4,8),floord(16*t1+Ny+29,8)),floord(32*t2+Ny+28,8)),floord(32*t1-32*t2+Nz+Ny+27,8));t3++) { for (t4=max(max(max(0,ceild(t1-63,64)),ceild(32*t2-Nz-1020,1024)),ceild(8*t3-Ny-1020,1024));t4<=min(min(min(min(floord(Nt+Nx-4,1024),floord(16*t1+Nx+29,1024)),floord(32*t2+Nx+28,1024)),floord(8*t3+Nx+4,1024)),floord(32*t1-32*t2+Nz+Nx+27,1024));t4++) { for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),8*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),8*t3+6),1024*t4+1022),32*t1-32*t2+Nz+29);t5++) { for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(1024*t4,t5+1); ubv=min(1024*t4+1023,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, 
"constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
calculate_E_field_flat_all_in_one.h
// Kronecker-delta-style unit offsets: row flux_dirn selects the +1 step along
// the x0, x1, or x2 axis respectively.
int k_delta[3][3] = {{1,0,0},
                     {0,1,0},
                     {0,0,1}};

/* Calculate the electric flux on both faces in the input direction.
 *
 * For every interior grid point, reads the face-reconstructed left/right
 * Valencia 3-velocity and magnetic field from auxevol_gfs at this point and
 * at the neighboring point one step along flux_dirn, forms the induction
 * fluxes v^i B^j - v^j B^i on each face, combines them with an HLLE-style
 * average, and accumulates the result into the A_i right-hand sides in
 * rhs_gfs.
 *
 * params      : grid parameters consumed by set_Cparameters.h (defines
 *               NGHOSTS, Nxx0/1/2, etc. — see that header).
 * auxevol_gfs : input gridfunctions (reconstructed v and B, both faces).
 * rhs_gfs     : output gridfunctions; AD0GF/AD1GF/AD2GF entries are
 *               INCREMENTED (+=), so callers must zero them beforehand
 *               and may call this once per flux direction.
 * flux_dirn   : 0, 1, or 2 — the axis along which faces are taken.
 */
void calculate_E_field_flat_all_in_one(const paramstruct *params,const REAL *auxevol_gfs,REAL *rhs_gfs,const int flux_dirn) {
#include "GiRaFFE_standalone_Ccodes/set_Cparameters.h"
#pragma omp parallel for
    // Loop over interior points only (ghost zones excluded on all sides).
    for(int i2=NGHOSTS; i2<NGHOSTS+Nxx2; i2++) {
        for(int i1=NGHOSTS; i1<NGHOSTS+Nxx1; i1++) {
            for(int i0=NGHOSTS; i0<NGHOSTS+Nxx0; i0++) {
                // First, we set the index from which we will read memory. indexp1 is incremented by
                // one point in the direction of reconstruction. These correspond to the faces at
                // i-1/2 and i+1/2
                int index = IDX3S(i0,i1,i2);
                int indexp1 = IDX3S(i0+k_delta[flux_dirn][0],i1+k_delta[flux_dirn][1],i2+k_delta[flux_dirn][2]);
                // Now, we read in memory. We need all components of velocity and magnetic field on both
                // the left and right sides of the interface at *both* faces.
                const double Valenciav_rU0 = auxevol_gfs[IDX4ptS(VALENCIAV_RU0GF, index)];
                const double Valenciav_rU1 = auxevol_gfs[IDX4ptS(VALENCIAV_RU1GF, index)];
                const double Valenciav_rU2 = auxevol_gfs[IDX4ptS(VALENCIAV_RU2GF, index)];
                const double B_rU0 = auxevol_gfs[IDX4ptS(B_RU0GF, index)];
                const double B_rU1 = auxevol_gfs[IDX4ptS(B_RU1GF, index)];
                const double B_rU2 = auxevol_gfs[IDX4ptS(B_RU2GF, index)];
                const double Valenciav_lU0 = auxevol_gfs[IDX4ptS(VALENCIAV_LU0GF, index)];
                const double Valenciav_lU1 = auxevol_gfs[IDX4ptS(VALENCIAV_LU1GF, index)];
                const double Valenciav_lU2 = auxevol_gfs[IDX4ptS(VALENCIAV_LU2GF, index)];
                const double B_lU0 = auxevol_gfs[IDX4ptS(B_LU0GF, index)];
                const double B_lU1 = auxevol_gfs[IDX4ptS(B_LU1GF, index)];
                const double B_lU2 = auxevol_gfs[IDX4ptS(B_LU2GF, index)];
                const double Valenciav_rU0_p1 = auxevol_gfs[IDX4ptS(VALENCIAV_RU0GF, indexp1)];
                const double Valenciav_rU1_p1 = auxevol_gfs[IDX4ptS(VALENCIAV_RU1GF, indexp1)];
                const double Valenciav_rU2_p1 = auxevol_gfs[IDX4ptS(VALENCIAV_RU2GF, indexp1)];
                const double B_rU0_p1 = auxevol_gfs[IDX4ptS(B_RU0GF, indexp1)];
                const double B_rU1_p1 = auxevol_gfs[IDX4ptS(B_RU1GF, indexp1)];
                const double B_rU2_p1 = auxevol_gfs[IDX4ptS(B_RU2GF, indexp1)];
                const double Valenciav_lU0_p1 = auxevol_gfs[IDX4ptS(VALENCIAV_LU0GF, indexp1)];
                const double Valenciav_lU1_p1 = auxevol_gfs[IDX4ptS(VALENCIAV_LU1GF, indexp1)];
                const double Valenciav_lU2_p1 = auxevol_gfs[IDX4ptS(VALENCIAV_LU2GF, indexp1)];
                const double B_lU0_p1 = auxevol_gfs[IDX4ptS(B_LU0GF, indexp1)];
                const double B_lU1_p1 = auxevol_gfs[IDX4ptS(B_LU1GF, indexp1)];
                const double B_lU2_p1 = auxevol_gfs[IDX4ptS(B_LU2GF, indexp1)];
                // Calculate the flux vector on each face for each component of the E-field:
                // antisymmetric combinations v^i B^j - v^j B^i (induction-equation fluxes).
                const REAL F1B2_r = (Valenciav_rU1*B_rU2 - Valenciav_rU2*B_rU1);
                const REAL F1B2_l = (Valenciav_lU1*B_lU2 - Valenciav_lU2*B_lU1);
                const REAL F2B0_r = (Valenciav_rU2*B_rU0 - Valenciav_rU0*B_rU2);
                const REAL F2B0_l = (Valenciav_lU2*B_lU0 - Valenciav_lU0*B_lU2);
                const REAL F0B1_r = (Valenciav_rU0*B_rU1 - Valenciav_rU1*B_rU0);
                const REAL F0B1_l = (Valenciav_lU0*B_lU1 - Valenciav_lU1*B_lU0);
                // Compute the state vector for this flux direction:
                // the B component normal to the face (selected by k_delta).
                const REAL U_r = B_rU0*k_delta[flux_dirn][0] + B_rU1*k_delta[flux_dirn][1] + B_rU2*k_delta[flux_dirn][2];
                const REAL U_l = B_lU0*k_delta[flux_dirn][0] + B_lU1*k_delta[flux_dirn][1] + B_lU2*k_delta[flux_dirn][2];
                // Repeat at i+1
                // Calculate the flux vector on each face for each component of the E-field:
                const REAL F1B2_r_p1 = (Valenciav_rU1_p1*B_rU2_p1 - Valenciav_rU2_p1*B_rU1_p1);
                const REAL F1B2_l_p1 = (Valenciav_lU1_p1*B_lU2_p1 - Valenciav_lU2_p1*B_lU1_p1);
                const REAL F2B0_r_p1 = (Valenciav_rU2_p1*B_rU0_p1 - Valenciav_rU0_p1*B_rU2_p1);
                const REAL F2B0_l_p1 = (Valenciav_lU2_p1*B_lU0_p1 - Valenciav_lU0_p1*B_lU2_p1);
                const REAL F0B1_r_p1 = (Valenciav_rU0_p1*B_rU1_p1 - Valenciav_rU1_p1*B_rU0_p1);
                const REAL F0B1_l_p1 = (Valenciav_lU0_p1*B_lU1_p1 - Valenciav_lU1_p1*B_lU0_p1);
                // Compute the state vector for this flux direction
                const REAL U_r_p1 = B_rU0_p1*k_delta[flux_dirn][0] + B_rU1_p1*k_delta[flux_dirn][1] + B_rU2_p1*k_delta[flux_dirn][2];
                const REAL U_l_p1 = B_lU0_p1*k_delta[flux_dirn][0] + B_lU1_p1*k_delta[flux_dirn][1] + B_lU2_p1*k_delta[flux_dirn][2];
                // Basic HLLE solver: 0.5*(F_r + F_l - (U_r - U_l)).
                // NOTE(review): this form corresponds to HLLE with the characteristic
                // speeds apparently fixed at +/-1 (no c_max/c_min appear) — confirm
                // against the full GiRaFFE solver before relying on it.
                const REAL FHLL_1B2 = 0.5*(F1B2_r + F1B2_l - (U_r-U_l));
                const REAL FHLL_2B0 = 0.5*(F2B0_r + F2B0_l - (U_r-U_l));
                const REAL FHLL_0B1 = 0.5*(F0B1_r + F0B1_l - (U_r-U_l));
                // Basic HLLE solver, but at the next point:
                const REAL FHLL_1B2p1 = 0.5*(F1B2_r_p1 + F1B2_l_p1 - (U_r_p1-U_l_p1));
                const REAL FHLL_2B0p1 = 0.5*(F2B0_r_p1 + F2B0_l_p1 - (U_r_p1-U_l_p1));
                const REAL FHLL_0B1p1 = 0.5*(F0B1_r_p1 + F0B1_l_p1 - (U_r_p1-U_l_p1));
                // Accumulate the average of the two face fluxes into the A_i RHS.
                // The (flux_dirn!=N) factor zeroes the component parallel to the
                // flux direction.
                rhs_gfs[IDX4ptS(AD0GF,index)] += 0.25*(FHLL_1B2 + FHLL_1B2p1)*(flux_dirn!=0); // Set to zero for the component in flux_dirn. Is it more efficient to do this sooner? An array-based implementation might be better, too.
                rhs_gfs[IDX4ptS(AD1GF,index)] += 0.25*(FHLL_2B0 + FHLL_2B0p1)*(flux_dirn!=1);
                rhs_gfs[IDX4ptS(AD2GF,index)] += 0.25*(FHLL_0B1 + FHLL_0B1p1)*(flux_dirn!=2);
            } // END LOOP: for(int i0=NGHOSTS; i0<NGHOSTS+Nxx0; i0++)
        } // END LOOP: for(int i1=NGHOSTS; i1<NGHOSTS+Nxx1; i1++)
    } // END LOOP: for(int i2=NGHOSTS; i2<NGHOSTS+Nxx2; i2++)
}
core_dtrmm.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 * University of Tennessee, US,
 * University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztrmm.c, normal z -> d, Fri Sep 28 17:38:23 2018
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"

/***************************************************************************//**
 *
 * @ingroup core_trmm
 *
 * Performs a triangular matrix-matrix multiply of the form
 *
 *          \f[B = \alpha [op(A) \times B] \f], if side = PlasmaLeft  or
 *          \f[B = \alpha [B \times op(A)] \f], if side = PlasmaRight
 *
 * where op( X ) is one of:
 *
 *          - op(A) = A   or
 *          - op(A) = A^T or
 *          - op(A) = A^T (conjugate transpose reduces to transpose in real
 *                         arithmetic; this file is d-generated from ztrmm)
 *
 * alpha is a scalar, B is an m-by-n matrix and A is a unit or non-unit, upper
 * or lower triangular matrix.
 *
 *******************************************************************************
 *
 * @param[in] side
 *          Specifies whether op( A ) appears on the left or on the right of B:
 *          - PlasmaLeft:  alpha*op( A )*B
 *          - PlasmaRight: alpha*B*op( A )
 *
 * @param[in] uplo
 *          Specifies whether the matrix A is upper triangular or lower
 *          triangular:
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] transa
 *          Specifies whether the matrix A is transposed, not transposed or
 *          conjugate transposed:
 *          - PlasmaNoTrans:   A is not transposed;
 *          - PlasmaTrans:     A is transposed;
 *          - PlasmaConjTrans: A is conjugate transposed.
 *          (NOTE(review): the original comment had the NoTrans/Trans
 *          descriptions swapped; corrected here to match BLAS semantics.)
 *
 * @param[in] diag
 *          Specifies whether or not A is unit triangular:
 *          - PlasmaNonUnit: A is non-unit triangular;
 *          - PlasmaUnit:    A is unit triangular.
 *
 * @param[in] m
 *          The number of rows of matrix B.
 *          m >= 0.
 *
 * @param[in] n
 *          The number of columns of matrix B.
 *          n >= 0.
 *
 * @param[in] alpha
 *          The scalar alpha.
 *
 * @param[in] A
 *          The triangular matrix A of dimension lda-by-k, where k is m when
 *          side='L' or 'l' and k is n when when side='R' or 'r'. If uplo =
 *          PlasmaUpper, the leading k-by-k upper triangular part of the array
 *          A contains the upper triangular matrix, and the strictly lower
 *          triangular part of A is not referenced. If uplo = PlasmaLower, the
 *          leading k-by-k lower triangular part of the array A contains the
 *          lower triangular matrix, and the strictly upper triangular part of
 *          A is not referenced. If diag = PlasmaUnit, the diagonal elements of
 *          A are also not referenced and are assumed to be 1.
 *
 * @param[in] lda
 *          The leading dimension of the array A. When side='L' or 'l',
 *          lda >= max(1,m), when side='R' or 'r' then lda >= max(1,n).
 *
 * @param[in,out] B
 *          On entry, the matrix B of dimension ldb-by-n.
 *          On exit, the result of a triangular matrix-matrix multiply
 *          ( alpha*op(A)*B ) or ( alpha*B*op(A) ).
 *
 * @param[in] ldb
 *          The leading dimension of the array B. ldb >= max(1,m).
 *
 ******************************************************************************/
/* Sequential kernel: a thin, type-cast wrapper around CBLAS dtrmm.
 * Declared weak so a tuned implementation can override it at link time. */
__attribute__((weak))
void plasma_core_dtrmm(
    plasma_enum_t side, plasma_enum_t uplo,
    plasma_enum_t transa, plasma_enum_t diag,
    int m, int n,
    double alpha, const double *A, int lda,
                        double *B, int ldb)
{
    cblas_dtrmm(
        CblasColMajor,
        (CBLAS_SIDE)side, (CBLAS_UPLO)uplo,
        (CBLAS_TRANSPOSE)transa, (CBLAS_DIAG)diag,
        m, n,
        (alpha), A, lda,
                 B, ldb);
}

/******************************************************************************/
/* OpenMP-task variant: same operation submitted as a task with data
 * dependences on A (read) and B (read-write); skipped if the sequence has
 * already recorded an error. */
void plasma_core_omp_dtrmm(
    plasma_enum_t side, plasma_enum_t uplo,
    plasma_enum_t transa, plasma_enum_t diag,
    int m, int n,
    double alpha, const double *A, int lda,
                        double *B, int ldb,
    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // k = order of the triangular matrix A: m when applied from the left,
    // n when applied from the right.
    int k = (side == PlasmaLeft) ? m : n;

    #pragma omp task depend(in:A[0:lda*k]) \
                     depend(inout:B[0:ldb*n])
    {
        if (sequence->status == PlasmaSuccess)
            plasma_core_dtrmm(side, uplo,
                              transa, diag,
                              m, n,
                              alpha, A, lda,
                                     B, ldb);
    }
}
polybench.c
/**
 * polybench.c: This file is part of the PolyBench/C 3.2 test suite.
 *
 * Support routines shared by all PolyBench kernels: wall-clock / TSC timing,
 * optional PAPI hardware-counter collection, cache flushing, FIFO
 * scheduling, and 32-byte-aligned data allocation.
 *
 * Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
 * Web address: http://polybench.sourceforge.net
 * License: /LICENSE.OSU.txt
 */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>   /* SIZE_MAX, for the allocation-overflow check */
#include <unistd.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sched.h>
#include <math.h>
#ifdef _OPENMP
# include <omp.h>
#endif

/* By default, collect PAPI counters on thread 0. */
#ifndef POLYBENCH_THREAD_MONITOR
# define POLYBENCH_THREAD_MONITOR 0
#endif

/* Total LLC cache size. By default 32+MB.. */
#ifndef POLYBENCH_CACHE_SIZE_KB
# define POLYBENCH_CACHE_SIZE_KB 32770
#endif

int polybench_papi_counters_threadid = POLYBENCH_THREAD_MONITOR;
double polybench_program_total_flops = 0;

#ifdef POLYBENCH_PAPI
# include <papi.h>
# define POLYBENCH_MAX_NB_PAPI_COUNTERS 96
char* _polybench_papi_eventlist[] = {
#include "papi_counters.list"
  NULL
};
int polybench_papi_eventset;
int polybench_papi_eventlist[POLYBENCH_MAX_NB_PAPI_COUNTERS];
long_long polybench_papi_values[POLYBENCH_MAX_NB_PAPI_COUNTERS];
#endif

/* Timer code (gettimeofday). */
double polybench_t_start, polybench_t_end;
/* Timer code (RDTSC). */
unsigned long long int polybench_c_start, polybench_c_end;

/* Wall-clock time in seconds; returns 0 unless POLYBENCH_TIME is defined. */
static double rtclock()
{
#ifdef POLYBENCH_TIME
  struct timeval Tp;
  int stat;
  stat = gettimeofday (&Tp, NULL);
  if (stat != 0)
    printf ("Error return from gettimeofday: %d", stat);
  return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
#else
  return 0;
#endif
}

#ifdef POLYBENCH_CYCLE_ACCURATE_TIMER
/* Read the x86 time-stamp counter (64-bit, assembled from EDX:EAX). */
static unsigned long long int rdtsc()
{
  unsigned long long int ret = 0;
  unsigned int cycles_lo;
  unsigned int cycles_hi;
  __asm__ volatile ("RDTSC" : "=a" (cycles_lo), "=d" (cycles_hi));
  ret = (unsigned long long int)cycles_hi << 32 | cycles_lo;
  return ret;
}
#endif

/* Evict the LLC by streaming through a zeroed buffer larger than the cache.
 * The assert on tmp keeps the reduction from being optimized away. */
void polybench_flush_cache()
{
  int cs = POLYBENCH_CACHE_SIZE_KB * 1024 / sizeof(double);
  double* flush = (double*) calloc (cs, sizeof(double));
  int i;
  double tmp = 0.0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (i = 0; i < cs; i++)
    tmp += flush[i];
  assert (tmp <= 10.0);
  free (flush);
}

#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
/* Use FIFO scheduler to limit OS interference. Program must be run
 * as root, and this works only for Linux kernels. */
void polybench_linux_fifo_scheduler()
{
  struct sched_param schedParam;
  schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO);
  sched_setscheduler (0, SCHED_FIFO, &schedParam);
}

/* Restore to standard scheduler policy. */
void polybench_linux_standard_scheduler()
{
  struct sched_param schedParam;
  schedParam.sched_priority = sched_get_priority_max (SCHED_OTHER);
  sched_setscheduler (0, SCHED_OTHER, &schedParam);
}
#endif

#ifdef POLYBENCH_PAPI
/* Report a failed/skipped PAPI call and terminate the program. */
static void test_fail(char *file, int line, char *call, int retval)
{
  char buf[128];
  memset(buf, '\0', sizeof(buf));
  if (retval != 0)
    fprintf (stdout,"%-40s FAILED\nLine # %d\n", file, line);
  else
    {
      fprintf (stdout,"%-40s SKIPPED\n", file);
      fprintf (stdout,"Line # %d\n", line);
    }
  if (retval == PAPI_ESYS)
    {
      sprintf (buf, "System error in %s", call);
      perror (buf);
    }
  else if (retval > 0)
    fprintf (stdout,"Error: %s\n", call);
  else if (retval == 0)
    fprintf (stdout,"Error: %s\n", call);
  else
    {
      char errstring[PAPI_MAX_STR_LEN];
      PAPI_perror (retval, errstring, PAPI_MAX_STR_LEN);
      fprintf (stdout,"Error in %s: %s\n", call, errstring);
    }
  fprintf (stdout,"\n");
  if (PAPI_is_initialized ())
    PAPI_shutdown ();
  exit (1);
}

/* Initialize the PAPI library and translate the configured event names
 * into event codes; runs only on the monitoring thread under OpenMP. */
void polybench_papi_init()
{
# ifdef _OPENMP
#pragma omp parallel
  {
#pragma omp master
    {
      if (omp_get_max_threads () < polybench_papi_counters_threadid)
        polybench_papi_counters_threadid = omp_get_max_threads () - 1;
    }
#pragma omp barrier
    if (omp_get_thread_num () == polybench_papi_counters_threadid)
      {
# endif
        int retval;
        polybench_papi_eventset = PAPI_NULL;
        if ((retval = PAPI_library_init (PAPI_VER_CURRENT)) != PAPI_VER_CURRENT)
          test_fail (__FILE__, __LINE__, "PAPI_library_init", retval);
        if ((retval = PAPI_create_eventset (&polybench_papi_eventset)) != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_create_eventset", retval);
        int k;
        for (k = 0; _polybench_papi_eventlist[k]; ++k)
          {
            if ((retval =
                 PAPI_event_name_to_code (_polybench_papi_eventlist[k],
                                          &(polybench_papi_eventlist[k]))) != PAPI_OK)
              test_fail (__FILE__, __LINE__, "PAPI_event_name_to_code", retval);
          }
        polybench_papi_eventlist[k] = 0;
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
}

/* Destroy the event set and shut PAPI down (monitoring thread only). */
void polybench_papi_close()
{
# ifdef _OPENMP
#pragma omp parallel
  {
    if (omp_get_thread_num () == polybench_papi_counters_threadid)
      {
# endif
        int retval;
        if ((retval = PAPI_destroy_eventset (&polybench_papi_eventset)) != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_destroy_eventset", retval);
        if (PAPI_is_initialized ())
          PAPI_shutdown ();
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
}

/* Flush the cache, then add event `evid` to the event set and start
 * counting. Always returns 0 (test_fail exits on error). */
int polybench_papi_start_counter(int evid)
{
# ifndef POLYBENCH_NO_FLUSH_CACHE
  polybench_flush_cache();
# endif
# ifdef _OPENMP
# pragma omp parallel
  {
    if (omp_get_thread_num () == polybench_papi_counters_threadid)
      {
# endif
        int retval = 1;
        char descr[PAPI_MAX_STR_LEN];
        PAPI_event_info_t evinfo;
        PAPI_event_code_to_name (polybench_papi_eventlist[evid], descr);
        if (PAPI_add_event (polybench_papi_eventset,
                            polybench_papi_eventlist[evid]) != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_add_event", 1);
        if (PAPI_get_event_info (polybench_papi_eventlist[evid], &evinfo) != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_get_event_info", retval);
        if ((retval = PAPI_start (polybench_papi_eventset)) != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_start", retval);
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
  return 0;
}

/* Stop counting event `evid`, record its value, and remove it from the
 * event set so the next counter can be measured in isolation. */
void polybench_papi_stop_counter(int evid)
{
# ifdef _OPENMP
# pragma omp parallel
  {
    if (omp_get_thread_num () == polybench_papi_counters_threadid)
      {
# endif
        int retval;
        long_long values[1];
        values[0] = 0;
        if ((retval = PAPI_read (polybench_papi_eventset, &values[0])) != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_read", retval);
        if ((retval = PAPI_stop (polybench_papi_eventset, NULL)) != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_stop", retval);
        polybench_papi_values[evid] = values[0];
        if ((retval = PAPI_remove_event (polybench_papi_eventset,
                                         polybench_papi_eventlist[evid])) != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_remove_event", retval);
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
}

/* Print all recorded counter values (names too if POLYBENCH_PAPI_VERBOSE).
 * NOTE: the early `#endif` closing the _OPENMP region mid-function is
 * intentional and matches the brace structure at the bottom. */
void polybench_papi_print()
{
  int verbose = 0;
# ifdef _OPENMP
# pragma omp parallel
  {
    if (omp_get_thread_num() == polybench_papi_counters_threadid)
      {
#ifdef POLYBENCH_PAPI_VERBOSE
        verbose = 1;
#endif
        if (verbose)
          printf ("On thread %d:\n", polybench_papi_counters_threadid);
#endif
        int evid;
        for (evid = 0; polybench_papi_eventlist[evid] != 0; ++evid)
          {
            if (verbose)
              printf ("%s=", _polybench_papi_eventlist[evid]);
            printf ("%llu ", polybench_papi_values[evid]);
            if (verbose)
              printf ("\n");
          }
        printf ("\n");
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
}
#endif
/* ! POLYBENCH_PAPI */

/* Flush the cache and (optionally) switch to FIFO scheduling before a
 * measured region. */
void polybench_prepare_instruments()
{
#ifndef POLYBENCH_NO_FLUSH_CACHE
  polybench_flush_cache ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
  polybench_linux_fifo_scheduler ();
#endif
}

/* Start the benchmark timer (wall clock or TSC, per build flags). */
void polybench_timer_start()
{
  polybench_prepare_instruments ();
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
  polybench_t_start = rtclock ();
#else
  polybench_c_start = rdtsc ();
#endif
}

/* Stop the benchmark timer and restore the standard scheduler. */
void polybench_timer_stop()
{
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
  polybench_t_end = rtclock ();
#else
  polybench_c_end = rdtsc ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
  polybench_linux_standard_scheduler ();
#endif
}

/* Print the elapsed time (seconds or cycles), or GFLOP/s when
 * POLYBENCH_GFLOPS is set and the program registered its flop count. */
void polybench_timer_print()
{
#ifdef POLYBENCH_GFLOPS
  /* BUG FIX: this referenced the undeclared identifier
   * `__polybench_program_total_flops`; the variable defined at the top of
   * this file is `polybench_program_total_flops`. */
  if (polybench_program_total_flops == 0)
    {
      printf ("[PolyBench][WARNING] Program flops not defined, use polybench_set_program_flops(value)\n");
      printf ("%0.6lf\n", polybench_t_end - polybench_t_start);
    }
  else
    printf ("%0.2lf\n",
            (polybench_program_total_flops /
             (double)(polybench_t_end - polybench_t_start)) / 1000000000);
#else
# ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
  printf ("%0.6f\n", polybench_t_end - polybench_t_start);
# else
  /* BUG FIX: was the nonstandard "%Ld"; the operand is unsigned long long,
   * so the matching C standard conversion is "%llu". */
  printf ("%llu\n", polybench_c_end - polybench_c_start);
# endif
#endif
}

/* 32-byte-aligned allocation; aborts the program on failure. */
static void *
xmalloc (size_t num)
{
  void* new = NULL;
  int ret = posix_memalign (&new, 32, num);
  if (! new || ret)
    {
      fprintf (stderr, "[PolyBench] posix_memalign: cannot allocate memory");
      exit (1);
    }
  return new;
}

/* Allocate n elements of elt_size bytes each, 32-byte aligned.
 * Aborts on overflow or allocation failure (never returns NULL). */
void* polybench_alloc_data(unsigned long long int n, int elt_size)
{
  /* Overflow check (resolves the long-standing FIXME): reject requests
   * whose byte count does not fit in size_t. */
  if (elt_size <= 0 || n > SIZE_MAX / (size_t) elt_size)
    {
      fprintf (stderr, "[PolyBench] polybench_alloc_data: size overflow");
      exit (1);
    }
  size_t val = (size_t) n * (size_t) elt_size;
  void* ret = xmalloc (val);
  return ret;
}
par_interp.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE. See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision$
 ***********************************************************************EHEADER*/

#include "_hypre_parcsr_ls.h"

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterp
 *
 * Builds the classical (Ruge-Stueben style) interpolation operator P for
 * BoomerAMG from the system matrix A and the strength matrix S.
 *
 *   A               - fine-grid system matrix (ParCSR)
 *   CF_marker       - per-row C/F splitting: >= 0 means C-point, negative
 *                     means F-point; -3 marks points excluded from
 *                     interpolation (reset to -1 on exit)
 *   S               - strength-of-connection matrix (same row structure
 *                     pattern as A, subset of entries)
 *   num_cpts_global - global coarse-point partitioning info
 *   num_functions   - number of functions for systems AMG (dof_func gives
 *                     the function of each dof when > 1)
 *   debug_flag      - ==4 enables timing printouts; a negative value turns
 *                     on the zero-diagonal warning (print_level)
 *   trunc_factor    - relative truncation threshold applied to P
 *   max_elmts       - max number of elements kept per row of P (0 = no cap)
 *   col_offd_S_to_A - optional map from S off-diag columns to A off-diag
 *                     columns (NULL when they coincide)
 *   P_ptr           - output: the interpolation matrix P
 *
 * Returns hypre_error_flag.
 *
 * The routine makes two sweeps over the fine grid: the first pass counts
 * nonzeros per thread (so arrays can be allocated exactly), the second
 * pass fills P.  C-points interpolate by identity; F-points interpolate
 * from strongly influencing C-points, with strong F-neighbor connections
 * distributed among common C-points and weak connections lumped into the
 * diagonal.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGBuildInterp( hypre_ParCSRMatrix   *A,
                            HYPRE_Int            *CF_marker,
                            hypre_ParCSRMatrix   *S,
                            HYPRE_BigInt         *num_cpts_global,
                            HYPRE_Int             num_functions,
                            HYPRE_Int            *dof_func,
                            HYPRE_Int             debug_flag,
                            HYPRE_Real            trunc_factor,
                            HYPRE_Int             max_elmts,
                            HYPRE_Int            *col_offd_S_to_A,
                            hypre_ParCSRMatrix  **P_ptr)
{
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   /* Local (diagonal) and off-processor (off-diagonal) parts of A in CSR form */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt    *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);

   /* Strength matrix parts (only the pattern is used; S has no data here) */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int       *S_offd_j = hypre_CSRMatrixJ(S_offd);

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt       *col_map_offd_P;
   HYPRE_Int          *tmp_map_offd = NULL;

   /* CF_marker / dof_func values for the off-processor columns of A */
   HYPRE_Int       *CF_marker_offd = NULL;
   HYPRE_Int       *dof_func_offd = NULL;

   /* Ghost rows of A (rows owned by neighbors that we need for distribution) */
   hypre_CSRMatrix *A_ext;
   HYPRE_Real      *A_ext_data = NULL;
   HYPRE_Int       *A_ext_i = NULL;
   HYPRE_BigInt    *A_ext_j = NULL;

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;

   HYPRE_Real      *P_diag_data;
   HYPRE_Int       *P_diag_i;
   HYPRE_Int       *P_diag_j;
   HYPRE_Real      *P_offd_data;
   HYPRE_Int       *P_offd_i;
   HYPRE_Int       *P_offd_j;

   HYPRE_Int        P_diag_size, P_offd_size;

   /* P_marker[i1] >= jj_begin_row marks i1 as a C-point already inserted in
    * the current row of P (its value is the position in P_diag_j/data);
    * strong_f_marker marks strong F-neighbors needing distribution. */
   HYPRE_Int       *P_marker, *P_marker_offd;

   HYPRE_Int        jj_counter,jj_counter_offd;
   HYPRE_Int       *jj_count, *jj_count_offd;   /* per-thread nonzero counts */
   HYPRE_Int        jj_begin_row,jj_begin_row_offd;
   HYPRE_Int        jj_end_row,jj_end_row_offd;

   HYPRE_Int        start_indexing = 0; /* start indexing for P_data at 0 */

   HYPRE_Int        n_fine = hypre_CSRMatrixNumRows(A_diag);

   HYPRE_Int        strong_f_marker;

   HYPRE_Int       *fine_to_coarse;     /* local fine index -> local coarse index */
   //HYPRE_Int     *fine_to_coarse_offd;
   HYPRE_Int       *coarse_counter;     /* per-thread coarse-point counts */
   HYPRE_Int        coarse_shift;
   HYPRE_BigInt     total_global_cpts;
   //HYPRE_BigInt   my_first_cpt;
   HYPRE_Int        num_cols_P_offd;

   HYPRE_Int        i,i1,i2;
   HYPRE_Int        j,jl,jj,jj1;
   HYPRE_Int        kc;
   HYPRE_BigInt     big_k;
   HYPRE_Int        start;
   HYPRE_Int        sgn;
   HYPRE_Int        c_num;

   HYPRE_Real       diagonal;
   HYPRE_Real       sum;
   HYPRE_Real       distribute;

   HYPRE_Real       zero = 0.0;
   HYPRE_Real       one  = 1.0;

   HYPRE_Int        my_id;
   HYPRE_Int        num_procs;
   HYPRE_Int        num_threads;
   HYPRE_Int        num_sends;
   HYPRE_Int        index;
   HYPRE_Int        ns, ne, size, rest;
   HYPRE_Int        print_level = 0;
   HYPRE_Int       *int_buf_data;

   /* [col_1, col_n) is the global column range owned by this process */
   HYPRE_BigInt     col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int        local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt     col_n = col_1 + (HYPRE_BigInt)local_numrows;

   HYPRE_Real       wall_time;  /* for debugging instrumentation  */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();

#ifdef HYPRE_NO_GLOBAL_PARTITION
   //my_first_cpt = num_cpts_global[0];
   /* Only the last rank knows the global coarse count; broadcast it. */
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
   //my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/

   /* Negative debug_flag doubles as a request for the zero-diagonal warning */
   if (debug_flag < 0)
   {
      debug_flag = -debug_flag;
      print_level = 1;
   }

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   if (num_functions > 1 && num_cols_A_offd)
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);

   /* Pack and exchange CF_marker for columns other ranks need from us */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* Same exchange for dof_func in the systems (multi-function) case */
   if (num_functions > 1)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*----------------------------------------------------------------------
    * Get the ghost rows of A
    *---------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   if (num_procs > 1)
   {
      A_ext      = hypre_ParCSRMatrixExtractBExt(A,A,1);
      A_ext_i    = hypre_CSRMatrixI(A_ext);
      A_ext_j    = hypre_CSRMatrixBigJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);
   }

   /* Renumber A_ext columns in place: entries in our owned range become
    * local diag indices (big_k - col_1); entries matching one of our
    * off-diag columns are encoded as (-kc-1) so the sign distinguishes
    * diag (>= 0) from offd (< 0) later; all other entries are dropped
    * (the rows are compacted via 'index'). */
   index = 0;
   for (i=0; i < num_cols_A_offd; i++)
   {
      for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
      {
         big_k = A_ext_j[j];
         if (big_k >= col_1 && big_k < col_n)
         {
            A_ext_j[index] = big_k - col_1;
            A_ext_data[index++] = A_ext_data[j];
         }
         else
         {
            kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd);
            if (kc > -1)
            {
               A_ext_j[index] = (HYPRE_BigInt)(-kc-1);
               A_ext_data[index++] = A_ext_data[j];
            }
         }
      }
      A_ext_i[i] = index;
   }
   /* Shift row pointers so A_ext_i[i] is the start of (compacted) row i */
   for (i = num_cols_A_offd; i > 0; i--)
      A_ext_i[i] = A_ext_i[i-1];
   if (num_procs > 1) A_ext_i[0] = 0;

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    *  First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    *  Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/

   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    *  Loop over fine grid.
    *-----------------------------------------------------------------------*/

   /* RDF: this looks a little tricky, but doable */
   /* Each thread counts its own contiguous slice [ns, ne) of the rows;
    * the per-thread counts are prefix-summed afterwards. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;

      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          *  If i is a C-point, interpolation is the identity. Also set up
          *  mapping vector.
          *--------------------------------------------------------------------*/

         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }

         /*--------------------------------------------------------------------
          *  If i is an F-point, interpolation is from the C-points that
          *  strongly influence i.
          *--------------------------------------------------------------------*/

         else
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  jj_count[j]++;
               }
            }

            if (num_procs > 1)
            {
               if (col_offd_S_to_A)
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = col_offd_S_to_A[S_offd_j[jj]];
                     if (CF_marker_offd[i1] >= 0)
                     {
                        jj_count_offd[j]++;
                     }
                  }
               }
               else
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = S_offd_j[jj];
                     if (CF_marker_offd[i1] >= 0)
                     {
                        jj_count_offd[j]++;
                     }
                  }
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    *  Allocate  arrays.
    *-----------------------------------------------------------------------*/

   /* Prefix-sum the per-thread counts so each thread knows its offsets */
   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;

   P_diag_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED);
   P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_SHARED);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_SHARED);

   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;

   P_offd_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED);
   P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_SHARED);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_SHARED);

   /*-----------------------------------------------------------------------
    *  Intialize some stuff.
    *-----------------------------------------------------------------------*/

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    *  Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

   /* Convert per-thread coarse numbering into a process-local numbering by
    * adding each thread's coarse offset. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j-1];
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
         fine_to_coarse[i] += coarse_shift;
      //fine_to_coarse[i] += my_first_cpt+coarse_shift;
   }

   /*index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd);

   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time);
      fflush(NULL);
   }*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; */

   /*-----------------------------------------------------------------------
    *  Loop over fine grid points.
    *-----------------------------------------------------------------------*/

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      /* Each thread starts writing at the offsets computed in pass 1 */
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];

      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      if (num_cols_A_offd)
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
      else
         P_marker_offd = NULL;

      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker_offd[i] = -1;
      }
      /* strong_f_marker is decremented after every row, so a marker from a
       * previous row can never be mistaken for this row's. */
      strong_f_marker = -2;

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          *  If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/

         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter]    = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }

         /*--------------------------------------------------------------------
          *  If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/

         else
         {
            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            jj_begin_row = jj_counter;

            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/

               if (CF_marker[i1] >= 0)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter]    = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }

               /*--------------------------------------------------------------
                * If neighbor i1 is an F-point, mark it as a strong F-point
                * whose connection needs to be distributed.
                *--------------------------------------------------------------*/

               else if (CF_marker[i1] != -3)
               {
                  P_marker[i1] = strong_f_marker;
               }
            }
            jj_end_row = jj_counter;

            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;
            jj_begin_row_offd = jj_counter_offd;

            if (num_procs > 1)
            {
               if (col_offd_S_to_A)
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = col_offd_S_to_A[S_offd_j[jj]];

                     /*-----------------------------------------------------------
                      * If neighbor i1 is a C-point, set column number in P_offd_j
                      * and initialize interpolation weight to zero.
                      *-----------------------------------------------------------*/

                     if (CF_marker_offd[i1] >= 0)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
                        P_offd_j[jj_counter_offd] = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }

                     /*-----------------------------------------------------------
                      * If neighbor i1 is an F-point, mark it as a strong F-point
                      * whose connection needs to be distributed.
                      *-----------------------------------------------------------*/

                     else if (CF_marker_offd[i1] != -3)
                     {
                        P_marker_offd[i1] = strong_f_marker;
                     }
                  }
               }
               else
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = S_offd_j[jj];

                     /*-----------------------------------------------------------
                      * If neighbor i1 is a C-point, set column number in P_offd_j
                      * and initialize interpolation weight to zero.
                      *-----------------------------------------------------------*/

                     if (CF_marker_offd[i1] >= 0)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
                        P_offd_j[jj_counter_offd] = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }

                     /*-----------------------------------------------------------
                      * If neighbor i1 is an F-point, mark it as a strong F-point
                      * whose connection needs to be distributed.
                      *-----------------------------------------------------------*/

                     else if (CF_marker_offd[i1] != -3)
                     {
                        P_marker_offd[i1] = strong_f_marker;
                     }
                  }
               }
            }
            jj_end_row_offd = jj_counter_offd;

            /* NOTE: assumes the diagonal entry is stored first in each CSR row
             * (standard hypre convention for A_diag) */
            diagonal = A_diag_data[A_diag_i[i]];

            /* Loop over ith row of A.  First, the diagonal part of A */

            for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
            {
               i1 = A_diag_j[jj];

               /*--------------------------------------------------------------
                * Case 1: neighbor i1 is a C-point and strongly influences i,
                * accumulate a_{i,i1} into the interpolation weight.
                *--------------------------------------------------------------*/

               if (P_marker[i1] >= jj_begin_row)
               {
                  P_diag_data[P_marker[i1]] += A_diag_data[jj];
               }

               /*--------------------------------------------------------------
                * Case 2: neighbor i1 is an F-point and strongly influences i,
                * distribute a_{i,i1} to C-points that strongly infuence i.
                * Note: currently no distribution to the diagonal in this case.
                *--------------------------------------------------------------*/

               else if (P_marker[i1] == strong_f_marker)
               {
                  sum = zero;

                  /*-----------------------------------------------------------
                   * Loop over row of A for point i1 and calculate the sum
                   * of the connections to c-points that strongly influence i.
                   *-----------------------------------------------------------*/

                  /* sgn tracks the sign of i1's diagonal; only entries of the
                   * opposite sign (true connections) take part in the sum */
                  sgn = 1;
                  if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
                  /* Diagonal block part of row i1 */
                  for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
                     {
                        sum += A_diag_data[jj1];
                     }
                  }

                  /* Off-Diagonal block part of row i1 */
                  if (num_procs > 1)
                  {
                     for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0)
                        {
                           sum += A_offd_data[jj1];
                        }
                     }
                  }

                  if (sum != 0)
                  {
                     distribute = A_diag_data[jj] / sum;

                     /*-----------------------------------------------------------
                      * Loop over row of A for point i1 and do the distribution.
                      *-----------------------------------------------------------*/

                     /* Diagonal block part of row i1 */
                     for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
                     {
                        i2 = A_diag_j[jj1];
                        if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
                        {
                           P_diag_data[P_marker[i2]] += distribute * A_diag_data[jj1];
                        }
                     }

                     /* Off-Diagonal block part of row i1 */
                     if (num_procs > 1)
                     {
                        for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                        {
                           i2 = A_offd_j[jj1];
                           if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0)
                           {
                              P_offd_data[P_marker_offd[i2]] += distribute * A_offd_data[jj1];
                           }
                        }
                     }
                  }
                  else
                  {
                     /* No common strong C-point to distribute to: lump the
                      * connection into the diagonal (same-function dofs only) */
                     if (num_functions == 1 || dof_func[i] == dof_func[i1])
                     {
                        diagonal += A_diag_data[jj];
                     }
                  }
               }

               /*--------------------------------------------------------------
                * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
                * into the diagonal.
                *--------------------------------------------------------------*/

               else if (CF_marker[i1] != -3)
               {
                  if (num_functions == 1 || dof_func[i] == dof_func[i1])
                  {
                     diagonal += A_diag_data[jj];
                  }
               }
            }

            /*----------------------------------------------------------------
             * Still looping over ith row of A. Next, loop over the
             * off-diagonal part of A
             *---------------------------------------------------------------*/

            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];

                  /*--------------------------------------------------------------
                   * Case 1: neighbor i1 is a C-point and strongly influences i,
                   * accumulate a_{i,i1} into the interpolation weight.
                   *--------------------------------------------------------------*/

                  if (P_marker_offd[i1] >= jj_begin_row_offd)
                  {
                     P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
                  }

                  /*------------------------------------------------------------
                   * Case 2: neighbor i1 is an F-point and strongly influences i,
                   * distribute a_{i,i1} to C-points that strongly infuence i.
                   * Note: currently no distribution to the diagonal in this case.
                   *-----------------------------------------------------------*/

                  else if (P_marker_offd[i1] == strong_f_marker)
                  {
                     sum = zero;

                     /*---------------------------------------------------------
                      * Loop over row of A_ext for point i1 and calculate the sum
                      * of the connections to c-points that strongly influence i.
                      *---------------------------------------------------------*/

                     /* find row number */
                     c_num = A_offd_j[jj];

                     sgn = 1;
                     if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
                     for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
                     {
                        /* A_ext columns were renumbered above: i2 >= 0 is a
                         * local diag column, i2 < 0 encodes offd column -i2-1 */
                        i2 = (HYPRE_Int)A_ext_j[jj1];

                        if (i2 > -1)
                        {
                           /* in the diagonal block */
                           if (P_marker[i2] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0)
                           {
                              sum += A_ext_data[jj1];
                           }
                        }
                        else
                        {
                           /* in the off_diagonal block  */
                           if (P_marker_offd[-i2-1] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0)
                           {
                              sum += A_ext_data[jj1];
                           }
                        }
                     }

                     if (sum != 0)
                     {
                        distribute = A_offd_data[jj] / sum;

                        /*---------------------------------------------------------
                         * Loop over row of A_ext for point i1 and do
                         * the distribution.
                         *--------------------------------------------------------*/

                        /* Diagonal block part of row i1 */
                        for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
                        {
                           i2 = (HYPRE_Int)A_ext_j[jj1];

                           if (i2 > -1) /* in the diagonal block */
                           {
                              if (P_marker[i2] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0)
                              {
                                 P_diag_data[P_marker[i2]] += distribute * A_ext_data[jj1];
                              }
                           }
                           else
                           {
                              /* in the off_diagonal block  */
                              if (P_marker_offd[-i2-1] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0)
                                 P_offd_data[P_marker_offd[-i2-1]] += distribute * A_ext_data[jj1];
                           }
                        }
                     }
                     else
                     {
                        if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                        {
                           diagonal += A_offd_data[jj];
                        }
                     }
                  }

                  /*-----------------------------------------------------------
                   * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
                   * into the diagonal.
                   *-----------------------------------------------------------*/

                  else if (CF_marker_offd[i1] != -3)
                  {
                     if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                     {
                        diagonal += A_offd_data[jj];
                     }
                  }
               }
            }

            /*-----------------------------------------------------------------
             * Set interpolation weight by dividing by the diagonal.
             *-----------------------------------------------------------------*/

            if (diagonal == 0.0)
            {
               if (print_level)
               {
                  hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i);
               }
               for (jj = jj_begin_row; jj < jj_end_row; jj++)
               {
                  P_diag_data[jj] = 0.0;
               }
               for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               {
                  P_offd_data[jj] = 0.0;
               }
            }
            else
            {
               for (jj = jj_begin_row; jj < jj_end_row; jj++)
               {
                  P_diag_data[jj] /= -diagonal;
               }
               for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               {
                  P_offd_data[jj] /= -diagonal;
               }
            }
         }

         strong_f_marker--;

         P_offd_i[i+1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */

   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* Truncation reallocates the CSR arrays; refresh local pointers */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* Compress the off-diagonal column space of P: keep only columns that
    * actually appear and renumber P_offd_j accordingly. */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < num_cols_A_offd; i++)
         P_marker[i] = 0;

      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);

      index = 0;
      for (i=0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   /* Restore the special -3 markers to plain F-points for the caller */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   //hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);

   return hypre_error_flag;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterpHE
 * interpolation routine for hyperbolic PDEs
 * treats weak fine connections like strong fine connections
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGBuildInterpHE( hypre_ParCSRMatrix   *A,
                              HYPRE_Int            *CF_marker,
                              hypre_ParCSRMatrix   *S,
                              HYPRE_BigInt         *num_cpts_global,
                              HYPRE_Int             num_functions,
                              HYPRE_Int            *dof_func,
                              HYPRE_Int             debug_flag,
                              HYPRE_Real            trunc_factor,
                              HYPRE_Int             max_elmts,
                              HYPRE_Int            *col_offd_S_to_A,
                              hypre_ParCSRMatrix  **P_ptr)
{
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix
*A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data = NULL; HYPRE_Int *A_ext_i = NULL; HYPRE_BigInt *A_ext_j = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; HYPRE_Int jj_begin_row,jj_begin_row_offd; HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; //HYPRE_Int *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i,i1,i2; HYPRE_Int j,jl,jj,jj1; HYPRE_Int kc; HYPRE_BigInt big_k; HYPRE_Int start; HYPRE_Int sgn; HYPRE_Int c_num; HYPRE_Real diagonal; HYPRE_Real sum; HYPRE_Real distribute; 
HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + local_numrows; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); #ifdef HYPRE_NO_GLOBAL_PARTITION //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (num_functions > 1 && num_cols_A_offd) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (num_functions > 1) { index = 0; for 
(i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*---------------------------------------------------------------------- * Get the ghost rows of A *---------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_procs > 1) { A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1); A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); } index = 0; for (i=0; i < num_cols_A_offd; i++) { for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++) { big_k = A_ext_j[j]; if (big_k >= col_1 && big_k < col_n) { A_ext_j[index] = big_k - col_1; A_ext_data[index++] = A_ext_data[j]; } else { kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd); if (kc > -1) { A_ext_j[index] = (HYPRE_BigInt)(-kc-1); A_ext_data[index++] = A_ext_data[j]; } } } A_ext_i[i] = index; } for (i = num_cols_A_offd; i > 0; i--) A_ext_i[i] = A_ext_i[i-1]; if (num_procs > 1) A_ext_i[0] = 0; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i. 
*--------------------------------------------------------------------*/ else { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { if (col_offd_S_to_A) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = col_offd_S_to_A[S_offd_j[jj]]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } else { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) fine_to_coarse[i] += coarse_shift; } /*index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); }*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); /*#ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/ /*----------------------------------------------------------------------- * Loop over fine grid points. 
*-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); else P_marker_offd = NULL; for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; jj_begin_row = jj_counter; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
*--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } } jj_end_row = jj_counter; /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; jj_begin_row_offd = jj_counter_offd; if (num_procs > 1) { if (col_offd_S_to_A) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = col_offd_S_to_A[S_offd_j[jj]]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } else { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; /* Loop over ith row of A. First, the diagonal part of A */ for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. 
*--------------------------------------------------------------*/ if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } /*-------------------------------------------------------------- * Case 2: neighbor i1 is an F-point and influences i, * distribute a_{i,i1} to C-points that strongly influence i. * Note: currently no distribution to the diagonal in this case. *--------------------------------------------------------------*/ else { sum = zero; /*----------------------------------------------------------- * Loop over row of A for point i1 and calculate the sum * of the connections to c-points that strongly influence i. *-----------------------------------------------------------*/ sgn = 1; if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1; /* Diagonal block part of row i1 */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) { sum += A_diag_data[jj1]; } } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) { sum += A_offd_data[jj1]; } } } if (sum != 0) { distribute = A_diag_data[jj] / sum; /*----------------------------------------------------------- * Loop over row of A for point i1 and do the distribution. 
*-----------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) { P_diag_data[P_marker[i2]] += distribute * A_diag_data[jj1]; } } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) { P_offd_data[P_marker_offd[i2]] += distribute * A_offd_data[jj1]; } } } } else { if (num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } } /*---------------------------------------------------------------- * Still looping over ith row of A. Next, loop over the * off-diagonal part of A *---------------------------------------------------------------*/ if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. *--------------------------------------------------------------*/ if (P_marker_offd[i1] >= jj_begin_row_offd) { P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; } /*------------------------------------------------------------ * Case 2: neighbor i1 is an F-point and influences i, * distribute a_{i,i1} to C-points that strongly infuence i. * Note: currently no distribution to the diagonal in this case. *-----------------------------------------------------------*/ else { sum = zero; /*--------------------------------------------------------- * Loop over row of A_ext for point i1 and calculate the sum * of the connections to c-points that strongly influence i. 
*---------------------------------------------------------*/ /* find row number */ c_num = A_offd_j[jj]; sgn = 1; if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1; for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++) { i2 = (HYPRE_Int)A_ext_j[jj1]; if (i2 > -1) { /* in the diagonal block */ if (P_marker[i2] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0) { sum += A_ext_data[jj1]; } } else { /* in the off_diagonal block */ if (P_marker_offd[-i2-1] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0) { sum += A_ext_data[jj1]; } } } if (sum != 0) { distribute = A_offd_data[jj] / sum; /*--------------------------------------------------------- * Loop over row of A_ext for point i1 and do * the distribution. *--------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++) { i2 = (HYPRE_Int)A_ext_j[jj1]; if (i2 > -1) /* in the diagonal block */ { if (P_marker[i2] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0) { P_diag_data[P_marker[i2]] += distribute * A_ext_data[jj1]; } } else { /* in the off_diagonal block */ if (P_marker_offd[-i2-1] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0) P_offd_data[P_marker_offd[-i2-1]] += distribute * A_ext_data[jj1]; } } } else { if (num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } } /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. 
*-----------------------------------------------------------------*/ for (jj = jj_begin_row; jj < jj_end_row; jj++) { P_diag_data[jj] /= -diagonal; } for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { P_offd_data[jj] /= -diagonal; } } P_offd_i[i+1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_cols_A_offd; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) { while 
(P_marker[index]==0) index++; tmp_map_offd[i] = index++; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P,A,fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext); return hypre_error_flag; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildDirInterp *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildDirInterp( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = 
hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);

   /* Strength matrix S: diagonal and off-diagonal CSR parts */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);

   /* Interpolation operator P under construction */
   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   HYPRE_Int *tmp_map_offd = NULL;
   HYPRE_Int *CF_marker_offd = NULL;   /* CF_marker values of off-processor columns */
   HYPRE_Int *dof_func_offd = NULL;    /* dof_func values of off-processor columns */
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int P_diag_size, P_offd_size;

   /* Fill counters for the two-pass construction (per-thread offsets) */
   HYPRE_Int jj_counter,jj_counter_offd;
   HYPRE_Int *jj_count, *jj_count_offd;
   HYPRE_Int jj_begin_row,jj_begin_row_offd;
   HYPRE_Int jj_end_row,jj_end_row_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int *fine_to_coarse;   /* fine index -> local coarse index; -1 for F-points */
   HYPRE_Int *coarse_counter;
   HYPRE_Int coarse_shift;
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   //HYPRE_BigInt my_first_cpt;
   HYPRE_Int i,i1;
   HYPRE_Int j,jl,jj;
   HYPRE_Int start;
   HYPRE_Real diagonal;
   /* Row sums for direct-interpolation scaling:
    *   sum_N_* : positive/negative sums over same-function neighbors of row i
    *   sum_P_* : positive/negative sums restricted to the interpolatory set */
   HYPRE_Real sum_N_pos, sum_P_pos;
   HYPRE_Real sum_N_neg, sum_P_neg;
   HYPRE_Real alfa = 1.0;   /* scale factor applied to negative P entries */
   HYPRE_Real beta = 1.0;   /* scale factor applied to positive P entries */
   HYPRE_Real zero = 0.0;
   HYPRE_Real one = 1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;
   HYPRE_Int num_sends;
   HYPRE_Int index;
   HYPRE_Int ns, ne, size, rest;   /* per-thread row-range partition */
   HYPRE_Int *int_buf_data;
   HYPRE_Real wall_time;  /* for debugging instrumentation */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();

#ifdef HYPRE_NO_GLOBAL_PARTITION
   //my_first_cpt = num_cpts_global[0];
   /* Last rank holds the global number of C-points; broadcast to all. */
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT,
                   num_procs-1, comm);
#else
   //my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   if (num_functions > 1 && num_cols_A_offd)
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);

   /* Pack local CF_marker entries for every send and exchange them so
    * CF_marker_offd holds the C/F flags of the off-processor columns.
    * (Job type 11 — presumably the integer forward exchange; confirm
    * against hypre_ParCSRCommHandleCreate.) */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* Same exchange for dof_func when interpolating systems (>1 function). */
   if (num_functions > 1)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/

   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/

   /* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* Split the n_fine rows into num_threads contiguous chunks; the
       * first 'rest' chunks carry one extra row. */
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity. Also set up
          * mapping vector.
          *--------------------------------------------------------------------*/

         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }

         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is from the C-points that
          * strongly influence i.
          *--------------------------------------------------------------------*/

         else
         {
            /* NOTE(review): this counting pass admits strong neighbors with
             * CF_marker > 0, while the fill pass below uses >= 0.  If 0 can
             * ever be a valid CF_marker value on this path, the fill pass
             * would write more entries than were counted/allocated here —
             * confirm the intended threshold. */
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] > 0)
               {
                  jj_count[j]++;
               }
            }

            if (num_procs > 1)
            {
               if (col_offd_S_to_A)
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = col_offd_S_to_A[S_offd_j[jj]];
                     if (CF_marker_offd[i1] > 0)
                     {
                        jj_count_offd[j]++;
                     }
                  }
               }
               else
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = S_offd_j[jj];
                     if (CF_marker_offd[i1] > 0)
                     {
                        jj_count_offd[j]++;
                     }
                  }
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/

   /* Turn per-thread counts into inclusive prefix sums so every thread
    * knows its starting offset into the shared P arrays. */
   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;

   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);

   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;

   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------------
    * Initialize some stuff.
    *-----------------------------------------------------------------------*/

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

   /* Shift each thread's local coarse numbering by the C-point count of
    * the preceding threads so fine_to_coarse is consistent process-wide. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j-1];
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
         fine_to_coarse[i] += coarse_shift;
   }

   /*index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd);

   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time);
      fflush(NULL);
   }*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /*#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
   #endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/

   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
    *-----------------------------------------------------------------------*/

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,diagonal,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd,sum_P_pos,sum_P_neg,sum_N_pos,sum_N_neg,alfa,beta) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      HYPRE_Int *P_marker, *P_marker_offd;
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      /* Each thread resumes filling at the offset computed in pass one. */
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];

      /* P_marker[i1] records, for the current row, where column i1 sits in
       * P; any value < jj_begin_row means "not interpolatory for this row". */
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      if (num_cols_A_offd)
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
      else
         P_marker_offd = NULL;

      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker_offd[i] = -1;
      }

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter] = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }

         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else
         {
            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            jj_begin_row = jj_counter;

            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               /* NOTE(review): the counting pass above used CF_marker > 0;
                * >= 0 additionally admits CF_marker == 0 — verify the two
                * passes are meant to use the same test. */
               if (CF_marker[i1] >= 0)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
            }
            jj_end_row = jj_counter;

            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;
            jj_begin_row_offd = jj_counter_offd;

            if (num_procs > 1)
            {
               if (col_offd_S_to_A)
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = col_offd_S_to_A[S_offd_j[jj]];

                     /*-----------------------------------------------------------
                      * If neighbor i1 is a C-point, set column number in P_offd_j
                      * and initialize interpolation weight to zero.
                      *-----------------------------------------------------------*/
                     if (CF_marker_offd[i1] >= 0)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        P_offd_j[jj_counter_offd] = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                  }
               }
               else
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = S_offd_j[jj];

                     /*-----------------------------------------------------------
                      * If neighbor i1 is a C-point, set column number in P_offd_j
                      * and initialize interpolation weight to zero.
                      *-----------------------------------------------------------*/
                     if (CF_marker_offd[i1] >= 0)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        P_offd_j[jj_counter_offd] = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                  }
               }
            }
            jj_end_row_offd = jj_counter_offd;

            /* The first entry of row i in A_diag is taken as the diagonal. */
            diagonal = A_diag_data[A_diag_i[i]];

            /* Loop over ith row of A. First, the diagonal part of A */
            sum_N_pos = 0;
            sum_N_neg = 0;
            sum_P_pos = 0;
            sum_P_neg = 0;

            for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
            {
               i1 = A_diag_j[jj];
               /* Row sums only include neighbors of the same function. */
               if (num_functions == 1 || dof_func[i1] == dof_func[i])
               {
                  if (A_diag_data[jj] > 0)
                     sum_N_pos += A_diag_data[jj];
                  else
                     sum_N_neg += A_diag_data[jj];
               }

               /*--------------------------------------------------------------
                * Case 1: neighbor i1 is a C-point and strongly influences i,
                * accumulate a_{i,i1} into the interpolation weight.
                *--------------------------------------------------------------*/
               if (P_marker[i1] >= jj_begin_row)
               {
                  P_diag_data[P_marker[i1]] += A_diag_data[jj];
                  if (A_diag_data[jj] > 0)
                     sum_P_pos += A_diag_data[jj];
                  else
                     sum_P_neg += A_diag_data[jj];
               }
            }

            /*----------------------------------------------------------------
             * Still looping over ith row of A. Next, loop over the
             * off-diagonal part of A
             *---------------------------------------------------------------*/

            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if (num_functions == 1 || dof_func_offd[i1] == dof_func[i])
                  {
                     if (A_offd_data[jj] > 0)
                        sum_N_pos += A_offd_data[jj];
                     else
                        sum_N_neg += A_offd_data[jj];
                  }

                  /*--------------------------------------------------------------
                   * Case 1: neighbor i1 is a C-point and strongly influences i,
                   * accumulate a_{i,i1} into the interpolation weight.
                   *--------------------------------------------------------------*/
                  if (P_marker_offd[i1] >= jj_begin_row_offd)
                  {
                     P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
                     if (A_offd_data[jj] > 0)
                        sum_P_pos += A_offd_data[jj];
                     else
                        sum_P_neg += A_offd_data[jj];
                  }
               }
            }

            /* Direct-interpolation scaling: negative and positive entries of
             * the row are scaled separately by (full row sum)/(interpolatory
             * sum)/diagonal.  alfa/beta keep their previous values when the
             * corresponding interpolatory sum is zero. */
            if (sum_P_neg) alfa = sum_N_neg/sum_P_neg/diagonal;
            if (sum_P_pos) beta = sum_N_pos/sum_P_pos/diagonal;

            /*-----------------------------------------------------------------
             * Set interpolation weight by dividing by the diagonal.
             *-----------------------------------------------------------------*/

            for (jj = jj_begin_row; jj < jj_end_row; jj++)
            {
               if (P_diag_data[jj]> 0)
                  P_diag_data[jj] *= -beta;
               else
                  P_diag_data[jj] *= -alfa;
            }
            for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
            {
               if (P_offd_data[jj]> 0)
                  P_offd_data[jj] *= -beta;
               else
                  P_offd_data[jj] *= -alfa;
            }
         }
         P_offd_i[i+1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* Truncation can reallocate the CSR arrays; refresh local pointers. */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* Compress the off-diagonal column space of P down to the columns that
    * are actually referenced, building the tmp_map_offd renumbering. */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      HYPRE_Int *P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < num_cols_A_offd; i++)
         P_marker[i] = 0;

      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd =
hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);

      /* Build tmp_map_offd: the i-th retained off-diag column, in
       * ascending order of the old off-diag column index */
      index = 0;
      for (i=0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }

      /* Renumber P_offd_j into the compressed off-diag column space */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   /* Restore CF_marker: -3 was a temporary tag, map it back to F-point (-1) */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGInterpTruncation
 *
 * Truncates the interpolation matrix P in place, in two optional phases:
 *   1. if trunc_factor > 0: drop entries whose magnitude is smaller than
 *      trunc_factor times the largest magnitude in the row;
 *   2. if max_elmts > 0: keep only the max_elmts largest-magnitude entries
 *      per row (diag and offd combined).
 * After each phase the surviving entries of a row are rescaled so the row
 * sum is preserved.  Threaded: each thread compresses its own contiguous
 * row range locally; the global CSR arrays are only made consistent at the
 * end of the routine (note that during the dropping phases P_diag_i[stop]
 * still points to the start of the next thread's row range).
 *
 * Returns 0 (ierr) on success.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGInterpTruncation( hypre_ParCSRMatrix *P,
                                 HYPRE_Real trunc_factor,
                                 HYPRE_Int max_elmts)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] -= hypre_MPI_Wtime();
#endif

   hypre_CSRMatrix *P_diag = hypre_ParCSRMatrixDiag(P);
   HYPRE_Int *P_diag_i = hypre_CSRMatrixI(P_diag);
   HYPRE_Int *P_diag_j = hypre_CSRMatrixJ(P_diag);
   HYPRE_Real *P_diag_data = hypre_CSRMatrixData(P_diag);
   HYPRE_Int *P_diag_j_new;
   HYPRE_Real *P_diag_data_new;

   hypre_CSRMatrix *P_offd = hypre_ParCSRMatrixOffd(P);
   HYPRE_Int *P_offd_i = hypre_CSRMatrixI(P_offd);
   HYPRE_Int *P_offd_j = hypre_CSRMatrixJ(P_offd);
   HYPRE_Real *P_offd_data = hypre_CSRMatrixData(P_offd);
   HYPRE_Int *P_offd_j_new;
   HYPRE_Real *P_offd_data_new;

   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(P_diag);
   HYPRE_Int num_cols = hypre_CSRMatrixNumCols(P_diag);
   HYPRE_Int i, j, start_j;
   HYPRE_Int ierr = 0;
   HYPRE_Int next_open;
   HYPRE_Int now_checking;
   HYPRE_Int num_lost;
   HYPRE_Int num_lost_global=0;
   HYPRE_Int next_open_offd;
   HYPRE_Int now_checking_offd;
   HYPRE_Int num_lost_offd;
   HYPRE_Int num_lost_global_offd;
   HYPRE_Int P_diag_size;
   HYPRE_Int P_offd_size;
   HYPRE_Int num_elmts;
   HYPRE_Int cnt, cnt_diag, cnt_offd;
   HYPRE_Real max_coef;
   HYPRE_Real row_sum;
   HYPRE_Real scale;

   /* Threading variables.  Entry i of num_lost_(offd_)per_thread holds the
    * number of dropped entries over thread i's row range.  Cum_lost_per_thread
    * will temporarily store the cumulative number of dropped entries up to
    * each thread. */
   HYPRE_Int my_thread_num, num_threads, start, stop;
   HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
   HYPRE_Int * cum_lost_per_thread;
   HYPRE_Int * num_lost_per_thread;
   HYPRE_Int * num_lost_offd_per_thread;

   /* Initialize threading variables */
   max_num_threads[0] = hypre_NumThreads();
   cum_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
   num_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
   num_lost_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
   for(i=0; i < max_num_threads[0]; i++)
   {
      num_lost_per_thread[i] = 0;
      num_lost_offd_per_thread[i] = 0;
   }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,my_thread_num,num_threads,max_coef,j,start_j,row_sum,scale,num_lost,now_checking,next_open,num_lost_offd,now_checking_offd,next_open_offd,start,stop,cnt_diag,cnt_offd,num_elmts,cnt)
#endif
   {
      my_thread_num = hypre_GetThreadNum();
      num_threads = hypre_NumActiveThreads();

      /* Compute each thread's range of rows to truncate and compress.  Note,
       * that i, j and data are all compressed as entries are dropped, but
       * that the compression only occurs locally over each thread's row
       * range.  P_diag_i is only made globally consistent at the end of this
       * routine.  During the dropping phases, P_diag_i[stop] will point to
       * the start of the next thread's row range. */

      /* my row range */
      start = (n_fine/num_threads)*my_thread_num;
      if (my_thread_num == num_threads-1)
      {
         /* last thread picks up the remainder rows */
         stop = n_fine;
      }
      else
      {
         stop = (n_fine/num_threads)*(my_thread_num+1);
      }

      /*
       * Truncate based on truncation tolerance
       */
      if (trunc_factor > 0)
      {
         num_lost = 0;
         num_lost_offd = 0;

         /* read/write cursors for in-place compaction of j and data */
         next_open = P_diag_i[start];
         now_checking = P_diag_i[start];
         next_open_offd = P_offd_i[start];;
         now_checking_offd = P_offd_i[start];;

         for (i = start; i < stop; i++)
         {
            /* row-wise drop threshold: trunc_factor * max |entry| in row i
             * (over both diag and offd parts) */
            max_coef = 0;
            for (j = P_diag_i[i]; j < P_diag_i[i+1]; j++)
               max_coef = (max_coef < fabs(P_diag_data[j])) ?
                  fabs(P_diag_data[j]) : max_coef;
            for (j = P_offd_i[i]; j < P_offd_i[i+1]; j++)
               max_coef = (max_coef < fabs(P_offd_data[j])) ?
                  fabs(P_offd_data[j]) : max_coef;
            max_coef *= trunc_factor;

            start_j = P_diag_i[i];
            /* shift row pointer back by entries dropped so far */
            if (num_lost) P_diag_i[i] -= num_lost;
            row_sum = 0;
            scale = 0;
            for (j = start_j; j < P_diag_i[i+1]; j++)
            {
               row_sum += P_diag_data[now_checking];
               if (fabs(P_diag_data[now_checking]) < max_coef)
               {
                  /* drop: advance read cursor only */
                  num_lost++;
                  now_checking++;
               }
               else
               {
                  /* keep: compact entry toward next_open */
                  scale += P_diag_data[now_checking];
                  P_diag_data[next_open] = P_diag_data[now_checking];
                  P_diag_j[next_open] = P_diag_j[now_checking];
                  now_checking++;
                  next_open++;
               }
            }

            start_j = P_offd_i[i];
            if (num_lost_offd) P_offd_i[i] -= num_lost_offd;
            for (j = start_j; j < P_offd_i[i+1]; j++)
            {
               row_sum += P_offd_data[now_checking_offd];
               if (fabs(P_offd_data[now_checking_offd]) < max_coef)
               {
                  num_lost_offd++;
                  now_checking_offd++;
               }
               else
               {
                  scale += P_offd_data[now_checking_offd];
                  P_offd_data[next_open_offd] = P_offd_data[now_checking_offd];
                  P_offd_j[next_open_offd] = P_offd_j[now_checking_offd];
                  now_checking_offd++;
                  next_open_offd++;
               }
            }
            /* normalize row of P: rescale survivors so the row sum is kept */
            if (scale != 0.)
            {
               if (scale != row_sum)
               {
                  scale = row_sum/scale;
                  for (j = P_diag_i[i]; j < (P_diag_i[i+1]-num_lost); j++)
                     P_diag_data[j] *= scale;
                  for (j = P_offd_i[i]; j < (P_offd_i[i+1]-num_lost_offd); j++)
                     P_offd_data[j] *= scale;
               }
            }
         } /* end loop for (i = 0; i < n_fine; i++) */

         /* store number of dropped elements and number of threads */
         if(my_thread_num == 0)
         {
            max_num_threads[0] = num_threads;
         }
         num_lost_per_thread[my_thread_num] = num_lost;
         num_lost_offd_per_thread[my_thread_num] = num_lost_offd;

      } /* end if (trunc_factor > 0) */

      /*
       * Truncate based on capping the nnz per row
       */
      if (max_elmts > 0)
      {
         HYPRE_Int P_mxnum, cnt1, last_index, last_index_offd;
         HYPRE_Int *P_aux_j;
         HYPRE_Real *P_aux_data;

         /* find maximum row length locally over this row range */
         P_mxnum = 0;
         for (i=start; i<stop; i++)
         {
            /* Note P_diag_i[stop] is the starting point for the next thread
             * in j and data, not the stop point for this thread */
            last_index = P_diag_i[i+1];
            last_index_offd = P_offd_i[i+1];
            if(i == stop-1)
            {
               last_index -= num_lost_per_thread[my_thread_num];
               last_index_offd -= num_lost_offd_per_thread[my_thread_num];
            }
            cnt1 = last_index-P_diag_i[i] + last_index_offd-P_offd_i[i];
            if (cnt1 > P_mxnum) P_mxnum = cnt1;
         }

         /* Some rows exceed max_elmts, and require truncation.  Essentially,
          * each thread truncates and compresses its range of rows locally. */
         if (P_mxnum > max_elmts)
         {
            num_lost = 0;
            num_lost_offd = 0;

            /* two temporary arrays to hold row i for temporary operations */
            P_aux_j = hypre_CTAlloc(HYPRE_Int, P_mxnum, HYPRE_MEMORY_HOST);
            P_aux_data = hypre_CTAlloc(HYPRE_Real, P_mxnum, HYPRE_MEMORY_HOST);

            /* write cursors into the (locally compacted) diag/offd arrays */
            cnt_diag = P_diag_i[start];
            cnt_offd = P_offd_i[start];

            for (i = start; i < stop; i++)
            {
               /* Note P_diag_i[stop] is the starting point for the next thread
                * in j and data, not the stop point for this thread */
               last_index = P_diag_i[i+1];
               last_index_offd = P_offd_i[i+1];
               if(i == stop-1)
               {
                  last_index -= num_lost_per_thread[my_thread_num];
                  last_index_offd -= num_lost_offd_per_thread[my_thread_num];
               }

               row_sum = 0;
               num_elmts = last_index-P_diag_i[i] + last_index_offd-P_offd_i[i];
               if (max_elmts < num_elmts)
               {
                  /* copy both diagonal and off-diag parts of row i to _aux_ arrays;
                   * offd columns are shifted by num_cols so one sort covers both */
                  cnt = 0;
                  for (j = P_diag_i[i]; j < last_index; j++)
                  {
                     P_aux_j[cnt] = P_diag_j[j];
                     P_aux_data[cnt++] = P_diag_data[j];
                     row_sum += P_diag_data[j];
                  }
                  num_lost += cnt;
                  cnt1 = cnt;
                  for (j = P_offd_i[i]; j < last_index_offd; j++)
                  {
                     P_aux_j[cnt] = P_offd_j[j]+num_cols;
                     P_aux_data[cnt++] = P_offd_data[j];
                     row_sum += P_offd_data[j];
                  }
                  num_lost_offd += cnt-cnt1;

                  /* sort data based on absolute value, descending */
                  hypre_qsort2abs(P_aux_j,P_aux_data,0,cnt-1);

                  scale = 0;
                  if (i > start)
                  {
                     P_diag_i[i] = cnt_diag;
                     P_offd_i[i] = cnt_offd;
                  }
                  /* keep only the max_elmts largest entries, routing each
                   * back to diag or offd by the num_cols column shift */
                  for (j = 0; j < max_elmts; j++)
                  {
                     scale += P_aux_data[j];
                     if (P_aux_j[j] < num_cols)
                     {
                        P_diag_j[cnt_diag] = P_aux_j[j];
                        P_diag_data[cnt_diag++] = P_aux_data[j];
                     }
                     else
                     {
                        P_offd_j[cnt_offd] = P_aux_j[j]-num_cols;
                        P_offd_data[cnt_offd++] = P_aux_data[j];
                     }
                  }
                  /* num_lost was over-counted by the kept entries; correct it */
                  num_lost -= cnt_diag-P_diag_i[i];
                  num_lost_offd -= cnt_offd-P_offd_i[i];

                  /* normalize row of P */
                  if (scale != 0.)
                  {
                     if (scale != row_sum)
                     {
                        scale = row_sum/scale;
                        for (j = P_diag_i[i]; j < cnt_diag; j++)
                           P_diag_data[j] *= scale;
                        for (j = P_offd_i[i]; j < cnt_offd; j++)
                           P_offd_data[j] *= scale;
                     }
                  }
               }  /* end if (max_elmts < num_elmts) */
               else
               {
                  /* nothing dropped from this row, but still have to shift entries back
                   * by the number dropped so far */
                  if (P_diag_i[i] != cnt_diag)
                  {
                     start_j = P_diag_i[i];
                     P_diag_i[i] = cnt_diag;
                     for (j = start_j; j < last_index; j++)
                     {
                        P_diag_j[cnt_diag] = P_diag_j[j];
                        P_diag_data[cnt_diag++] = P_diag_data[j];
                     }
                  }
                  else
                     cnt_diag += last_index-P_diag_i[i];

                  if (P_offd_i[i] != cnt_offd)
                  {
                     start_j = P_offd_i[i];
                     P_offd_i[i] = cnt_offd;
                     for (j = start_j; j < last_index_offd; j++)
                     {
                        P_offd_j[cnt_offd] = P_offd_j[j];
                        P_offd_data[cnt_offd++] = P_offd_data[j];
                     }
                  }
                  else
                     cnt_offd += last_index_offd-P_offd_i[i];
               }
            } /* end for (i = 0; i < n_fine; i++) */

            num_lost_per_thread[my_thread_num] += num_lost;
            num_lost_offd_per_thread[my_thread_num] += num_lost_offd;
            hypre_TFree(P_aux_j, HYPRE_MEMORY_HOST);
            hypre_TFree(P_aux_data, HYPRE_MEMORY_HOST);

         } /* end if (P_mxnum > max_elmts) */
      } /* end if (max_elmts > 0) */

      /* Sum up num_lost_global */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      if(my_thread_num == 0)
      {
         num_lost_global = 0;
         num_lost_global_offd = 0;
         for(i = 0; i < max_num_threads[0]; i++)
         {
            num_lost_global += num_lost_per_thread[i];
            num_lost_global_offd += num_lost_offd_per_thread[i];
         }
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /*
       * Synchronize and create new diag data structures
       */
      if (num_lost_global)
      {
         /* Each thread has it's own locally compressed CSR matrix from rows start
          * to stop.  Now, we have to copy each thread's chunk into the new
          * process-wide CSR data structures
          *
          * First, we compute the new process-wide number of nonzeros (i.e.,
          * P_diag_size), and compute cum_lost_per_thread[k] so that this
          * entry holds the cumulative sum of entries dropped up to and
          * including thread k. */
         if(my_thread_num == 0)
         {
            P_diag_size = P_diag_i[n_fine];
            for(i = 0; i < max_num_threads[0]; i++)
            {
               P_diag_size -= num_lost_per_thread[i];
               if(i > 0)
               {
                  cum_lost_per_thread[i] = num_lost_per_thread[i] + cum_lost_per_thread[i-1];
               }
               else
               {
                  cum_lost_per_thread[i] = num_lost_per_thread[i];
               }
            }

            P_diag_j_new = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_SHARED);
            P_diag_data_new = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_SHARED);
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

         /* points to next open spot in new data structures for this thread */
         if(my_thread_num == 0)
         {
            next_open = 0;
         }
         else
         {
            /* remember, cum_lost_per_thread[k] stores the num dropped up to and
             * including thread k */
            next_open = P_diag_i[start] - cum_lost_per_thread[my_thread_num-1];
         }

         /* copy the j and data arrays over */
         for(i = P_diag_i[start]; i < P_diag_i[stop] - num_lost_per_thread[my_thread_num]; i++)
         {
            P_diag_j_new[next_open] = P_diag_j[i];
            P_diag_data_new[next_open] = P_diag_data[i];
            next_open += 1;
         }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
         /* update P_diag_i with number of dropped entries by all lower ranked
          * threads */
         if(my_thread_num > 0)
         {
            for(i=start; i<stop; i++)
            {
               P_diag_i[i] -= cum_lost_per_thread[my_thread_num-1];
            }
         }

         if(my_thread_num == 0)
         {
            /* Set last entry */
            P_diag_i[n_fine] = P_diag_size ;

            hypre_TFree(P_diag_j, HYPRE_MEMORY_SHARED);
            hypre_TFree(P_diag_data, HYPRE_MEMORY_SHARED);
            hypre_CSRMatrixJ(P_diag) = P_diag_j_new;
            hypre_CSRMatrixData(P_diag) = P_diag_data_new;
            hypre_CSRMatrixNumNonzeros(P_diag) = P_diag_size;
         }
      }

      /*
       * Synchronize and create new offd data structures
       */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      if (num_lost_global_offd)
      {
         /* Repeat process for off-diagonal */
         if(my_thread_num == 0)
         {
            P_offd_size = P_offd_i[n_fine];
            for(i = 0; i < max_num_threads[0]; i++)
            {
               P_offd_size -= num_lost_offd_per_thread[i];
               if(i > 0)
               {
                  cum_lost_per_thread[i] = num_lost_offd_per_thread[i] + cum_lost_per_thread[i-1];
               }
               else
               {
                  cum_lost_per_thread[i] = num_lost_offd_per_thread[i];
               }
            }

            P_offd_j_new = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_SHARED);
            P_offd_data_new = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_SHARED);
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

         /* points to next open spot in new data structures for this thread */
         if(my_thread_num == 0)
         {
            next_open = 0;
         }
         else
         {
            /* remember, cum_lost_per_thread[k] stores the num dropped up to and
             * including thread k */
            next_open = P_offd_i[start] - cum_lost_per_thread[my_thread_num-1];
         }

         /* copy the j and data arrays over */
         for(i = P_offd_i[start]; i < P_offd_i[stop] - num_lost_offd_per_thread[my_thread_num]; i++)
         {
            P_offd_j_new[next_open] = P_offd_j[i];
            P_offd_data_new[next_open] = P_offd_data[i];
            next_open += 1;
         }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
         /* update P_offd_i with number of dropped entries by all lower ranked
          * threads */
         if(my_thread_num > 0)
         {
            for(i=start; i<stop; i++)
            {
               P_offd_i[i] -= cum_lost_per_thread[my_thread_num-1];
            }
         }

         if(my_thread_num == 0)
         {
            /* Set last entry */
            P_offd_i[n_fine] = P_offd_size ;

            hypre_TFree(P_offd_j, HYPRE_MEMORY_SHARED);
            hypre_TFree(P_offd_data, HYPRE_MEMORY_SHARED);
            hypre_CSRMatrixJ(P_offd) = P_offd_j_new;
            hypre_CSRMatrixData(P_offd) = P_offd_data_new;
            hypre_CSRMatrixNumNonzeros(P_offd) = P_offd_size;
         }
      }

   } /* end parallel region */

   hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST);
   hypre_TFree(cum_lost_per_thread, HYPRE_MEMORY_HOST);
   hypre_TFree(num_lost_per_thread, HYPRE_MEMORY_HOST);
   hypre_TFree(num_lost_offd_per_thread, HYPRE_MEMORY_HOST);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] += hypre_MPI_Wtime();
#endif

   return ierr;
}

/* sort both v and w, in place, but based only on entries in w:
 * recursive quicksort ordering w by descending absolute value,
 * permuting v identically (used to rank row entries by magnitude) */
void hypre_qsort2abs( HYPRE_Int *v,
                      HYPRE_Real *w,
                      HYPRE_Int left,
                      HYPRE_Int right )
{
   HYPRE_Int i, last;
   if (left >= right)
      return;
   /* median-position pivot moved to the front */
   hypre_swap2( v, w, left, (left+right)/2);
   last = left;
   for (i = left+1; i <= right; i++)
      if (fabs(w[i]) > fabs(w[left]))
      {
         hypre_swap2(v, w, ++last, i);
      }
   hypre_swap2(v, w, left, last);
   hypre_qsort2abs(v, w, left, last-1);
   hypre_qsort2abs(v, w, last+1, right);
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterpModUnk - this is a modified interpolation for the
 * unknown approach.  here we need to pass in a strength matrix built on the
 * entire matrix.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpModUnk( hypre_ParCSRMatrix *A,
                                  HYPRE_Int *CF_marker,
                                  hypre_ParCSRMatrix *S,
                                  HYPRE_BigInt *num_cpts_global,
                                  HYPRE_Int num_functions,
                                  HYPRE_Int *dof_func,
                                  HYPRE_Int debug_flag,
                                  HYPRE_Real trunc_factor,
                                  HYPRE_Int max_elmts,
                                  HYPRE_Int *col_offd_S_to_A,
                                  hypre_ParCSRMatrix **P_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);

   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   HYPRE_Int *tmp_map_offd;

   HYPRE_Int *CF_marker_offd = NULL;
   HYPRE_Int *dof_func_offd = NULL;

   hypre_CSRMatrix *A_ext;
   HYPRE_Real *A_ext_data = NULL;
   HYPRE_Int *A_ext_i = NULL;
HYPRE_BigInt *A_ext_j = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; HYPRE_Int jj_begin_row,jj_begin_row_offd; HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int strong_f_marker; HYPRE_Int *fine_to_coarse; //HYPRE_Int *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; HYPRE_Int num_cols_P_offd; //HYPRE_BigInt my_first_cpt; HYPRE_Int i,i1,i2; HYPRE_Int j,jl,jj,jj1; HYPRE_Int kc; HYPRE_BigInt big_k; HYPRE_Int start; HYPRE_Int sgn; HYPRE_Int c_num; HYPRE_Real diagonal; HYPRE_Real sum; HYPRE_Real distribute; HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int print_level = 0; HYPRE_Int *int_buf_data; HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + local_numrows; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); #ifdef HYPRE_NO_GLOBAL_PARTITION //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns 
*-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; print_level = 1; } if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (num_functions > 1 && num_cols_A_offd) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (num_functions > 1) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*---------------------------------------------------------------------- * Get the ghost rows of A *---------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_procs > 1) { A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1); A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = 
hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); } index = 0; for (i=0; i < num_cols_A_offd; i++) { for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++) { big_k = A_ext_j[j]; if (big_k >= col_1 && big_k < col_n) { A_ext_j[index] = big_k - col_1; A_ext_data[index++] = A_ext_data[j]; } else { kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd); if (kc > -1) { A_ext_j[index] = (HYPRE_BigInt)(-kc-1); A_ext_data[index++] = A_ext_data[j]; } } } A_ext_i[i] = index; } for (i = num_cols_A_offd; i > 0; i--) A_ext_i[i] = A_ext_i[i-1]; if (num_procs > 1) A_ext_i[0] = 0; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. 
*-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i. *--------------------------------------------------------------------*/ else { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { if (col_offd_S_to_A) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = col_offd_S_to_A[S_offd_j[jj]]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } else { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) fine_to_coarse[i] += coarse_shift; } /*index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); }*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); /*#ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/ /*----------------------------------------------------------------------- * Loop over fine grid points. 
*-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); else P_marker_offd = NULL; for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } strong_f_marker = -2; for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; jj_begin_row = jj_counter; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
*--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } /*-------------------------------------------------------------- * If neighbor i1 is an F-point, mark it as a strong F-point * whose connection needs to be distributed. *--------------------------------------------------------------*/ else if (CF_marker[i1] != -3) { P_marker[i1] = strong_f_marker; } } jj_end_row = jj_counter; /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; jj_begin_row_offd = jj_counter_offd; if (num_procs > 1) { if (col_offd_S_to_A) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = col_offd_S_to_A[S_offd_j[jj]]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } /*----------------------------------------------------------- * If neighbor i1 is an F-point, mark it as a strong F-point * whose connection needs to be distributed. *-----------------------------------------------------------*/ else if (CF_marker_offd[i1] != -3) { P_marker_offd[i1] = strong_f_marker; } } } else { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. 
*-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } /*----------------------------------------------------------- * If neighbor i1 is an F-point, mark it as a strong F-point * whose connection needs to be distributed. *-----------------------------------------------------------*/ else if (CF_marker_offd[i1] != -3) { P_marker_offd[i1] = strong_f_marker; } } } } jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; /* Loop over ith row of A. First, the diagonal part of A */ for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. *--------------------------------------------------------------*/ if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } /*-------------------------------------------------------------- * Case 2: neighbor i1 is an F-point and strongly influences i, * distribute a_{i,i1} to C-points that strongly infuence i. * Note: currently no distribution to the diagonal in this case. HERE, we only want to distribut to points of the SAME function type *--------------------------------------------------------------*/ else if (P_marker[i1] == strong_f_marker) { sum = zero; /*----------------------------------------------------------- * Loop over row of A for point i1 and calculate the sum * of the connections to c-points that strongly influence i. 
*-----------------------------------------------------------*/ sgn = 1; if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1; /* Diagonal block part of row i1 */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (num_functions == 1 || dof_func[i1] == dof_func[i2]) { if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0 ) { sum += A_diag_data[jj1]; } } } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (num_functions == 1 || dof_func[i1] == dof_func[i2]) { if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) { sum += A_offd_data[jj1]; } } } } if (sum != 0) { distribute = A_diag_data[jj] / sum; /*----------------------------------------------------------- * Loop over row of A for point i1 and do the distribution. *-----------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (num_functions == 1 || dof_func[i1] == dof_func[i2]) { if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) { P_diag_data[P_marker[i2]] += distribute * A_diag_data[jj1]; } } } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (num_functions == 1 || dof_func[i1] == dof_func[i2]) { if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) { P_offd_data[P_marker_offd[i2]] += distribute * A_offd_data[jj1]; } } } } } else /* sum = 0 - only add to diag if the same function type */ { if (num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } /*-------------------------------------------------------------- * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1} * into the diagonal. 
(only if the same function type) *--------------------------------------------------------------*/ else if (CF_marker[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } /*---------------------------------------------------------------- * Still looping over ith row of A. Next, loop over the * off-diagonal part of A *---------------------------------------------------------------*/ if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. *--------------------------------------------------------------*/ if (P_marker_offd[i1] >= jj_begin_row_offd) { P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; } /*------------------------------------------------------------ * Case 2: neighbor i1 is an F-point and strongly influences i, * distribute a_{i,i1} to C-points that strongly infuence i. * Note: currently no distribution to the diagonal in this case. AGAIN, we only want to distribut to points of the SAME function type *-----------------------------------------------------------*/ else if (P_marker_offd[i1] == strong_f_marker) { sum = zero; /*--------------------------------------------------------- * Loop over row of A_ext for point i1 and calculate the sum * of the connections to c-points that strongly influence i. 
*---------------------------------------------------------*/ /* find row number */ c_num = A_offd_j[jj]; sgn = 1; if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1; for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++) { i2 = (HYPRE_Int)A_ext_j[jj1]; if (num_functions == 1 || dof_func[i1] == dof_func[i2]) { if (i2 > -1) { /* in the diagonal block */ if (P_marker[i2] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0) { sum += A_ext_data[jj1]; } } else { /* in the off_diagonal block */ if (P_marker_offd[-i2-1] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0) { sum += A_ext_data[jj1]; } } } } if (sum != 0) { distribute = A_offd_data[jj] / sum; /*--------------------------------------------------------- * Loop over row of A_ext for point i1 and do * the distribution. *--------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++) { i2 = (HYPRE_Int)A_ext_j[jj1]; if (num_functions == 1 || dof_func[i1] == dof_func[i2]) { if (i2 > -1) /* in the diagonal block */ { if (P_marker[i2] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0) { P_diag_data[P_marker[i2]] += distribute * A_ext_data[jj1]; } } else { /* in the off_diagonal block */ if (P_marker_offd[-i2-1] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0) P_offd_data[P_marker_offd[-i2-1]] += distribute * A_ext_data[jj1]; } } } } else /* sum = 0 */ { if (num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } /*----------------------------------------------------------- * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1} * into the diagonal. *-----------------------------------------------------------*/ else if (CF_marker_offd[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. 
*-----------------------------------------------------------------*/ if (diagonal == 0.0) { if (print_level) hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i); for (jj = jj_begin_row; jj < jj_end_row; jj++) { P_diag_data[jj] = 0.0; } for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { P_offd_data[jj] = 0.0; } } else { for (jj = jj_begin_row; jj < jj_end_row; jj++) { P_diag_data[jj] /= -diagonal; } for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { P_offd_data[jj] /= -diagonal; } } } strong_f_marker--; P_offd_i[i+1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_cols_A_offd; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; 
if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) { while (P_marker[index]==0) index++; tmp_map_offd[i] = index++; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext); return hypre_error_flag; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGTruncandBuild *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGTruncandBuild( hypre_ParCSRMatrix *P, HYPRE_Real trunc_factor, HYPRE_Int max_elmts) { hypre_CSRMatrix *P_offd = hypre_ParCSRMatrixOffd(P); hypre_ParCSRCommPkg *commpkg_P = hypre_ParCSRMatrixCommPkg(P); HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(P); HYPRE_Int *P_offd_i = hypre_CSRMatrixI(P_offd); HYPRE_Int *P_offd_j = hypre_CSRMatrixJ(P_offd); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(P_offd); HYPRE_Int n_fine = hypre_CSRMatrixNumRows(P_offd); 
HYPRE_BigInt *new_col_map_offd; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int P_offd_size=0, new_num_cols_offd; HYPRE_Int *P_marker; HYPRE_Int i; HYPRE_Int index; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_offd_j = hypre_CSRMatrixJ(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_size = P_offd_i[n_fine]; } new_num_cols_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); /*#define HYPRE_SMP_PRIVATE i #include "../utilities/hypre_smp_forloop.h"*/ for (i=0; i < num_cols_offd; i++) P_marker[i] = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { new_num_cols_offd++; P_marker[index] = 1; } } tmp_map_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST); new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_num_cols_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < new_num_cols_offd; i++) { while (P_marker[index]==0) index++; tmp_map_offd[i] = index++; } /*#define HYPRE_SMP_PRIVATE i #include "../utilities/hypre_smp_forloop.h"*/ for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], new_num_cols_offd); } index = 0; for(i = 0; i < new_num_cols_offd; i++) { while (P_marker[index] == 0) index++; new_col_map_offd[i] = col_map_offd[index]; index++; } if (P_offd_size) hypre_TFree(P_marker, HYPRE_MEMORY_HOST); if (new_num_cols_offd) { hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(col_map_offd, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd; hypre_CSRMatrixNumCols(P_offd) = new_num_cols_offd; } if (commpkg_P != NULL) hypre_MatvecCommPkgDestroy(commpkg_P); hypre_MatvecCommPkgCreate(P); return hypre_error_flag; } hypre_ParCSRMatrix *hypre_CreateC( hypre_ParCSRMatrix *A, HYPRE_Real w) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); 
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); hypre_ParCSRMatrix *C; hypre_CSRMatrix *C_diag; hypre_CSRMatrix *C_offd; HYPRE_Real *C_diag_data; HYPRE_Int *C_diag_i; HYPRE_Int *C_diag_j; HYPRE_Real *C_offd_data; HYPRE_Int *C_offd_i; HYPRE_Int *C_offd_j; HYPRE_BigInt *col_map_offd_C; HYPRE_Int i, j, index; HYPRE_Real invdiag; HYPRE_Real w_local = w; C = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_rows, row_starts, row_starts, num_cols_offd, A_diag_i[num_rows], A_offd_i[num_rows]); hypre_ParCSRMatrixInitialize(C); C_diag = hypre_ParCSRMatrixDiag(C); C_offd = hypre_ParCSRMatrixOffd(C); C_diag_i = hypre_CSRMatrixI(C_diag); C_diag_j = hypre_CSRMatrixJ(C_diag); C_diag_data = hypre_CSRMatrixData(C_diag); C_offd_i = hypre_CSRMatrixI(C_offd); C_offd_j = hypre_CSRMatrixJ(C_offd); C_offd_data = hypre_CSRMatrixData(C_offd); col_map_offd_C = hypre_ParCSRMatrixColMapOffd(C); hypre_ParCSRMatrixOwnsRowStarts(C) = 0; hypre_ParCSRMatrixOwnsColStarts(C) = 0; for (i=0; i < num_cols_offd; i++) col_map_offd_C[i] = col_map_offd_A[i]; for (i=0; i < num_rows; i++) { index = A_diag_i[i]; invdiag = -w/A_diag_data[index]; C_diag_data[index] = 1.0-w; C_diag_j[index] = A_diag_j[index]; if (w == 0) { w_local = fabs(A_diag_data[index]); for (j = index+1; j < A_diag_i[i+1]; j++) w_local += fabs(A_diag_data[j]); for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) w_local += 
fabs(A_offd_data[j]); invdiag = -1/w_local; C_diag_data[index] = 1.0-A_diag_data[index]/w_local; } C_diag_i[i] = index; C_offd_i[i] = A_offd_i[i]; for (j = index+1; j < A_diag_i[i+1]; j++) { C_diag_data[j] = A_diag_data[j]*invdiag; C_diag_j[j] = A_diag_j[j]; } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { C_offd_data[j] = A_offd_data[j]*invdiag; C_offd_j[j] = A_offd_j[j]; } } C_diag_i[num_rows] = A_diag_i[num_rows]; C_offd_i[num_rows] = A_offd_i[num_rows]; return C; } /* RL */ HYPRE_Int hypre_BoomerAMGBuildInterpOnePnt( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); //HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; /* csr's */ hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; /* arrays */ HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int 
num_cols_offd_P; HYPRE_Int *tmp_map_offd = NULL; HYPRE_BigInt *col_map_offd_P = NULL; /* CF marker off-diag part */ HYPRE_Int *CF_marker_offd = NULL; /* func type off-diag part */ HYPRE_Int *dof_func_offd = NULL; /* nnz */ HYPRE_Int nnz_diag, nnz_offd, cnt_diag, cnt_offd; HYPRE_Int *marker_diag, *marker_offd = NULL; /* local size */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); /* number of C-pts */ HYPRE_Int n_cpts = 0; /* fine to coarse mapping: diag part and offd part */ HYPRE_Int *fine_to_coarse; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_BigInt total_global_cpts, my_first_cpt; HYPRE_Int my_id, num_procs; HYPRE_Int num_sends; HYPRE_Int *int_buf_data = NULL; HYPRE_BigInt *big_int_buf_data = NULL; //HYPRE_Int col_start = hypre_ParCSRMatrixFirstRowIndex(A); //HYPRE_Int col_end = col_start + n_fine; HYPRE_Int i, j, i1, j1, k1, index, start; HYPRE_Int *max_abs_cij; char *max_abs_diag_offd; HYPRE_Real max_abs_aij, vv; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ /* CF marker for the off-diag columns */ if (num_cols_A_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST); } /* function type indicator for the off-diag columns */ if (num_functions > 1 && num_cols_A_offd) { dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST); } /* if CommPkg of A is not present, create it */ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* number of 
sends to do (number of procs) */ num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); /* send buffer, of size send_map_starts[num_sends]), * i.e., number of entries to send */ int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),HYPRE_MEMORY_HOST); /* copy CF markers of elements to send to buffer * RL: why copy them with two for loops? Why not just loop through all in one */ index = 0; for (i = 0; i < num_sends; i++) { /* start pos of elements sent to send_proc[i] */ start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); /* loop through all elems to send_proc[i] */ for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { /* CF marker of send_map_elemts[j] */ int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } /* create a handle to start communication. 11: for integer */ comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); /* destroy the handle to finish communication */ hypre_ParCSRCommHandleDestroy(comm_handle); /* do a similar communication for dof_func */ if (num_functions > 1) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } hypre_TFree(int_buf_data,HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping, * and find the most strongly influencing C-pt for each F-pt *-----------------------------------------------------------------------*/ /* nnz in diag and offd parts */ cnt_diag = 0; cnt_offd = 0; max_abs_cij = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST); max_abs_diag_offd = hypre_CTAlloc(char, 
n_fine,HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST); /* markers initialized as zeros */ marker_diag = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST); marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { //fine_to_coarse[i] = my_first_cpt + n_cpts; fine_to_coarse[i] = n_cpts; n_cpts++; continue; } /* mark all the strong connections: in S */ HYPRE_Int MARK = i + 1; /* loop through row i of S, diag part */ for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++) { marker_diag[S_diag_j[j]] = MARK; } /* loop through row i of S, offd part */ if (num_procs > 1) { for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++) { j1 = col_offd_S_to_A ? col_offd_S_to_A[S_offd_j[j]] : S_offd_j[j]; marker_offd[j1] = MARK; } } fine_to_coarse[i] = -1; /*--------------------------------------------------------------------------- * If i is an F-pt, interpolation is from the most strongly influencing C-pt * Find this C-pt and save it *--------------------------------------------------------------------------*/ /* if we failed to find any strong C-pt, mark this point as an 'n' */ char marker = 'n'; /* max abs val */ max_abs_aij = -1.0; /* loop through row i of A, diag part */ for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { i1 = A_diag_j[j]; vv = fabs(A_diag_data[j]); #if 0 /* !!! this is a hack just for code verification purpose !!! it basically says: 1. if we see |a_ij| < 1e-14, force it to be 1e-14 2. if we see |a_ij| == the max(|a_ij|) so far exactly, replace it if the j idx is smaller Reasons: 1. numerical round-off for eps-level values 2. entries in CSR rows may be listed in different orders */ vv = vv < 1e-14 ? 
1e-14 : vv; if (CF_marker[i1] >= 0 && marker_diag[i1] == MARK && vv == max_abs_aij && i1 < max_abs_cij[i]) { /* mark it as a 'd' */ marker = 'd'; max_abs_cij[i] = i1; max_abs_aij = vv; continue; } #endif /* it is a strong C-pt and has abs val larger than what have seen */ if (CF_marker[i1] >= 0 && marker_diag[i1] == MARK && vv > max_abs_aij) { /* mark it as a 'd' */ marker = 'd'; max_abs_cij[i] = i1; max_abs_aij = vv; } } /* offd part */ if (num_procs > 1) { for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { i1 = A_offd_j[j]; vv = fabs(A_offd_data[j]); if (CF_marker_offd[i1] >= 0 && marker_offd[i1] == MARK && vv > max_abs_aij) { /* mark it as an 'o' */ marker = 'o'; max_abs_cij[i] = i1; max_abs_aij = vv; } } } max_abs_diag_offd[i] = marker; if (marker == 'd') { cnt_diag ++; } else if (marker == 'o') { cnt_offd ++; } } nnz_diag = cnt_diag + n_cpts; nnz_offd = cnt_offd; /*------------- allocate arrays */ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1,HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag,HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, nnz_diag,HYPRE_MEMORY_HOST); /* not in ``if num_procs > 1'', * allocation needed even for empty CSR */ P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1,HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd,HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, nnz_offd,HYPRE_MEMORY_HOST); /* redundant */ P_diag_i[0] = 0; P_offd_i[0] = 0; /* reset counters */ cnt_diag = 0; cnt_offd = 0; /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd,HYPRE_MEMORY_HOST); big_int_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { big_int_buf_data[index++] = my_first_cpt +(HYPRE_BigInt)fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate(21, comm_pkg, big_int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); /*----------------------------------------------------------------------- * Second Pass: Populate P *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { if (CF_marker[i] >= 0) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. 
*--------------------------------------------------------------------*/ //P_diag_j[cnt_diag] = fine_to_coarse[i] - my_first_cpt; P_diag_j[cnt_diag] = fine_to_coarse[i]; P_diag_data[cnt_diag++] = 1.0; } else { /*--------------------------------------------------------------------------- * If i is an F-pt, interpolation is from the most strongly influencing C-pt *--------------------------------------------------------------------------*/ if (max_abs_diag_offd[i] == 'd') { /* on diag part of P */ j = max_abs_cij[i]; //P_diag_j[cnt_diag] = fine_to_coarse[j] - my_first_cpt; P_diag_j[cnt_diag] = fine_to_coarse[j]; P_diag_data[cnt_diag++] = 1.0; } else if (max_abs_diag_offd[i] == 'o') { /* on offd part of P */ j = max_abs_cij[i]; P_offd_j[cnt_offd] = j; P_offd_data[cnt_offd++] = 1.0; } } P_diag_i[i+1] = cnt_diag; P_offd_i[i+1] = cnt_offd; } hypre_assert(cnt_diag == nnz_diag); hypre_assert(cnt_offd == nnz_offd); /* num of cols in the offd part of P */ num_cols_offd_P = 0; /* marker_offd: all -1 */ for (i = 0; i < num_cols_A_offd; i++) { marker_offd[i] = -1; } for (i = 0; i < nnz_offd; i++) { i1 = P_offd_j[i]; if (marker_offd[i1] == -1) { num_cols_offd_P++; marker_offd[i1] = 1; } } /* col_map_offd_P: the col indices of the offd of P * we first keep them be the offd-idx of A */ col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_P,HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_P,HYPRE_MEMORY_HOST); for (i = 0, i1 = 0; i < num_cols_A_offd; i++) { if (marker_offd[i] == 1) { tmp_map_offd[i1++] = i; } } hypre_assert(i1 == num_cols_offd_P); /* now, adjust P_offd_j to local idx w.r.t col_map_offd_R * by searching */ for (i = 0; i < nnz_offd; i++) { i1 = P_offd_j[i]; k1 = hypre_BinarySearch(tmp_map_offd, i1, num_cols_offd_P); /* search must succeed */ hypre_assert(k1 >= 0 && k1 < num_cols_offd_P); P_offd_j[i] = k1; } /* change col_map_offd_P to global coarse ids */ for (i = 0; i < num_cols_offd_P; i++) { col_map_offd_P[i] = 
fine_to_coarse_offd[tmp_map_offd[i]]; } /* Now, we should have everything of Parcsr matrix P */ P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumCols(A), /* global num of rows */ total_global_cpts, /* global num of cols */ hypre_ParCSRMatrixColStarts(A), /* row_starts */ num_cpts_global, /* col_starts */ num_cols_offd_P, /* num cols offd */ nnz_diag, nnz_offd); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; /* P does not own ColStarts, since A does */ hypre_ParCSRMatrixOwnsRowStarts(P) = 0; hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; /* create CommPkg of P */ hypre_MatvecCommPkgCreate(P); *P_ptr = P; /* free workspace */ hypre_TFree(CF_marker_offd,HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd,HYPRE_MEMORY_HOST); hypre_TFree(tmp_map_offd,HYPRE_MEMORY_HOST); hypre_TFree(big_int_buf_data,HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse,HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse_offd,HYPRE_MEMORY_HOST); hypre_TFree(marker_diag,HYPRE_MEMORY_HOST); hypre_TFree(marker_offd,HYPRE_MEMORY_HOST); hypre_TFree(max_abs_cij,HYPRE_MEMORY_HOST); hypre_TFree(max_abs_diag_offd,HYPRE_MEMORY_HOST); return hypre_error_flag; }
/* ==== file boundary: outeronly2-orig-no.c ==== */
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.

This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the disclaimer below.

* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the disclaimer (as noted below)
  in the documentation and/or other materials provided with the
  distribution.

* Neither the name of the LLNS/LLNL nor the names of its contributors
  may be used to endorse or promote products derived from this
  software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/*
 * Only the outermost loop can be parallelized.  The inner loop carries a
 * loop-carried data dependence along each row, but it runs sequentially
 * within each thread, so there is no race condition.
 * Source: based on AutoPar's regression test.
 */

int n=100, m=100;
double b[100][100];

/* Sweep each row of b left to right so that every element copies its left
 * neighbour.  Rows are independent of one another, which is why only the
 * outer loop is parallel; the inner index is private to each thread. */
void foo()
{
  int row, col;
#pragma omp parallel for private(col)
  for (row = 0; row < n; row++)
  {
    /* Sequential in-row sweep: keep col starting at 1 so col-1 is valid. */
    for (col = 1; col < m; col++)
    {
      b[row][col] = b[row][col-1];
    }
  }
}

int main()
{
  foo();
  return 0;
}
/* ==== file boundary: expm_multiply_parallel.h ==== */
#ifndef _EXPM_MULTIPLY_H #define _EXPM_MULTIPLY_H #include "complex_ops.h" #include <stdio.h> #include "openmp.h" #if defined(_OPENMP) #include "csrmv_merge.h" #else template<typename I, typename T1,typename T2,typename T3> void csr_matvec(const bool overwrite_y, const I n, const I Ap[], const I Aj[], const T1 Ax[], const T2 a, const T3 x[], I rco[], T3 vco[], T3 y[]) { if(overwrite_y){ for(I k = 0; k<n; k++){ T3 sum = 0; for(I jj = Ap[k]; jj < Ap[k+1]; jj++){ sum += Ax[jj] * x[Aj[jj]]; } y[k] = a * sum; } }else{ for(I k = 0; k<n; k++){ T3 sum = 0; for(I jj = Ap[k]; jj < Ap[k+1]; jj++){ sum += Ax[jj] * x[Aj[jj]]; } y[k] += a * sum; } } } #endif #include <algorithm> #include <vector> #include "math_functions.h" // #include <valarray> // std::valarray, std::slice template<typename I, typename T1,typename T2,typename T3> void expm_multiply(const I n, const I Ap[], const I Aj[], const T1 Ax[], const int s, const int m_star, const T2 tol, const T1 mu, const T3 a, T3 F[], T3 work[] ) { const int num_threads = omp_get_max_threads(); std::vector<I> rco_vec(num_threads,0); std::vector<T3> vco_vec(num_threads,0); std::vector<T2> c1_threads_vec(num_threads,0); std::vector<T2> c2_threads_vec(num_threads,0); std::vector<T2> c3_threads_vec(num_threads,0); T3 * B1 = work; T3 * B2 = work + n; I * rco = &rco_vec[0]; T3 * vco = &vco_vec[0]; T2 * c1_threads = &c1_threads_vec[0]; T2 * c2_threads = &c2_threads_vec[0]; T2 * c3_threads = &c3_threads_vec[0]; bool exit_loop=false; #pragma omp parallel shared(exit_loop,c1_threads,c2_threads,c3_threads,F,B1,B2,rco,vco) firstprivate(num_threads) { const int tid = omp_get_thread_num(); const I items_per_thread = (n+num_threads-1)/num_threads; const I begin = std::min(items_per_thread * tid, n); const I end = std::min(begin+items_per_thread, n); const T3 eta = math_functions::exp(a*(mu/T2(s))); T2 c1_thread=0,c2_thread=0,c3_thread=0,c1=0,c2=0,c3=0; c1_thread = 0; for(I k=begin;k<end;k++){ T3 f = F[k]; B1[k] = f; c1_thread = 
std::max(c1_thread,math_functions::abs(f)); } #pragma omp barrier if(tid==0){ c1 = *std::max_element(c1_threads,c1_threads+num_threads); } for(int i=0;i<s;i++){ #pragma omp single { exit_loop = false; } for(int j=1;j<m_star+1 && !exit_loop;j++){ #if defined(_OPENMP) csrmv_merge<I,T1,T3,T3>(true,n,Ap,Aj,Ax,a/T2(j*s),B1,rco,vco,B2); // implied barrier #else csr_matvec<I,T1,T3,T3>(true,n,Ap,Aj,Ax,a/T2(j*s),B1,rco,vco,B2); #endif c2_thread = 0; c3_thread = 0; for(I k=begin;k<end;k++){ T3 b2 = B2[k]; T3 f = F[k] += b2; B1[k] = b2; // used cached values to compute comparisons for infinite norm c2_thread = std::max(c2_thread,math_functions::abs(b2)); c3_thread = std::max(c3_thread,math_functions::abs(f)); } c2_threads[tid] = c2_thread; c3_threads[tid] = c3_thread; #pragma omp barrier if(tid==0){ c2 = *std::max_element(c2_threads,c2_threads+num_threads); c3 = *std::max_element(c3_threads,c3_threads+num_threads); exit_loop = ((c1+c2)<=(tol*c3)); c1 = c2; } #pragma omp barrier } c1_thread = 0; for(I k=begin;k<end;k++){ T3 f = F[k] *= eta; B1[k] = f; // used cached values to compute comparisons for infinite norm c1_thread = std::max(c1_thread,math_functions::abs(f)); } c1_threads[tid] = c1_thread; #pragma omp barrier if(tid==0){ c1 = *std::max_element(c1_threads,c1_threads+num_threads); } } } } #endif
/* ==== file boundary: original.h ==== */
#ifndef _ORIGINAL_H_
#define _ORIGINAL_H_

#include <assert.h>
#include <iostream>
#include <malloc.h>
#include <memory>
#include "omp.h"
#include <stdlib.h>
#include <time.h>
#include <vector>

using namespace std;

// Reference direct convolution:
//   Out[b,t,x,y] += sum_{s,w,h} In[b,s,x+w,y+h] * Ker[t,s,w,h]
// In is padded to (Nx+Nw-1) x (Ny+Nh-1) per input channel; the caller must
// zero-initialise Out because the kernel accumulates with +=.
void origin_conv(float *In, float *Ker, float *Out, int Nb, int Nt, int Nx,
                 int Ny, int Ns, int Nw, int Nh) {
  // BUG FIX: the original used collapse(5), which also distributed the `s`
  // loop across threads.  `s` is a reduction dimension — every s iteration
  // accumulates into the same Out[b,t,x,y] — so collapse(5) let distinct
  // threads perform unsynchronised `+=` on the same element (a data race
  // producing nondeterministic results).  collapse(4) parallelises only the
  // independent b/t/x/y dimensions; s/w/h stay sequential per output pixel.
#pragma omp parallel for collapse(4)
  for (int b = 0; b < Nb; b++) {
    for (int t = 0; t < Nt; t++) {
      for (int x = 0; x < Nx; x++) {
        for (int y = 0; y < Ny; y++) {
          for (int s = 0; s < Ns; s++) {
            for (int w = 0; w < Nw; w++) {
              for (int h = 0; h < Nh; h++) {
                Out[b * Nt * Nx * Ny + t * Nx * Ny + x * Ny + y] +=
                    In[b * Ns * (Nx + Nw - 1) * (Ny + Nh - 1) +
                       s * (Nx + Nw - 1) * (Ny + Nh - 1) +
                       (x + w) * (Ny + Nh - 1) + (y + h)] *
                    Ker[t * Ns * Nw * Nh + s * Nw * Nh + w * Nh + h];
              }
            }
          }
        }
      }
    }
  }
}

// Element-wise exact comparison of two float buffers; prints the first
// mismatch and exits with status 1.  Exact (==) comparison is intentional:
// both buffers are expected to come from bit-identical accumulation orders.
void compare(float *C1, float *C2, int size) {
  cout << "comparing" << endl;
  for (int i = 0; i < size; i++) {
    if (C1[i] != C2[i]) {
      cout << "data at " << i << " C1=" << C1[i] << ", C2=" << C2[i] << endl;
      exit(1);
    }
  }
  cout << "fin compare\n";
}

// Micro-kernel: accumulate an Rt x Rxy output tile of C from an s_tile-deep
// slice of A (padded input) and B (kernel packed as [s][w][h][t]).
// bcast_stride[xy] is the precomputed input offset of output pixel xy.
// Aoff/Boff/Coff/Nb/Ns are unused here; they are kept so all micro-kernel
// variants in this header share one signature.
inline void partial_ukr(float *A, float *B, float *C, long long s_tile, int Rt,
                        int Rxy, int Rw, int Rh, long long int *bcast_stride,
                        int Aoff, int Boff, int Coff, int Nb, int Nt, int Nx,
                        int Ny, int Ns, int Nw, int Nh) {
  for (int s = 0; s < s_tile; s++) {
    for (int xy = 0; xy < Rxy; xy++) {
      for (int w = 0; w < Rw; w++) {
        for (int h = 0; h < Rh; h++) {
          for (int t = 0; t < Rt; t++) {
            C[t * Nx * Ny + xy] +=
                A[s * (Nx + Nw - 1) * (Ny + Nh - 1) + bcast_stride[xy] + h +
                  w * (Ny + Nh - 1)] *
                B[t + s * Nw * Nh * Nt + w * Nh * Nt + h * Nt];
          }
        }
      }
    }
  }
}

// Variant for B repacked into t-panels of 16: panel (t>>4) holds kernel
// entries laid out [s][w][h][t&15].
inline void partial_ukr_peelker_16(float *A, float *B, float *C,
                                   long long s_tile, int Rt, int Rxy, int Rw,
                                   int Rh, long long int *bcast_stride,
                                   int Aoff, int Boff, int Coff, int Nb,
                                   int Nt, int Nx, int Ny, int Ns, int Nw,
                                   int Nh) {
  for (int s = 0; s < s_tile; s++) {
    for (int xy = 0; xy < Rxy; xy++) {
      for (int w = 0; w < Rw; w++) {
        for (int h = 0; h < Rh; h++) {
          for (int t = 0; t < Rt; t++) {
            C[t * Nx * Ny + xy] +=
                A[s * (Nx + Nw - 1) * (Ny + Nh - 1) + bcast_stride[xy] + h +
                  w * (Ny + Nh - 1)] *
                B[(t & 15) + s * Nw * Nh * 16 + w * Nh * 16 + h * 16 +
                  (t >> 4) * Ns * Nw * Nh * 16];
          }
        }
      }
    }
  }
}

// Fully unrolled shape: 3x3 kernel window, one output pixel, 16 filters in
// one t-panel.  Rt/Rxy/Rw/Rh are ignored (the trip counts are hard-wired).
inline void partial_ukr_peelker_1_16_unroll3x3(
    float *A, float *B, float *C, long long s_tile, int Rt, int Rxy, int Rw,
    int Rh, long long int *bcast_stride, int Aoff, int Boff, int Coff, int Nb,
    int Nt, int Nx, int Ny, int Ns, int Nw, int Nh) {
  for (int s = 0; s < s_tile; s++) {
    for (int w = 0; w < 3; w++) {
      for (int h = 0; h < 3; h++) {
        for (int xy = 0; xy < 1; xy++) {
          for (int t = 0; t < 16; t++) {
            C[t * Nx * Ny + xy] +=
                A[s * (Nx + Nw - 1) * (Ny + Nh - 1) + bcast_stride[xy] + h +
                  w * (Ny + Nh - 1)] *
                B[(t) + s * Nw * Nh * 16 + w * Nh * 16 + h * 16];
          } // end t
        }
      }
    }
  }
}

// Fully unrolled shape: 1x1 kernel window, one output pixel, 16 filters.
// Rt/Rxy/Rw/Rh are ignored (the trip counts are hard-wired).
inline void partial_ukr_peelker_1_16_unroll1x1(
    float *A, float *B, float *C, long long s_tile, int Rt, int Rxy, int Rw,
    int Rh, long long int *bcast_stride, int Aoff, int Boff, int Coff, int Nb,
    int Nt, int Nx, int Ny, int Ns, int Nw, int Nh) {
  for (int s = 0; s < s_tile; s++) {
    for (int w = 0; w < 1; w++) {
      for (int h = 0; h < 1; h++) {
        for (int xy = 0; xy < 1; xy++) {
          for (int t = 0; t < 16; t++) {
            C[t * Nx * Ny + xy] +=
                A[s * (Nx + Nw - 1) * (Ny + Nh - 1) + bcast_stride[xy] + h +
                  w * (Ny + Nh - 1)] *
                B[(t) + s * Nw * Nh * 16 + w * Nh * 16 + h * 16];
          } // end t
        }
      }
    }
  }
}

// Variant for a B panel of width 13 (the leftover panel when Nt is not a
// multiple of 16): B is packed [s][w][h][t] with t-stride 13.
inline void partial_ukr_peelker_13(float *A, float *B, float *C,
                                   long long s_tile, int Rt, int Rxy, int Rw,
                                   int Rh, long long int *bcast_stride,
                                   int Aoff, int Boff, int Coff, int Nb,
                                   int Nt, int Nx, int Ny, int Ns, int Nw,
                                   int Nh) {
  for (int s = 0; s < s_tile; s++) {
    for (int xy = 0; xy < Rxy; xy++) {
      for (int w = 0; w < Rw; w++) {
        for (int h = 0; h < Rh; h++) {
          for (int t = 0; t < Rt; t++) {
            C[t * Nx * Ny + xy] +=
                A[s * (Nx + Nw - 1) * (Ny + Nh - 1) + bcast_stride[xy] + h +
                  w * (Ny + Nh - 1)] *
                B[t + s * Nw * Nh * 13 + w * Nh * 13 + h * 13];
          }
        }
      }
    }
  }
}

#endif
ast-dump-openmp-parallel-for.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp parallel for for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp parallel for for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp parallel for collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp parallel for collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp parallel for collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-parallel-for.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPParallelForDirective {{.*}} <line:4:1, col:25> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' 
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for.c:4:1) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPParallelForDirective {{.*}} <line:10:1, col:25> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: 
| | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for.c:10:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPParallelForDirective {{.*}} <line:17:1, col:37> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:26, col:36> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:35> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:35> 'int' 1 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | 
`-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for.c:17:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPParallelForDirective {{.*}} <line:24:1, col:37> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:26, col:36> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:35> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:35> 'int' 2 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | 
`-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for.c:24:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPParallelForDirective {{.*}} <line:31:1, col:37> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:26, col:36> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:35> 'int' // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:35> 'int' 2 // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' 
<LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var 
{{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for.c:31:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
gimplify.c
/* Modula-3: modified */
/* Tree lowering pass.  This pass converts the GENERIC functions-as-trees
   tree representation into the GIMPLE form.
   Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Major work done by Sebastian Pop <s.pop@laposte.net>,
   Diego Novillo <dnovillo@redhat.com> and Jason Merrill <jason@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "tree-pretty-print.h"
#include "langhooks.h"
#include "tree-flow.h"
#include "cgraph.h"
#include "timevar.h"
#include "hashtab.h"
#include "flags.h"
#include "function.h"
#include "output.h"
#include "ggc.h"
#include "diagnostic-core.h"
#include "target.h"
#include "pointer-set.h"
#include "splay-tree.h"
#include "vec.h"
#include "gimple.h"
#include "tree-pass.h"

#include "langhooks-def.h"	/* FIXME: for lhd_set_decl_assembler_name.  */
#include "expr.h"		/* FIXME: for can_move_by_pieces
				   and STACK_CHECK_MAX_VAR_SIZE.  */

EXTERN_C_START

/* The innermost gimplification context.  Contexts are pushed and popped
   around each function being gimplified (see push_gimplify_context /
   pop_gimplify_context below); prev_context links them into a stack.  */
static struct gimplify_ctx *gimplify_ctxp;

/* Formal (expression) temporary table handling: Multiple occurrences of
   the same scalar expression are evaluated into the same temporary.  */

/* One entry of the formal-temporary hash table: maps an expression (VAL,
   the hash key) to the temporary created for it (TEMP).  */
typedef struct gimple_temp_hash_elt
{
  tree val;   /* Key */
  tree temp;  /* Value */
} elt_t;

/* Forward declarations.  */
static enum gimplify_status gimplify_compound_expr (tree *, gimple_seq *, bool);

/* Mark X addressable.  Unlike the langhook we expect X to be in gimple
   form and we don't do any syntax checking.  */

void
mark_addressable (tree x)
{
  /* Strip component references (array/field accesses, etc.) down to the
     base object.  */
  while (handled_component_p (x))
    x = TREE_OPERAND (x, 0);
  /* Look through a MEM_REF of a decl's address to the decl itself.  */
  if (TREE_CODE (x) == MEM_REF
      && TREE_CODE (TREE_OPERAND (x, 0)) == ADDR_EXPR)
    x = TREE_OPERAND (TREE_OPERAND (x, 0), 0);
  /* Only declarations can meaningfully be marked addressable; anything
     else (constants, SSA names, ...) is silently ignored.  */
  if (TREE_CODE (x) != VAR_DECL
      && TREE_CODE (x) != PARM_DECL
      && TREE_CODE (x) != RESULT_DECL)
    return;
  TREE_ADDRESSABLE (x) = 1;
}

/* Return a hash value for a formal temporary table entry.  */

static hashval_t
gimple_tree_hash (const void *p)
{
  tree t = ((const elt_t *) p)->val;
  return iterative_hash_expr (t, 0);
}

/* Compare two formal temporary table entries.  Returns nonzero iff the
   two keyed expressions are structurally identical with the same type.  */

static int
gimple_tree_eq (const void *p1, const void *p2)
{
  tree t1 = ((const elt_t *) p1)->val;
  tree t2 = ((const elt_t *) p2)->val;
  enum tree_code code = TREE_CODE (t1);

  /* Cheap rejections first: tree code and type must match exactly.  */
  if (TREE_CODE (t2) != code
      || TREE_TYPE (t1) != TREE_TYPE (t2))
    return 0;

  if (!operand_equal_p (t1, t2, 0))
    return 0;

#ifdef ENABLE_CHECKING
  /* Only allow them to compare equal if they also hash equal; otherwise
     results are nondeterminate, and we fail bootstrap comparison.  */
  gcc_assert (gimple_tree_hash (p1) == gimple_tree_hash (p2));
#endif

  return 1;
}

/* Link gimple statement GS to the end of the sequence *SEQ_P.  If
   *SEQ_P is NULL, a new sequence is allocated.  This function is
   similar to gimple_seq_add_stmt, but does not scan the operands.
   During gimplification, we need to manipulate statement sequences
   before the def/use vectors have been constructed.  */

void
gimplify_seq_add_stmt (gimple_seq *seq_p, gimple gs)
{
  gimple_stmt_iterator si;

  /* Appending a null statement is a no-op.  */
  if (gs == NULL)
    return;

  if (*seq_p == NULL)
    *seq_p = gimple_seq_alloc ();

  si = gsi_last (*seq_p);

  gsi_insert_after_without_update (&si, gs, GSI_NEW_STMT);
}

/* Append sequence SRC to the end of sequence *DST_P.  If *DST_P is
   NULL, a new sequence is allocated.  This function is
   similar to gimple_seq_add_seq, but does not scan the operands.
   During gimplification, we need to manipulate statement sequences
   before the def/use vectors have been constructed.  */

static void
gimplify_seq_add_seq (gimple_seq *dst_p, gimple_seq src)
{
  gimple_stmt_iterator si;

  /* Appending an empty sequence is a no-op.  */
  if (src == NULL)
    return;

  if (*dst_p == NULL)
    *dst_p = gimple_seq_alloc ();

  si = gsi_last (*dst_p);

  gsi_insert_seq_after_without_update (&si, src, GSI_NEW_STMT);
}

/* Set up a context for the gimplifier.  C is caller-owned storage; it is
   zeroed here and pushed onto the context stack.  */

void
push_gimplify_context (struct gimplify_ctx *c)
{
  memset (c, '\0', sizeof (*c));
  c->prev_context = gimplify_ctxp;
  gimplify_ctxp = c;
}

/* Tear down a context for the gimplifier.  If BODY is non-null, then
   put the temporaries into the outer BIND_EXPR.  Otherwise, put them
   in the local_decls.

   BODY is not a sequence, but the first tuple in a sequence.
*/

void
pop_gimplify_context (gimple body)
{
  struct gimplify_ctx *c = gimplify_ctxp;

  /* The bind-expression stack must be balanced (fully popped) by now.  */
  gcc_assert (c && (c->bind_expr_stack == NULL
		    || VEC_empty (gimple, c->bind_expr_stack)));
  VEC_free (gimple, heap, c->bind_expr_stack);
  gimplify_ctxp = c->prev_context;

  if (body)
    declare_vars (c->temps, body, false);
  else
    record_vars (c->temps);

  if (c->temp_htab)
    htab_delete (c->temp_htab);
}

/* Push GIMPLE_BIND onto the stack of bind expressions currently being
   gimplified, allocating the stack lazily on first use.  */

static void
gimple_push_bind_expr (gimple gimple_bind)
{
  if (gimplify_ctxp->bind_expr_stack == NULL)
    gimplify_ctxp->bind_expr_stack = VEC_alloc (gimple, heap, 8);
  VEC_safe_push (gimple, heap, gimplify_ctxp->bind_expr_stack, gimple_bind);
}

/* Pop the innermost GIMPLE_BIND from the bind-expression stack.  */

static void
gimple_pop_bind_expr (void)
{
  VEC_pop (gimple, gimplify_ctxp->bind_expr_stack);
}

/* Return the innermost GIMPLE_BIND currently being gimplified.  */

gimple
gimple_current_bind_expr (void)
{
  return VEC_last (gimple, gimplify_ctxp->bind_expr_stack);
}

/* Return the stack GIMPLE_BINDs created during gimplification.  */

VEC(gimple, heap) *
gimple_bind_expr_stack (void)
{
  return gimplify_ctxp->bind_expr_stack;
}

/* Returns true iff there is a COND_EXPR between us and the innermost
   CLEANUP_POINT_EXPR.  This info is used by gimple_push_cleanup.  */

static bool
gimple_conditional_context (void)
{
  return gimplify_ctxp->conditions > 0;
}

/* Note that we've entered a COND_EXPR.  */

static void
gimple_push_condition (void)
{
#ifdef ENABLE_GIMPLE_CHECKING
  /* On entry to the outermost conditional scope there must be no
     pending conditional cleanups left over.  */
  if (gimplify_ctxp->conditions == 0)
    gcc_assert (gimple_seq_empty_p (gimplify_ctxp->conditional_cleanups));
#endif
  ++(gimplify_ctxp->conditions);
}

/* Note that we've left a COND_EXPR.  If we're back at unconditional scope
   now, add any conditional cleanups we've seen to the prequeue.  */

static void
gimple_pop_condition (gimple_seq *pre_p)
{
  int conds = --(gimplify_ctxp->conditions);

  gcc_assert (conds >= 0);
  if (conds == 0)
    {
      gimplify_seq_add_seq (pre_p, gimplify_ctxp->conditional_cleanups);
      gimplify_ctxp->conditional_cleanups = NULL;
    }
}

/* A stable comparison routine for use with splay trees and DECLs.
   DECL_UIDs give an ordering independent of pointer values, which keeps
   splay-tree traversal order reproducible across runs.  */

static int
splay_tree_compare_decl_uid (splay_tree_key xa, splay_tree_key xb)
{
  tree a = (tree) xa;
  tree b = (tree) xb;

  return DECL_UID (a) - DECL_UID (b);
}

/* Both gimplify the statement T and append it to *SEQ_P.  This function
   behaves exactly as gimplify_stmt, but you don't have to pass T as a
   reference.  */

void
gimplify_and_add (tree t, gimple_seq *seq_p)
{
  gimplify_stmt (&t, seq_p);
}

/* Gimplify statement T into sequence *SEQ_P, and return the first
   tuple in the sequence of generated tuples for this statement.
   Return NULL if gimplifying T produced no tuples.  */

static gimple
gimplify_and_return_first (tree t, gimple_seq *seq_p)
{
  /* Remember where the sequence ended before gimplifying T, so we can
     find the first statement T contributed.  */
  gimple_stmt_iterator last = gsi_last (*seq_p);

  gimplify_and_add (t, seq_p);

  if (!gsi_end_p (last))
    {
      gsi_next (&last);
      return gsi_stmt (last);
    }
  else
    /* The sequence was empty before; T's first tuple is the sequence head.  */
    return gimple_seq_first_stmt (*seq_p);
}

/* Strip off a legitimate source ending from the input string NAME of
   length LEN.  Rather than having to know the names used by all of
   our front ends, we strip off an ending of a period followed by
   up to five characters.  (Java uses ".class".)  */

static inline void
remove_suffix (char *name, int len)
{
  int i;

  for (i = 2;  i < 8 && len > i;  i++)
    {
      if (name[len - i] == '.')
	{
	  name[len - i] = '\0';
	  break;
	}
    }
}

/* Create a new temporary name with PREFIX.  Returns an identifier.  */

/* Counter making each generated temporary name unique; GTY so it
   survives garbage collection / PCH.  */
static GTY(()) unsigned int tmp_var_id_num;

tree
create_tmp_var_name (const char *prefix)
{
  char *tmp_name;

  if (prefix)
    {
      char *preftmp = ASTRDUP (prefix);

      remove_suffix (preftmp, strlen (preftmp));
      prefix = preftmp;
    }

  ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix ? prefix : "T", tmp_var_id_num++);
  return get_identifier (tmp_name);
}

/* Create a new temporary variable declaration of type TYPE.
   Does NOT push it into the current binding.  */

tree
create_tmp_var_raw (tree type, const char *prefix)
{
  tree tmp_var;
  tree new_type;

  /* Make the type of the variable writable.
*/

static inline tree
create_tmp_from_val (tree val)
{
  return create_tmp_var (TREE_TYPE (val), get_name (val));
}

/* Create a temporary to hold the value of VAL.  If IS_FORMAL, try to reuse
   an existing expression temporary.  */

static tree
lookup_tmp_var (tree val, bool is_formal)
{
  tree ret;

  /* If not optimizing, never really reuse a temporary.  local-alloc
     won't allocate any variable that is used in more than one basic
     block, which means it will go into memory, causing much extra
     work in reload and final and poorer code generation, outweighing
     the extra memory allocation here.  */
  if (!optimize || !is_formal || TREE_SIDE_EFFECTS (val))
    ret = create_tmp_from_val (val);
  else
    {
      elt_t elt, *elt_p;
      void **slot;

      /* Hash VAL itself; temporaries for equal expressions are shared.  */
      elt.val = val;
      if (gimplify_ctxp->temp_htab == NULL)
        gimplify_ctxp->temp_htab
	  = htab_create (1000, gimple_tree_hash, gimple_tree_eq, free);
      slot = htab_find_slot (gimplify_ctxp->temp_htab, (void *)&elt, INSERT);
      if (*slot == NULL)
	{
	  /* First time we see this expression: record a new temporary.  */
	  elt_p = XNEW (elt_t);
	  elt_p->val = val;
	  elt_p->temp = ret = create_tmp_from_val (val);
	  *slot = (void *) elt_p;
	}
      else
	{
	  /* Reuse the temporary previously created for this expression.  */
	  elt_p = (elt_t *) *slot;
          ret = elt_p->temp;
	}
    }

  return ret;
}

/* Return true if T is a CALL_EXPR or an expression that can be
   assigned to a temporary.  Note that this predicate should only be
   used during gimplification.  See the rationale for this in
   gimplify_modify_expr.  */

static bool
is_gimple_reg_rhs_or_call (tree t)
{
  return (get_gimple_rhs_class (TREE_CODE (t)) != GIMPLE_INVALID_RHS
	  || TREE_CODE (t) == CALL_EXPR);
}

/* Return true if T is a valid memory RHS or a CALL_EXPR.  Note that this
   predicate should only be used during gimplification.  See the rationale
   for this in gimplify_modify_expr.  */

static bool
is_gimple_mem_rhs_or_call (tree t)
{
  /* If we're dealing with a renamable type, either source or dest must be
     a renamed variable.  */
  if (is_gimple_reg_type (TREE_TYPE (t)))
    return is_gimple_val (t);
  else
    return (is_gimple_val (t) || is_gimple_lvalue (t)
	    || TREE_CODE (t) == CALL_EXPR);
}

/* Helper for get_formal_tmp_var and get_initialized_tmp_var.  Gimplify
   VAL into a temporary; side effects go to *PRE_P/*POST_P.  */

static tree
internal_get_tmp_var (tree val, gimple_seq *pre_p, gimple_seq *post_p,
		      bool is_formal)
{
  tree t, mod;

  /* Notice that we explicitly allow VAL to be a CALL_EXPR so that we
     can create an INIT_EXPR and convert it into a GIMPLE_CALL below.  */
  gimplify_expr (&val, pre_p, post_p, is_gimple_reg_rhs_or_call,
		 fb_rvalue);

  t = lookup_tmp_var (val, is_formal);

  if (is_formal
      && (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE
	  || TREE_CODE (TREE_TYPE (t)) == VECTOR_TYPE))
    DECL_GIMPLE_REG_P (t) = 1;

  mod = build2 (INIT_EXPR, TREE_TYPE (t), t, unshare_expr (val));

  SET_EXPR_LOCATION (mod, EXPR_LOC_OR_HERE (val));

  /* gimplify_modify_expr might want to reduce this further.  */
  gimplify_and_add (mod, pre_p);
  ggc_free (mod);

  /* If we're gimplifying into ssa, gimplify_modify_expr will have
     given our temporary an SSA name.  Find and return it.  */
  if (gimplify_ctxp->into_ssa)
    {
      gimple last = gimple_seq_last_stmt (*pre_p);
      t = gimple_get_lhs (last);
    }

  return t;
}

/* Returns a formal temporary variable initialized with VAL.  PRE_P is as
   in gimplify_expr.  Only use this function if:

   1) The value of the unfactored expression represented by VAL will not
      change between the initialization and use of the temporary, and
   2) The temporary will not be otherwise modified.

   For instance, #1 means that this is inappropriate for SAVE_EXPR temps,
   and #2 means it is inappropriate for && temps.

   For other cases, use get_initialized_tmp_var instead.  */

tree
get_formal_tmp_var (tree val, gimple_seq *pre_p)
{
  return internal_get_tmp_var (val, pre_p, NULL, true);
}

/* Returns a temporary variable initialized with VAL.  PRE_P and POST_P
   are as in gimplify_expr.
*/

tree
get_initialized_tmp_var (tree val, gimple_seq *pre_p, gimple_seq *post_p)
{
  return internal_get_tmp_var (val, pre_p, post_p, false);
}

/* Declares all the variables in VARS in SCOPE.  If DEBUG_INFO is true,
   generate debug info for them; otherwise don't.  */

void
declare_vars (tree vars, gimple scope, bool debug_info)
{
  tree last = vars;
  if (last)
    {
      tree temps, block;

      gcc_assert (gimple_code (scope) == GIMPLE_BIND);

      /* VARS arrives in reverse declaration order; flip it in place.  */
      temps = nreverse (last);

      block = gimple_bind_block (scope);
      gcc_assert (!block || TREE_CODE (block) == BLOCK);
      if (!block || !debug_info)
	{
	  /* No debug info wanted: just chain the temps onto the bind.  */
	  DECL_CHAIN (last) = gimple_bind_vars (scope);
	  gimple_bind_set_vars (scope, temps);
	}
      else
	{
	  /* We need to attach the nodes both to the BIND_EXPR and to its
	     associated BLOCK for debugging purposes.  The key point here
	     is that the BLOCK_VARS of the BIND_EXPR_BLOCK of a BIND_EXPR
	     is a subchain of the BIND_EXPR_VARS of the BIND_EXPR.  */
	  if (BLOCK_VARS (block))
	    BLOCK_VARS (block) = chainon (BLOCK_VARS (block), temps);
	  else
	    {
	      gimple_bind_set_vars (scope,
				    chainon (gimple_bind_vars (scope), temps));
	      BLOCK_VARS (block) = temps;
	    }
	}
    }
}

/* For VAR a VAR_DECL of variable size, try to find a constant upper bound
   for the size and adjust DECL_SIZE/DECL_SIZE_UNIT accordingly.  Abort if
   no such upper bound can be obtained.  */

static void
force_constant_size (tree var)
{
  /* The only attempt we make is by querying the maximum size of objects
     of the variable's type.  */
  HOST_WIDE_INT max_size;

  gcc_assert (TREE_CODE (var) == VAR_DECL);

  max_size = max_int_size_in_bytes (TREE_TYPE (var));

  gcc_assert (max_size >= 0);

  DECL_SIZE_UNIT (var)
    = build_int_cst (TREE_TYPE (DECL_SIZE_UNIT (var)), max_size);
  DECL_SIZE (var)
    = build_int_cst (TREE_TYPE (DECL_SIZE (var)), max_size * BITS_PER_UNIT);
}

/* Push the temporary variable TMP into the current binding.  */

void
gimple_add_tmp_var (tree tmp)
{
  gcc_assert (!DECL_CHAIN (tmp) && !DECL_SEEN_IN_BIND_EXPR_P (tmp));

  /* Later processing assumes that the object size is constant, which might
     not be true at this point.  Force the use of a constant upper bound in
     this case.  */
  if (!host_integerp (DECL_SIZE_UNIT (tmp), 1))
    force_constant_size (tmp);

  DECL_CONTEXT (tmp) = current_function_decl;
  DECL_SEEN_IN_BIND_EXPR_P (tmp) = 1;

  if (gimplify_ctxp)
    {
      /* Inside gimplification: queue on the context's temps chain.  */
      DECL_CHAIN (tmp) = gimplify_ctxp->temps;
      gimplify_ctxp->temps = tmp;
    }
  else if (cfun)
    record_vars (tmp);
  else
    {
      gimple_seq body_seq;

      /* This case is for nested functions.  We need to expose the locals
	 they create.  */
      body_seq = gimple_body (current_function_decl);
      declare_vars (tmp, gimple_seq_first_stmt (body_seq), false);
    }
}

/* Determines whether to assign a location to the statement GS.  */

static bool
should_carry_location_p (gimple gs)
{
  /* Don't emit a line note for a label.  We particularly don't want to
     emit one for the break label, since it doesn't actually correspond
     to the beginning of the loop/switch.  */
  if (gimple_code (gs) == GIMPLE_LABEL)
    return false;

  return true;
}

/* Return true if a location should not be emitted for this statement
   by annotate_one_with_location.  */

static inline bool
gimple_do_not_emit_location_p (gimple g)
{
  return gimple_plf (g, GF_PLF_1);
}

/* Mark statement G so a location will not be emitted by
   annotate_one_with_location.  */

static inline void
gimple_set_do_not_emit_location (gimple g)
{
  /* The PLF flags are initialized to 0 when a new tuple is created,
     so no need to initialize it anywhere.  */
  gimple_set_plf (g, GF_PLF_1, true);
}

/* Set the location for gimple statement GS to LOCATION.  */

static void
annotate_one_with_location (gimple gs, location_t location)
{
  /* Only annotate statements that don't already carry a location and
     haven't opted out.  */
  if (!gimple_has_location (gs)
      && !gimple_do_not_emit_location_p (gs)
      && should_carry_location_p (gs))
    gimple_set_location (gs, location);
}

/* Set LOCATION for all the statements after iterator GSI in sequence
   SEQ.  If GSI is pointing to the end of the sequence, start with the
   first statement in SEQ.
*/

static void
annotate_all_with_location_after (gimple_seq seq, gimple_stmt_iterator gsi,
				  location_t location)
{
  if (gsi_end_p (gsi))
    gsi = gsi_start (seq);
  else
    gsi_next (&gsi);

  for (; !gsi_end_p (gsi); gsi_next (&gsi))
    annotate_one_with_location (gsi_stmt (gsi), location);
}

/* Set the location for all the statements in a sequence STMT_P to
   LOCATION.  */

void
annotate_all_with_location (gimple_seq stmt_p, location_t location)
{
  gimple_stmt_iterator i;

  if (gimple_seq_empty_p (stmt_p))
    return;

  for (i = gsi_start (stmt_p); !gsi_end_p (i); gsi_next (&i))
    {
      gimple gs = gsi_stmt (i);
      annotate_one_with_location (gs, location);
    }
}

/* This page contains routines to unshare tree nodes, i.e. to duplicate tree
   nodes that are referenced more than once in GENERIC functions.  This is
   necessary because gimplification (translation into GIMPLE) is performed
   by modifying tree nodes in-place, so gimplication of a shared node in a
   first context could generate an invalid GIMPLE form in a second context.

   This is achieved with a simple mark/copy/unmark algorithm that walks the
   GENERIC representation top-down, marks nodes with TREE_VISITED the first
   time it encounters them, duplicates them if they already have TREE_VISITED
   set, and finally removes the TREE_VISITED marks it has set.

   The algorithm works only at the function level, i.e. it generates a
   GENERIC representation of a function with no nodes shared within the
   function when passed a GENERIC function (except for nodes that are
   allowed to be shared).

   At the global level, it is also necessary to unshare tree nodes that are
   referenced in more than one function, for the same aforementioned reason.
   This requires some cooperation from the front-end.  There are 2 strategies:

   1. Manual unsharing.  The front-end needs to call unshare_expr on every
      expression that might end up being shared across functions.

   2. Deep unsharing.  This is an extension of regular unsharing.  Instead
      of calling unshare_expr on expressions that might be shared across
      functions, the front-end pre-marks them with TREE_VISITED.  This will
      ensure that they are unshared on the first reference within functions
      when the regular unsharing algorithm runs.  The counterpart is that
      this algorithm must look deeper than for manual unsharing, which is
      specified by LANG_HOOKS_DEEP_UNSHARING.

   If there are only few specific cases of node sharing across functions, it
   is probably easier for a front-end to unshare the expressions manually.
   On the contrary, if the expressions generated at the global level are as
   widespread as expressions generated within functions, deep unsharing is
   very likely the way to go.  */

/* Similar to copy_tree_r but do not copy SAVE_EXPR or TARGET_EXPR nodes.
   These nodes model computations that should only be done once.  If we
   were to unshare something like SAVE_EXPR(i++), the gimplification
   process would create wrong code.  */

static tree
mostly_copy_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  tree t = *tp;
  enum tree_code code = TREE_CODE (t);

  /* Do not copy SAVE_EXPR, TARGET_EXPR or BIND_EXPR nodes themselves, but
     copy their subtrees if we can make sure to do it only once.  */
  if (code == SAVE_EXPR || code == TARGET_EXPR || code == BIND_EXPR)
    {
      if (data && !pointer_set_insert ((struct pointer_set_t *)data, t))
	;
      else
	*walk_subtrees = 0;
    }

  /* Stop at types, decls, constants like copy_tree_r.  */
  else if (TREE_CODE_CLASS (code) == tcc_type
	   || TREE_CODE_CLASS (code) == tcc_declaration
	   || TREE_CODE_CLASS (code) == tcc_constant
	   /* We can't do anything sensible with a BLOCK used as an
	      expression, but we also can't just die when we see it
	      because of non-expression uses.  So we avert our eyes
	      and cross our fingers.  Silly Java.  */
	   || code == BLOCK)
    *walk_subtrees = 0;

  /* Cope with the statement expression extension.  */
  else if (code == STATEMENT_LIST)
    ;

  /* Leave the bulk of the work to copy_tree_r itself.  */
  else
    copy_tree_r (tp, walk_subtrees, NULL);

  return NULL_TREE;
}

/* Callback for walk_tree to unshare most of the shared trees rooted at *TP.
   If *TP has been visited already (i.e., TREE_VISITED (*TP) == 1), then *TP
   is deep copied by calling mostly_copy_tree_r.  */

static tree
copy_if_shared_r (tree *tp, int *walk_subtrees, void *data)
{
  tree t = *tp;
  enum tree_code code = TREE_CODE (t);

  /* Skip types, decls, and constants.  But we do want to look at their
     types and the bounds of types.  Mark them as visited so we properly
     unmark their subtrees on the unmark pass.  If we've already seen them,
     don't look down further.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant)
    {
      if (TREE_VISITED (t))
	*walk_subtrees = 0;
      else
	TREE_VISITED (t) = 1;
    }

  /* If this node has been visited already, unshare it and don't look
     any deeper.  */
  else if (TREE_VISITED (t))
    {
      walk_tree (tp, mostly_copy_tree_r, data, NULL);
      *walk_subtrees = 0;
    }

  /* Otherwise, mark the node as visited and keep looking.  */
  else
    TREE_VISITED (t) = 1;

  return NULL_TREE;
}

/* Unshare most of the shared trees rooted at *TP.  */

static inline void
copy_if_shared (tree *tp)
{
  /* If the language requires deep unsharing, we need a pointer set to make
     sure we don't repeatedly unshare subtrees of unshareable nodes.  */
  struct pointer_set_t *visited
    = lang_hooks.deep_unsharing ? pointer_set_create () : NULL;
  walk_tree (tp, copy_if_shared_r, visited, NULL);
  if (visited)
    pointer_set_destroy (visited);
}

/* Unshare all the trees in BODY_P, a pointer into the body of FNDECL, and
   the bodies of any nested functions if we are unsharing the entire body of
   FNDECL.
*/

static void
unshare_body (tree *body_p, tree fndecl)
{
  struct cgraph_node *cgn = cgraph_node (fndecl);

  copy_if_shared (body_p);

  /* Recurse into nested functions only when unsharing FNDECL's whole
     saved tree.  */
  if (body_p == &DECL_SAVED_TREE (fndecl))
    for (cgn = cgn->nested; cgn; cgn = cgn->next_nested)
      unshare_body (&DECL_SAVED_TREE (cgn->decl), cgn->decl);
}

/* Callback for walk_tree to unmark the visited trees rooted at *TP.
   Subtrees are walked until the first unvisited node is encountered.  */

static tree
unmark_visited_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  tree t = *tp;

  /* If this node has been visited, unmark it and keep looking.  */
  if (TREE_VISITED (t))
    TREE_VISITED (t) = 0;

  /* Otherwise, don't look any deeper.  */
  else
    *walk_subtrees = 0;

  return NULL_TREE;
}

/* Unmark the visited trees rooted at *TP.  */

static inline void
unmark_visited (tree *tp)
{
  walk_tree (tp, unmark_visited_r, NULL, NULL);
}

/* Likewise, but mark all trees as not visited.  */

static void
unvisit_body (tree *body_p, tree fndecl)
{
  struct cgraph_node *cgn = cgraph_node (fndecl);

  unmark_visited (body_p);

  if (body_p == &DECL_SAVED_TREE (fndecl))
    for (cgn = cgn->nested; cgn; cgn = cgn->next_nested)
      unvisit_body (&DECL_SAVED_TREE (cgn->decl), cgn->decl);
}

/* Unconditionally make an unshared copy of EXPR.  This is used when using
   stored expressions which span multiple functions, such as BINFO_VTABLE,
   as the normal unsharing process can't tell that they're shared.  */

tree
unshare_expr (tree expr)
{
  walk_tree (&expr, mostly_copy_tree_r, NULL, NULL);
  return expr;
}

/* WRAPPER is a code such as BIND_EXPR or CLEANUP_POINT_EXPR which can both
   contain statements and have a value.  Assign its value to a temporary
   and give it void_type_node.  Returns the temporary, or NULL_TREE if
   WRAPPER was already void.  */

tree
voidify_wrapper_expr (tree wrapper, tree temp)
{
  tree type = TREE_TYPE (wrapper);
  if (type && !VOID_TYPE_P (type))
    {
      tree *p;

      /* Set p to point to the body of the wrapper.  Loop until we find
	 something that isn't a wrapper.  */
      for (p = &wrapper; p && *p; )
	{
	  switch (TREE_CODE (*p))
	    {
	    case BIND_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      /* For a BIND_EXPR, the body is operand 1.  */
	      p = &BIND_EXPR_BODY (*p);
	      break;

	    case CLEANUP_POINT_EXPR:
	    case TRY_FINALLY_EXPR:
	    case TRY_CATCH_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      p = &TREE_OPERAND (*p, 0);
	      break;

	    case STATEMENT_LIST:
	      {
		tree_stmt_iterator i = tsi_last (*p);
		TREE_SIDE_EFFECTS (*p) = 1;
		TREE_TYPE (*p) = void_type_node;
		/* An empty statement list has no last statement; stop.  */
		p = tsi_end_p (i) ? NULL : tsi_stmt_ptr (i);
	      }
	      break;

	    case COMPOUND_EXPR:
	      /* Advance to the last statement.  Set all container types to
		 void.  */
	      for (; TREE_CODE (*p) == COMPOUND_EXPR;
		   p = &TREE_OPERAND (*p, 1))
		{
		  TREE_SIDE_EFFECTS (*p) = 1;
		  TREE_TYPE (*p) = void_type_node;
		}
	      break;

	    default:
	      goto out;
	    }
	}

    out:
      if (p == NULL || IS_EMPTY_STMT (*p))
	temp = NULL_TREE;
      else if (temp)
	{
	  /* The wrapper is on the RHS of an assignment that we're pushing
	     down.  */
	  gcc_assert (TREE_CODE (temp) == INIT_EXPR
		      || TREE_CODE (temp) == MODIFY_EXPR);
	  TREE_OPERAND (temp, 1) = *p;
	  *p = temp;
	}
      else
	{
	  temp = create_tmp_var (type, "retval");
	  *p = build2 (INIT_EXPR, type, temp, *p);
	}

      return temp;
    }

  return NULL_TREE;
}

/* Prepare calls to builtins to SAVE and RESTORE the stack as well as
   a temporary through which they communicate.  */

static void
build_stack_save_restore (gimple *save, gimple *restore)
{
  tree tmp_var;

  *save = gimple_build_call (implicit_built_in_decls[BUILT_IN_STACK_SAVE], 0);
  tmp_var = create_tmp_var (ptr_type_node, "saved_stack");
  gimple_call_set_lhs (*save, tmp_var);

  *restore
    = gimple_build_call (implicit_built_in_decls[BUILT_IN_STACK_RESTORE],
			 1, tmp_var);
}

/* Gimplify a BIND_EXPR.  Just voidify and recurse.
*/

static enum gimplify_status
gimplify_bind_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree bind_expr = *expr_p;
  bool old_save_stack = gimplify_ctxp->save_stack;
  tree t;
  gimple gimple_bind;
  gimple_seq body;

  tree temp = voidify_wrapper_expr (bind_expr, NULL);

  /* Mark variables seen in this bind expr.  */
  for (t = BIND_EXPR_VARS (bind_expr); t ; t = DECL_CHAIN (t))
    {
      if (TREE_CODE (t) == VAR_DECL)
	{
	  DECL_SEEN_IN_BIND_EXPR_P (t) = 1;

	  if (DECL_HARD_REGISTER (t) && !is_global_var (t) && cfun)
	    cfun->has_local_explicit_reg_vars = true;
	}

      /* Preliminarily mark non-addressed complex variables as eligible
	 for promotion to gimple registers.  We'll transform their uses
	 as we find them.  */
      if ((TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE
	   || TREE_CODE (TREE_TYPE (t)) == VECTOR_TYPE)
	  && !TREE_THIS_VOLATILE (t)
	  && (TREE_CODE (t) == VAR_DECL && !DECL_HARD_REGISTER (t))
	  && !needs_to_live_in_memory (t))
	DECL_GIMPLE_REG_P (t) = 1;
    }

  gimple_bind = gimple_build_bind (BIND_EXPR_VARS (bind_expr), NULL,
                                   BIND_EXPR_BLOCK (bind_expr));
  gimple_push_bind_expr (gimple_bind);

  /* Track whether the body needs a stack save/restore pair; reset for
     this bind and restored from OLD_SAVE_STACK below.  */
  gimplify_ctxp->save_stack = false;

  /* Gimplify the body into the GIMPLE_BIND tuple's body.  */
  body = NULL;
  gimplify_stmt (&BIND_EXPR_BODY (bind_expr), &body);
  gimple_bind_set_body (gimple_bind, body);

  if (gimplify_ctxp->save_stack)
    {
      gimple stack_save, stack_restore, gs;
      gimple_seq cleanup, new_body;

      /* Save stack on entry and restore it on exit.  Add a try_finally
	 block to achieve this.  Note that mudflap depends on the
	 format of the emitted code: see mx_register_decls().  */
      build_stack_save_restore (&stack_save, &stack_restore);

      cleanup = new_body = NULL;
      gimplify_seq_add_stmt (&cleanup, stack_restore);
      gs = gimple_build_try (gimple_bind_body (gimple_bind), cleanup,
			     GIMPLE_TRY_FINALLY);

      gimplify_seq_add_stmt (&new_body, stack_save);
      gimplify_seq_add_stmt (&new_body, gs);
      gimple_bind_set_body (gimple_bind, new_body);
    }

  gimplify_ctxp->save_stack = old_save_stack;
  gimple_pop_bind_expr ();

  gimplify_seq_add_stmt (pre_p, gimple_bind);

  if (temp)
    {
      *expr_p = temp;
      return GS_OK;
    }

  *expr_p = NULL_TREE;
  return GS_ALL_DONE;
}

/* Gimplify a RETURN_EXPR.  If the expression to be returned is not a
   GIMPLE value, it is assigned to a new temporary and the statement is
   re-written to return the temporary.

   PRE_P points to the sequence where side effects that must happen before
   STMT should be stored.  */

static enum gimplify_status
gimplify_return_expr (tree stmt, gimple_seq *pre_p)
{
  gimple ret;
  tree ret_expr = TREE_OPERAND (stmt, 0);
  tree result_decl, result;

  if (ret_expr == error_mark_node)
    return GS_ERROR;

  if (!ret_expr
      || TREE_CODE (ret_expr) == RESULT_DECL
      /* NOTE(review): this last test is unreachable -- error_mark_node
	 was already handled by the early return above.  */
      || ret_expr == error_mark_node)
    {
      /* This RET shadows the function-scope RET declared above; the outer
	 one is only used on the fall-through path at the bottom.  */
      gimple ret = gimple_build_return (ret_expr);
      gimple_set_no_warning (ret, TREE_NO_WARNING (stmt));
      gimplify_seq_add_stmt (pre_p, ret);
      return GS_ALL_DONE;
    }

  if (VOID_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))))
    result_decl = NULL_TREE;
  else
    {
      result_decl = TREE_OPERAND (ret_expr, 0);

      /* See through a return by reference.  */
      if (TREE_CODE (result_decl) == INDIRECT_REF)
	result_decl = TREE_OPERAND (result_decl, 0);

      gcc_assert ((TREE_CODE (ret_expr) == MODIFY_EXPR
		   || TREE_CODE (ret_expr) == INIT_EXPR)
		  && TREE_CODE (result_decl) == RESULT_DECL);
    }

  /* If aggregate_value_p is true, then we can return the bare RESULT_DECL.
     Recall that aggregate_value_p is FALSE for any aggregate type that is
     returned in registers.  If we're returning values in registers, then
     we don't want to extend the lifetime of the RESULT_DECL, particularly
     across another call.  In addition, for those aggregates for which
     hard_function_value generates a PARALLEL, we'll die during normal
     expansion of structure assignments; there's special code in
     expand_return to handle this case that does not exist in
     expand_expr.  */
  if (!result_decl)
    result = NULL_TREE;
  else if (aggregate_value_p (result_decl, TREE_TYPE (current_function_decl)))
    {
      if (TREE_CODE (DECL_SIZE (result_decl)) != INTEGER_CST)
	{
	  if (!TYPE_SIZES_GIMPLIFIED (TREE_TYPE (result_decl)))
	    gimplify_type_sizes (TREE_TYPE (result_decl), pre_p);
	  /* Note that we don't use gimplify_vla_decl because the
	     RESULT_DECL should be effectively allocated by the caller,
	     i.e. all calls to this function must be subject to the Return
	     Slot Optimization.  */
	  gimplify_one_sizepos (&DECL_SIZE (result_decl), pre_p);
	  gimplify_one_sizepos (&DECL_SIZE_UNIT (result_decl), pre_p);
	}
      result = result_decl;
    }
  else if (gimplify_ctxp->return_temp)
    result = gimplify_ctxp->return_temp;
  else
    {
      result = create_tmp_reg (TREE_TYPE (result_decl), NULL);

      /* ??? With complex control flow (usually involving abnormal
	 edges), we can wind up warning about an uninitialized value
	 for this.  Due to how this variable is constructed and
	 initialized, this is never true.  Give up and never warn.  */
      TREE_NO_WARNING (result) = 1;

      gimplify_ctxp->return_temp = result;
    }

  /* Smash the lhs of the MODIFY_EXPR to the temporary we plan to use.
     Then gimplify the whole thing.  */
  if (result != result_decl)
    TREE_OPERAND (ret_expr, 0) = result;

  gimplify_and_add (TREE_OPERAND (stmt, 0), pre_p);

  ret = gimple_build_return (result);
  gimple_set_no_warning (ret, TREE_NO_WARNING (stmt));
  gimplify_seq_add_stmt (pre_p, ret);

  return GS_ALL_DONE;
}

static void
gimplify_vla_decl (tree decl, gimple_seq *seq_p)
{
  /* This is a variable-sized decl.  Simplify its size and mark it
     for deferred expansion.  Note that mudflap depends on the format
     of the emitted code: see mx_register_decls().
*/
  tree t, addr, ptr_type;

  gimplify_one_sizepos (&DECL_SIZE (decl), seq_p);
  gimplify_one_sizepos (&DECL_SIZE_UNIT (decl), seq_p);

  /* All occurrences of this decl in final gimplified code will be
     replaced by indirection.  Setting DECL_VALUE_EXPR does two
     things: First, it lets the rest of the gimplifier know what
     replacement to use.  Second, it lets the debug info know
     where to find the value.  */
  ptr_type = build_pointer_type (TREE_TYPE (decl));
  addr = create_tmp_var (ptr_type, get_name (decl));
  DECL_IGNORED_P (addr) = 0;
  t = build_fold_indirect_ref (addr);
  SET_DECL_VALUE_EXPR (decl, t);
  DECL_HAS_VALUE_EXPR_P (decl) = 1;

  t = built_in_decls[BUILT_IN_ALLOCA];
  t = build_call_expr (t, 1, DECL_SIZE_UNIT (decl));
  /* The call has been built for a variable-sized object.  */
  ALLOCA_FOR_VAR_P (t) = 1;
  t = fold_convert (ptr_type, t);
  t = build2 (MODIFY_EXPR, TREE_TYPE (addr), addr, t);

  gimplify_and_add (t, seq_p);

  /* Indicate that we need to restore the stack level when the
     enclosing BIND_EXPR is exited.  */
  gimplify_ctxp->save_stack = true;
}

/* Gimplifies a DECL_EXPR node *STMT_P by making any necessary allocation
   and initialization explicit.  */

static enum gimplify_status
gimplify_decl_expr (tree *stmt_p, gimple_seq *seq_p)
{
  tree stmt = *stmt_p;
  tree decl = DECL_EXPR_DECL (stmt);

  *stmt_p = NULL_TREE;

  if (TREE_TYPE (decl) == error_mark_node)
    return GS_ERROR;

  if ((TREE_CODE (decl) == TYPE_DECL
       || TREE_CODE (decl) == VAR_DECL)
      && !TYPE_SIZES_GIMPLIFIED (TREE_TYPE (decl)))
    gimplify_type_sizes (TREE_TYPE (decl), seq_p);

  if (TREE_CODE (decl) == VAR_DECL && !DECL_EXTERNAL (decl))
    {
      tree init = DECL_INITIAL (decl);

      /* Treat the decl as a VLA if its size is not a compile-time
	 constant, or when generic stack checking forces large fixed-size
	 locals into the VLA path.  */
      if (TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST
	  || (!TREE_STATIC (decl)
	      && flag_stack_check == GENERIC_STACK_CHECK
	      && compare_tree_int (DECL_SIZE_UNIT (decl),
				   STACK_CHECK_MAX_VAR_SIZE) > 0))
	gimplify_vla_decl (decl, seq_p);

      /* Some front ends do not explicitly declare all anonymous
	 artificial variables.  We compensate here by declaring the
	 variables, though it would be better if the front ends would
	 explicitly declare them.  */
      if (!DECL_SEEN_IN_BIND_EXPR_P (decl)
	  && DECL_ARTIFICIAL (decl) && DECL_NAME (decl) == NULL_TREE)
	gimple_add_tmp_var (decl);

      if (init && init != error_mark_node)
	{
	  if (!TREE_STATIC (decl))
	    {
	      /* Turn the initializer into an explicit INIT_EXPR
		 statement.  */
	      DECL_INITIAL (decl) = NULL_TREE;
	      init = build2 (INIT_EXPR, void_type_node, decl, init);
	      gimplify_and_add (init, seq_p);
	      ggc_free (init);
	    }
	  else
	    /* We must still examine initializers for static variables
	       as they may contain a label address.  */
	    walk_tree (&init, force_labels_r, NULL, NULL);
	}
    }

  return GS_ALL_DONE;
}

/* Gimplify a LOOP_EXPR.  Normally this just involves gimplifying the body
   and replacing the LOOP_EXPR with goto, but if the loop contains an
   EXIT_EXPR, we need to append a label for it to jump to.  */

static enum gimplify_status
gimplify_loop_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree saved_label = gimplify_ctxp->exit_label;
  tree start_label = create_artificial_label (UNKNOWN_LOCATION);

  gimplify_seq_add_stmt (pre_p, gimple_build_label (start_label));

  gimplify_ctxp->exit_label = NULL_TREE;

  gimplify_and_add (LOOP_EXPR_BODY (*expr_p), pre_p);

  gimplify_seq_add_stmt (pre_p, gimple_build_goto (start_label));

  /* An EXIT_EXPR in the body will have created the exit label.  */
  if (gimplify_ctxp->exit_label)
    gimplify_seq_add_stmt (pre_p,
			   gimple_build_label (gimplify_ctxp->exit_label));

  gimplify_ctxp->exit_label = saved_label;

  *expr_p = NULL;
  return GS_ALL_DONE;
}

/* Gimplifies a statement list onto a sequence.  These may be created
   either by an enlightened front-end, or by shortcut_cond_expr.  */

static enum gimplify_status
gimplify_statement_list (tree *expr_p, gimple_seq *pre_p)
{
  tree temp = voidify_wrapper_expr (*expr_p, NULL);

  tree_stmt_iterator i = tsi_start (*expr_p);

  while (!tsi_end_p (i))
    {
      gimplify_stmt (tsi_stmt_ptr (i), pre_p);
      tsi_delink (&i);
    }

  if (temp)
    {
      *expr_p = temp;
      return GS_OK;
    }

  return GS_ALL_DONE;
}

/* Compare two case labels.
   Because the front end
   should already have made sure that case ranges do not overlap, it is
   enough to only compare the CASE_LOW values of each case label.  */

static int
compare_case_labels (const void *p1, const void *p2)
{
  const_tree const case1 = *(const_tree const*)p1;
  const_tree const case2 = *(const_tree const*)p2;

  /* The 'default' case label always goes first.  */
  if (!CASE_LOW (case1))
    return -1;
  else if (!CASE_LOW (case2))
    return 1;
  else
    return tree_int_cst_compare (CASE_LOW (case1), CASE_LOW (case2));
}

/* Sort the case labels in LABEL_VEC in place in ascending order.  */

void
sort_case_labels (VEC(tree,heap)* label_vec)
{
  VEC_qsort (tree, label_vec, compare_case_labels);
}

/* Gimplify a SWITCH_EXPR, and collect a TREE_VEC of the labels it can
   branch to.  */

static enum gimplify_status
gimplify_switch_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree switch_expr = *expr_p;
  gimple_seq switch_body_seq = NULL;
  enum gimplify_status ret;

  ret = gimplify_expr (&SWITCH_COND (switch_expr), pre_p, NULL, is_gimple_val,
                       fb_rvalue);
  if (ret == GS_ERROR || ret == GS_UNHANDLED)
    return ret;

  if (SWITCH_BODY (switch_expr))
    {
      VEC (tree,heap) *labels;
      VEC (tree,heap) *saved_labels;
      tree default_case = NULL_TREE;
      size_t i, len;
      gimple gimple_switch;

      /* If someone can be bothered to fill in the labels, they can be
	 bothered to null out the body too.  */
      gcc_assert (!SWITCH_LABELS (switch_expr));

      /* save old labels, get new ones from body, then restore the old
         labels.  Save all the things from the switch body to append
         after.  */
      saved_labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels = VEC_alloc (tree, heap, 8);

      gimplify_stmt (&SWITCH_BODY (switch_expr), &switch_body_seq);
      labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels = saved_labels;

      /* Filter the collected labels: drop empty ranges and pull out the
	 default label.  */
      i = 0;
      while (i < VEC_length (tree, labels))
	{
	  tree elt = VEC_index (tree, labels, i);
	  tree low = CASE_LOW (elt);
	  bool remove_element = FALSE;

	  if (low)
	    {
	      /* Discard empty ranges.  */
	      tree high = CASE_HIGH (elt);
	      if (high && tree_int_cst_lt (high, low))
		remove_element = TRUE;
	    }
	  else
	    {
	      /* The default case must be the last label in the list.  */
	      gcc_assert (!default_case);
	      default_case = elt;
	      remove_element = TRUE;
	    }

	  if (remove_element)
	    VEC_ordered_remove (tree, labels, i);
	  else
	    i++;
	}
      len = i;

      if (!VEC_empty (tree, labels))
	sort_case_labels (labels);

      if (!default_case)
	{
	  tree type = TREE_TYPE (switch_expr);

	  /* If the switch has no default label, add one, so that we jump
	     around the switch body.  If the labels already cover the whole
	     range of type, add the default label pointing to one of the
	     existing labels.  */
	  if (type == void_type_node)
	    type = TREE_TYPE (SWITCH_COND (switch_expr));
	  if (len
	      && INTEGRAL_TYPE_P (type)
	      && TYPE_MIN_VALUE (type)
	      && TYPE_MAX_VALUE (type)
	      && tree_int_cst_equal (CASE_LOW (VEC_index (tree, labels, 0)),
				     TYPE_MIN_VALUE (type)))
	    {
	      tree low, high = CASE_HIGH (VEC_index (tree, labels, len - 1));
	      if (!high)
		high = CASE_LOW (VEC_index (tree, labels, len - 1));
	      if (tree_int_cst_equal (high, TYPE_MAX_VALUE (type)))
		{
		  /* Check the sorted labels for gaps; double-word compare
		     on (high, low) of each INTEGER_CST pair.  */
		  for (i = 1; i < len; i++)
		    {
		      high = CASE_LOW (VEC_index (tree, labels, i));
		      low = CASE_HIGH (VEC_index (tree, labels, i - 1));
		      if (!low)
			low = CASE_LOW (VEC_index (tree, labels, i - 1));
		      if ((TREE_INT_CST_LOW (low) + 1
			   != TREE_INT_CST_LOW (high))
			  || (TREE_INT_CST_HIGH (low)
			      + (TREE_INT_CST_LOW (high) == 0)
			      != TREE_INT_CST_HIGH (high)))
			break;
		    }
		  if (i == len)
		    /* No gaps: the labels are exhaustive, so reuse the first
		       label as the default target.  */
		    default_case = build3 (CASE_LABEL_EXPR, void_type_node,
					   NULL_TREE, NULL_TREE,
					   CASE_LABEL (VEC_index (tree,
								  labels, 0)));
		}
	    }

	  if (!default_case)
	    {
	      gimple new_default;

	      default_case
		= build3 (CASE_LABEL_EXPR, void_type_node,
			  NULL_TREE, NULL_TREE,
			  create_artificial_label (UNKNOWN_LOCATION));
	      new_default = gimple_build_label (CASE_LABEL (default_case));
	      gimplify_seq_add_stmt (&switch_body_seq, new_default);
	    }
	}

      gimple_switch = gimple_build_switch_vec (SWITCH_COND (switch_expr),
                                               default_case, labels);
      gimplify_seq_add_stmt (pre_p, gimple_switch);
      gimplify_seq_add_seq (pre_p, switch_body_seq);
      VEC_free(tree, heap, labels);
    }
  else
    gcc_assert (SWITCH_LABELS (switch_expr));

  return GS_ALL_DONE;
}

/* Gimplify a CASE_LABEL_EXPR: emit the label and record it with the
   innermost gimplify context that is collecting case labels.  */

static enum gimplify_status
gimplify_case_label_expr (tree *expr_p, gimple_seq *pre_p)
{
  struct gimplify_ctx *ctxp;
  gimple gimple_label;

  /* Invalid OpenMP programs can play Duff's Device type games with
     #pragma omp parallel.  At least in the C front end, we don't
     detect such invalid branches until after gimplification.  */
  for (ctxp = gimplify_ctxp; ; ctxp = ctxp->prev_context)
    if (ctxp->case_labels)
      break;

  gimple_label = gimple_build_label (CASE_LABEL (*expr_p));
  VEC_safe_push (tree, heap, ctxp->case_labels, *expr_p);
  gimplify_seq_add_stmt (pre_p, gimple_label);

  return GS_ALL_DONE;
}

/* Build a GOTO to the LABEL_DECL pointed to by LABEL_P, building it first
   if necessary.  */

tree
build_and_jump (tree *label_p)
{
  if (label_p == NULL)
    /* If there's nowhere to jump, just fall through.  */
    return NULL_TREE;

  if (*label_p == NULL_TREE)
    {
      tree label = create_artificial_label (UNKNOWN_LOCATION);
      *label_p = label;
    }

  return build1 (GOTO_EXPR, void_type_node, *label_p);
}

/* Gimplify an EXIT_EXPR by converting to a GOTO_EXPR inside a COND_EXPR.
   This also involves building a label to jump to and communicating it to
   gimplify_loop_expr through gimplify_ctxp->exit_label.  */

static enum gimplify_status
gimplify_exit_expr (tree *expr_p)
{
  tree cond = TREE_OPERAND (*expr_p, 0);
  tree expr;

  expr = build_and_jump (&gimplify_ctxp->exit_label);
  expr = build3 (COND_EXPR, void_type_node, cond, expr, NULL_TREE);
  *expr_p = expr;

  return GS_OK;
}

/* A helper function to be called via walk_tree.  Mark all labels under *TP
   as being forced.  To be called for DECL_INITIAL of static variables.  */

tree
force_labels_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  if (TREE_CODE (*tp) == LABEL_DECL)
    FORCED_LABEL (*tp) = 1;

  return NULL_TREE;
}

/* *EXPR_P is a COMPONENT_REF being used as an rvalue.
If its type is different from its canonical type, wrap the whole thing inside a NOP_EXPR and force the type of the COMPONENT_REF to be the canonical type. The canonical type of a COMPONENT_REF is the type of the field being referenced--unless the field is a bit-field which can be read directly in a smaller mode, in which case the canonical type is the sign-appropriate type corresponding to that mode. */ static void canonicalize_component_ref (tree *expr_p) { tree expr = *expr_p; tree type; gcc_assert (TREE_CODE (expr) == COMPONENT_REF); if (INTEGRAL_TYPE_P (TREE_TYPE (expr))) type = TREE_TYPE (get_unwidened (expr, NULL_TREE)); else type = TREE_TYPE (TREE_OPERAND (expr, 1)); /* One could argue that all the stuff below is not necessary for the non-bitfield case and declare it a FE error if type adjustment would be needed. */ if (TREE_TYPE (expr) != type) { #ifdef ENABLE_TYPES_CHECKING tree old_type = TREE_TYPE (expr); #endif int type_quals; /* We need to preserve qualifiers and propagate them from operand 0. */ type_quals = TYPE_QUALS (type) | TYPE_QUALS (TREE_TYPE (TREE_OPERAND (expr, 0))); if (TYPE_QUALS (type) != type_quals) type = build_qualified_type (TYPE_MAIN_VARIANT (type), type_quals); /* Set the type of the COMPONENT_REF to the underlying type. */ TREE_TYPE (expr) = type; #ifdef ENABLE_TYPES_CHECKING /* It is now a FE error, if the conversion from the canonical type to the original expression type is not useless. */ gcc_assert (useless_type_conversion_p (old_type, type)); #endif } } /* If a NOP conversion is changing a pointer to array of foo to a pointer to foo, embed that change in the ADDR_EXPR by converting T array[U]; (T *)&array ==> &array[L] where L is the lower bound. For simplicity, only do this for constant lower bound. The constraint is that the type of &array[L] is trivially convertible to T *. 
*/

static void
canonicalize_addr_expr (tree *expr_p)
{
  tree expr = *expr_p;
  tree addr_expr = TREE_OPERAND (expr, 0);
  tree datype, ddatype, pddatype;

  /* We simplify only conversions from an ADDR_EXPR to a pointer type.  */
  if (!POINTER_TYPE_P (TREE_TYPE (expr))
      || TREE_CODE (addr_expr) != ADDR_EXPR)
    return;

  /* The addr_expr type should be a pointer to an array.  */
  datype = TREE_TYPE (TREE_TYPE (addr_expr));
  if (TREE_CODE (datype) != ARRAY_TYPE)
    return;

  /* The pointer to element type shall be trivially convertible to
     the expression pointer type.  */
  ddatype = TREE_TYPE (datype);
  pddatype = build_pointer_type (ddatype);
  if (!useless_type_conversion_p (TYPE_MAIN_VARIANT (TREE_TYPE (expr)),
				  pddatype))
    return;

  /* The lower bound and element sizes must be constant.  */
  if (!TYPE_SIZE_UNIT (ddatype)
      || TREE_CODE (TYPE_SIZE_UNIT (ddatype)) != INTEGER_CST
      || !TYPE_DOMAIN (datype) || !TYPE_MIN_VALUE (TYPE_DOMAIN (datype))
      || TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (datype))) != INTEGER_CST)
    return;

  /* All checks succeeded.  Build a new node to merge the cast:
     &array becomes &array[L].  */
  *expr_p = build4 (ARRAY_REF, ddatype, TREE_OPERAND (addr_expr, 0),
		    TYPE_MIN_VALUE (TYPE_DOMAIN (datype)),
		    NULL_TREE, NULL_TREE);
  *expr_p = build1 (ADDR_EXPR, pddatype, *expr_p);

  /* We can have stripped a required restrict qualifier above.  */
  if (!useless_type_conversion_p (TREE_TYPE (expr), TREE_TYPE (*expr_p)))
    *expr_p = fold_convert (TREE_TYPE (expr), *expr_p);
}

/* *EXPR_P is a NOP_EXPR or CONVERT_EXPR.  Remove it and/or other
   conversions underneath as appropriate.  Always returns GS_OK.  */

static enum gimplify_status
gimplify_conversion (tree *expr_p)
{
  tree tem;
  location_t loc = EXPR_LOCATION (*expr_p);
  gcc_assert (CONVERT_EXPR_P (*expr_p));

  /* Then strip away all but the outermost conversion.  */
  STRIP_SIGN_NOPS (TREE_OPERAND (*expr_p, 0));

  /* And remove the outermost conversion if it's useless.  */
  if (tree_ssa_useless_type_conversion (*expr_p))
    *expr_p = TREE_OPERAND (*expr_p, 0);

  /* Attempt to avoid NOP_EXPR by producing reference to a subtype.
     For example this fold (subclass *)&A into &A->subclass avoiding
     a need for statement.  */
  if (CONVERT_EXPR_P (*expr_p)
      && POINTER_TYPE_P (TREE_TYPE (*expr_p))
      && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (*expr_p, 0)))
      && (tem = maybe_fold_offset_to_address (EXPR_LOCATION (*expr_p),
					      TREE_OPERAND (*expr_p, 0),
					      integer_zero_node,
					      TREE_TYPE (*expr_p))) != NULL_TREE)
    *expr_p = tem;

  /* If we still have a conversion at the toplevel,
     then canonicalize some constructs.  */
  if (CONVERT_EXPR_P (*expr_p))
    {
      tree sub = TREE_OPERAND (*expr_p, 0);

      /* If a NOP conversion is changing the type of a COMPONENT_REF
	 expression, then canonicalize its type now in order to expose more
	 redundant conversions.  */
      if (TREE_CODE (sub) == COMPONENT_REF)
	canonicalize_component_ref (&TREE_OPERAND (*expr_p, 0));

      /* If a NOP conversion is changing a pointer to array of foo
	 to a pointer to foo, embed that change in the ADDR_EXPR.  */
      else if (TREE_CODE (sub) == ADDR_EXPR)
	canonicalize_addr_expr (expr_p);
    }

  /* If we have a conversion to a non-register type force the
     use of a VIEW_CONVERT_EXPR instead.  */
  if (CONVERT_EXPR_P (*expr_p) && !is_gimple_reg_type (TREE_TYPE (*expr_p)))
    *expr_p = fold_build1_loc (loc, VIEW_CONVERT_EXPR, TREE_TYPE (*expr_p),
			       TREE_OPERAND (*expr_p, 0));

  return GS_OK;
}

/* Nonlocal VLAs seen in the current function.  */
static struct pointer_set_t *nonlocal_vlas;

/* Gimplify a VAR_DECL or PARM_DECL.  Returns GS_OK if we expanded a
   DECL_VALUE_EXPR, and it's worth re-examining things.  Returns GS_ERROR
   for a leaked local decl (see below), GS_ALL_DONE otherwise.  */

static enum gimplify_status
gimplify_var_or_parm_decl (tree *expr_p)
{
  tree decl = *expr_p;

  /* ??? If this is a local variable, and it has not been seen in any
     outer BIND_EXPR, then it's probably the result of a duplicate
     declaration, for which we've already issued an error.  It would
     be really nice if the front end wouldn't leak these at all.
     Currently the only known culprit is C++ destructors, as seen
     in g++.old-deja/g++.jason/binding.C.  */
  if (TREE_CODE (decl) == VAR_DECL
      && !DECL_SEEN_IN_BIND_EXPR_P (decl)
      && !TREE_STATIC (decl) && !DECL_EXTERNAL (decl)
      && decl_function_context (decl) == current_function_decl)
    {
      gcc_assert (seen_error ());
      return GS_ERROR;
    }

  /* If the decl is an alias for another expression, substitute it now.  */
  if (DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree value_expr = DECL_VALUE_EXPR (decl);

      /* For referenced nonlocal VLAs add a decl for debugging purposes
	 to the current function.  */
      if (TREE_CODE (decl) == VAR_DECL
	  && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST
	  && nonlocal_vlas != NULL
	  && TREE_CODE (value_expr) == INDIRECT_REF
	  && TREE_CODE (TREE_OPERAND (value_expr, 0)) == VAR_DECL
	  && decl_function_context (decl) != current_function_decl)
	{
	  /* pointer_set_insert returns nonzero if DECL was already in the
	     set, so each nonlocal VLA gets at most one debug copy.  */
	  if (!pointer_set_insert (nonlocal_vlas, decl))
	    {
	      tree copy = copy_node (decl), block;

	      lang_hooks.dup_lang_specific_decl (copy);
	      SET_DECL_RTL (copy, 0);
	      TREE_USED (copy) = 1;
	      block = DECL_INITIAL (current_function_decl);
	      DECL_CHAIN (copy) = BLOCK_VARS (block);
	      BLOCK_VARS (block) = copy;
	      SET_DECL_VALUE_EXPR (copy, unshare_expr (value_expr));
	      DECL_HAS_VALUE_EXPR_P (copy) = 1;
	    }
	}

      *expr_p = unshare_expr (value_expr);
      return GS_OK;
    }

  return GS_ALL_DONE;
}

/* Gimplify the COMPONENT_REF, ARRAY_REF, REALPART_EXPR or IMAGPART_EXPR
   node *EXPR_P.

      compound_lval
	      : min_lval '[' val ']'
	      | min_lval '.' ID
	      | compound_lval '[' val ']'
	      | compound_lval '.' ID

   This is not part of the original SIMPLE definition, which separates
   array and member references, but it seems reasonable to handle them
   together.  Also, this way we don't run into problems with union
   aliasing; gcc requires that for accesses through a union to alias, the
   union reference must be explicit, which was not always the case when we
   were splitting up array and member refs.

   PRE_P points to the sequence where side effects that must happen before
     *EXPR_P should be stored.
POST_P points to the sequence where side effects that must happen after
     *EXPR_P should be stored.  */

static enum gimplify_status
gimplify_compound_lval (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
			fallback_t fallback)
{
  tree *p;
  VEC(tree,heap) *stack;
  enum gimplify_status ret = GS_ALL_DONE, tret;
  int i;
  location_t loc = EXPR_LOCATION (*expr_p);
  tree expr = *expr_p;

  /* Create a stack of the subexpressions so later we can walk them in
     order from inner to outer.  */
  stack = VEC_alloc (tree, heap, 10);

  /* We can handle anything that get_inner_reference can deal with.  */
  for (p = expr_p; ; p = &TREE_OPERAND (*p, 0))
    {
    restart:
      /* Fold INDIRECT_REFs now to turn them into ARRAY_REFs.  */
      if (TREE_CODE (*p) == INDIRECT_REF)
	*p = fold_indirect_ref_loc (loc, *p);

      if (handled_component_p (*p))
	;
      /* Expand DECL_VALUE_EXPR now.  In some cases that may expose
	 additional COMPONENT_REFs.  */
      else if ((TREE_CODE (*p) == VAR_DECL || TREE_CODE (*p) == PARM_DECL)
	       && gimplify_var_or_parm_decl (p) == GS_OK)
	goto restart;
      else
	break;

      VEC_safe_push (tree, heap, stack, *p);
    }

  gcc_assert (VEC_length (tree, stack));

  /* Now STACK is a stack of pointers to all the refs we've walked through
     and P points to the innermost expression.

     Java requires that we elaborated nodes in source order.  That
     means we must gimplify the inner expression followed by each of
     the indices, in order.  But we can't gimplify the inner
     expression until we deal with any variable bounds, sizes, or
     positions in order to deal with PLACEHOLDER_EXPRs.

     So we do this in three steps.  First we deal with the annotations
     for any variables in the components, then we gimplify the base,
     then we gimplify any indices, from left to right.  */
  for (i = VEC_length (tree, stack) - 1; i >= 0; i--)
    {
      tree t = VEC_index (tree, stack, i);

      if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
	{
	  /* Gimplify the low bound (operand 2) and element type size
	     (operand 3) and put them into the ARRAY_REF.  If these values
	     are set, they have already been gimplified.  */
	  if (TREE_OPERAND (t, 2) == NULL_TREE)
	    {
	      tree low = unshare_expr (array_ref_low_bound (t));
	      if (!is_gimple_min_invariant (low))
		{
		  TREE_OPERAND (t, 2) = low;
		  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p,
					post_p, is_gimple_reg,
					fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }

	  if (!TREE_OPERAND (t, 3))
	    {
	      tree elmt_type = TREE_TYPE (TREE_TYPE (TREE_OPERAND (t, 0)));
	      tree elmt_size = unshare_expr (array_ref_element_size (t));
	      tree factor = size_int (TYPE_ALIGN_UNIT (elmt_type));

	      /* Divide the element size by the alignment of the element
		 type (above).  */
	      elmt_size = size_binop_loc (loc, EXACT_DIV_EXPR,
					  elmt_size, factor);

	      if (!is_gimple_min_invariant (elmt_size))
		{
		  TREE_OPERAND (t, 3) = elmt_size;
		  tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p,
					post_p, is_gimple_reg,
					fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	}
      else if (TREE_CODE (t) == COMPONENT_REF)
	{
	  /* Set the field offset into T (operand 2) and gimplify it.  */
	  if (!TREE_OPERAND (t, 2))
	    {
	      tree offset = unshare_expr (component_ref_field_offset (t));
	      tree field = TREE_OPERAND (t, 1);
	      tree factor
		= size_int (DECL_OFFSET_ALIGN (field) / BITS_PER_UNIT);

	      /* Divide the offset by its alignment.  */
	      offset = size_binop_loc (loc, EXACT_DIV_EXPR, offset, factor);

	      if (!is_gimple_min_invariant (offset))
		{
		  TREE_OPERAND (t, 2) = offset;
		  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p,
					post_p, is_gimple_reg,
					fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	}
    }

  /* Step 2 is to gimplify the base expression.  Make sure lvalue is set
     so as to match the min_lval predicate.  Failure to do so may result
     in the creation of large aggregate temporaries.  */
  tret = gimplify_expr (p, pre_p, post_p, is_gimple_min_lval,
			fallback | fb_lvalue);
  ret = MIN (ret, tret);

  /* And finally, the indices and operands to BIT_FIELD_REF.  During this
     loop we also remove any useless conversions.  */
  for (; VEC_length (tree, stack) > 0; )
    {
      tree t = VEC_pop (tree, stack);

      if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
	{
	  /* Gimplify the dimension.  */
	  if (!is_gimple_min_invariant (TREE_OPERAND (t, 1)))
	    {
	      tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p,
				    is_gimple_val, fb_rvalue);
	      ret = MIN (ret, tret);
	    }
	}
      else if (TREE_CODE (t) == BIT_FIELD_REF)
	{
	  tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p,
				is_gimple_val, fb_rvalue);
	  ret = MIN (ret, tret);
	  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
				is_gimple_val, fb_rvalue);
	  ret = MIN (ret, tret);
	}

      STRIP_USELESS_TYPE_CONVERSION (TREE_OPERAND (t, 0));

      /* The innermost expression P may have originally had
	 TREE_SIDE_EFFECTS set which would have caused all the outer
	 expressions in *EXPR_P leading to P to also have had
	 TREE_SIDE_EFFECTS set.  */
      recalculate_side_effects (t);
    }

  /* If the outermost expression is a COMPONENT_REF, canonicalize its type.  */
  if ((fallback & fb_rvalue) && TREE_CODE (*expr_p) == COMPONENT_REF)
    {
      canonicalize_component_ref (expr_p);
    }

  VEC_free (tree, heap, stack);

  gcc_assert (*expr_p == expr || ret != GS_ALL_DONE);

  return ret;
}

/* Gimplify the self modifying expression pointed to by EXPR_P
   (++, --, +=, -=).

   PRE_P points to the list where side effects that must happen before
     *EXPR_P should be stored.

   POST_P points to the list where side effects that must happen after
     *EXPR_P should be stored.

   WANT_VALUE is nonzero iff we want to use the value of this expression
     in another expression.  */
static enum gimplify_status
gimplify_self_mod_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
			bool want_value)
{
  enum tree_code code;
  tree lhs, lvalue, rhs, t1;
  gimple_seq post = NULL, *orig_post_p = post_p;
  bool postfix;
  enum tree_code arith_code;
  enum gimplify_status ret;
  location_t loc = EXPR_LOCATION (*expr_p);

  code = TREE_CODE (*expr_p);

  gcc_assert (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR
	      || code == PREINCREMENT_EXPR || code == PREDECREMENT_EXPR);

  /* Prefix or postfix?  */
  if (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR)
    /* Faster to treat as prefix if result is not used.  */
    postfix = want_value;
  else
    postfix = false;

  /* For postfix, make sure the inner expression's post side effects
     are executed after side effects from this expression.  */
  if (postfix)
    post_p = &post;

  /* Add or subtract?  */
  if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
    arith_code = PLUS_EXPR;
  else
    arith_code = MINUS_EXPR;

  /* Gimplify the LHS into a GIMPLE lvalue.  */
  lvalue = TREE_OPERAND (*expr_p, 0);
  ret = gimplify_expr (&lvalue, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
  if (ret == GS_ERROR)
    return ret;

  /* Extract the operands to the arithmetic operation.  */
  lhs = lvalue;
  rhs = TREE_OPERAND (*expr_p, 1);

  /* For postfix operator, we evaluate the LHS to an rvalue and then use
     that as the result value and in the postqueue operation.  We also
     make sure to make lvalue a minimal lval, see
     gcc.c-torture/execute/20040313-1.c for an example where this matters.  */
  if (postfix)
    {
      if (!is_gimple_min_lval (lvalue))
	{
	  mark_addressable (lvalue);
	  lvalue = build_fold_addr_expr_loc (input_location, lvalue);
	  gimplify_expr (&lvalue, pre_p, post_p, is_gimple_val, fb_rvalue);
	  lvalue = build_fold_indirect_ref_loc (input_location, lvalue);
	}
      ret = gimplify_expr (&lhs, pre_p, post_p, is_gimple_val, fb_rvalue);
      if (ret == GS_ERROR)
	return ret;
    }

  /* For POINTERs increment, use POINTER_PLUS_EXPR.  */
  if (POINTER_TYPE_P (TREE_TYPE (lhs)))
    {
      rhs = fold_convert_loc (loc, sizetype, rhs);
      if (arith_code == MINUS_EXPR)
	/* Pointer subtraction becomes addition of the negated offset.  */
	rhs = fold_build1_loc (loc, NEGATE_EXPR, TREE_TYPE (rhs), rhs);
      arith_code = POINTER_PLUS_EXPR;
    }

  t1 = build2 (arith_code, TREE_TYPE (*expr_p), lhs, rhs);

  if (postfix)
    {
      /* The assignment goes on the caller's post queue, so the pre-read
	 value LHS is what the expression yields.  */
      gimplify_assign (lvalue, t1, orig_post_p);
      gimplify_seq_add_seq (orig_post_p, post);
      *expr_p = lhs;
      return GS_ALL_DONE;
    }
  else
    {
      *expr_p = build2 (MODIFY_EXPR, TREE_TYPE (lvalue), lvalue, t1);
      return GS_OK;
    }
}

/* If *EXPR_P has a variable sized type, wrap it in a WITH_SIZE_EXPR.  */

static void
maybe_with_size_expr (tree *expr_p)
{
  tree expr = *expr_p;
  tree type = TREE_TYPE (expr);
  tree size;

  /* If we've already wrapped this or the type is error_mark_node, we can't do
     anything.  */
  if (TREE_CODE (expr) == WITH_SIZE_EXPR
      || type == error_mark_node)
    return;

  /* If the size isn't known or is a constant, we have nothing to do.  */
  size = TYPE_SIZE_UNIT (type);
  if (!size || TREE_CODE (size) == INTEGER_CST)
    return;

  /* Otherwise, make a WITH_SIZE_EXPR.  */
  size = unshare_expr (size);
  size = SUBSTITUTE_PLACEHOLDER_IN_EXPR (size, expr);
  *expr_p = build2 (WITH_SIZE_EXPR, type, expr, size);
}

/* Helper for gimplify_call_expr.  Gimplify a single argument *ARG_P
   Store any side-effects in PRE_P.  CALL_LOCATION is the location of
   the CALL_EXPR.  */

static enum gimplify_status
gimplify_arg (tree *arg_p, gimple_seq *pre_p, location_t call_location)
{
  bool (*test) (tree);
  fallback_t fb;

  /* In general, we allow lvalues for function arguments to avoid
     extra overhead of copying large aggregates out of even larger
     aggregates into temporaries only to copy the temporaries to
     the argument list.  Make optimizers happy by pulling out to
     temporaries those types that fit in registers.  */
  if (is_gimple_reg_type (TREE_TYPE (*arg_p)))
    test = is_gimple_val, fb = fb_rvalue;
  else
    test = is_gimple_lvalue, fb = fb_either;

  /* If this is a variable sized type, we must remember the size.  */
maybe_with_size_expr (arg_p);

  /* FIXME diagnostics: This will mess up gcc.dg/Warray-bounds.c.  */
  /* Make sure arguments have the same location as the function call
     itself.  */
  protected_set_expr_location (*arg_p, call_location);

  /* There is a sequence point before a function call.  Side effects in
     the argument list must occur before the actual call.  So, when
     gimplifying arguments, force gimplify_expr to use an internal
     post queue which is then appended to the end of PRE_P.  */
  return gimplify_expr (arg_p, pre_p, NULL, test, fb);
}

/* Gimplify the CALL_EXPR node *EXPR_P into the GIMPLE sequence PRE_P.
   WANT_VALUE is true if the result of the call is desired.  */

static enum gimplify_status
gimplify_call_expr (tree *expr_p, gimple_seq *pre_p, bool want_value)
{
  tree fndecl, parms, p;
  enum gimplify_status ret;
  int i, nargs;
  gimple call;
  bool builtin_va_start_p = FALSE;
  location_t loc = EXPR_LOCATION (*expr_p);

  gcc_assert (TREE_CODE (*expr_p) == CALL_EXPR);

  /* For reliable diagnostics during inlining, it is necessary that
     every call_expr be annotated with file and line.  */
  if (! EXPR_HAS_LOCATION (*expr_p))
    SET_EXPR_LOCATION (*expr_p, input_location);

  /* This may be a call to a builtin function.

     Builtin function calls may be transformed into different
     (and more efficient) builtin function calls under certain
     circumstances.  Unfortunately, gimplification can muck things
     up enough that the builtin expanders are not aware that certain
     transformations are still valid.

     So we attempt transformation/gimplification of the call before
     we gimplify the CALL_EXPR.  At this time we do not manage to
     transform all calls in the same manner as the expanders do, but
     we do transform most of them.  */
  fndecl = get_callee_fndecl (*expr_p);
  if (fndecl && DECL_BUILT_IN (fndecl))
    {
      tree new_tree = fold_call_expr (input_location, *expr_p, !want_value);

      if (new_tree && new_tree != *expr_p)
	{
	  /* There was a transformation of this call which computes the
	     same value, but in a more efficient way.  Return and try
	     again.  */
	  *expr_p = new_tree;
	  return GS_OK;
	}

      if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_VA_START)
	{
	  builtin_va_start_p = TRUE;
	  if (call_expr_nargs (*expr_p) < 2)
	    {
	      error ("too few arguments to function %<va_start%>");
	      *expr_p = build_empty_stmt (EXPR_LOCATION (*expr_p));
	      return GS_OK;
	    }

	  if (fold_builtin_next_arg (*expr_p, true))
	    {
	      *expr_p = build_empty_stmt (EXPR_LOCATION (*expr_p));
	      return GS_OK;
	    }
	}
    }

  /* There is a sequence point before the call, so any side effects in
     the calling expression must occur before the actual call.  Force
     gimplify_expr to use an internal post queue.  */
  ret = gimplify_expr (&CALL_EXPR_FN (*expr_p), pre_p, NULL,
		       is_gimple_call_addr, fb_rvalue);

  nargs = call_expr_nargs (*expr_p);

  /* Get argument types for verification.  */
  fndecl = get_callee_fndecl (*expr_p);
  parms = NULL_TREE;
  if (fndecl)
    parms = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
  else if (POINTER_TYPE_P (TREE_TYPE (CALL_EXPR_FN (*expr_p))))
    parms = TYPE_ARG_TYPES (TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (*expr_p))));

  if (fndecl && DECL_ARGUMENTS (fndecl))
    p = DECL_ARGUMENTS (fndecl);
  else if (parms)
    p = parms;
  else
    p = NULL_TREE;
  /* Walk the formal parameter list in step with the actual arguments;
     afterwards P is null iff every actual matched a named formal.  */
  for (i = 0; i < nargs && p; i++, p = TREE_CHAIN (p))
    ;

  /* If the last argument is __builtin_va_arg_pack () and it is not
     passed as a named argument, decrease the number of CALL_EXPR
     arguments and set instead the CALL_EXPR_VA_ARG_PACK flag.  */
  if (!p
      && i < nargs
      && TREE_CODE (CALL_EXPR_ARG (*expr_p, nargs - 1)) == CALL_EXPR)
    {
      tree last_arg = CALL_EXPR_ARG (*expr_p, nargs - 1);
      tree last_arg_fndecl = get_callee_fndecl (last_arg);

      if (last_arg_fndecl
	  && TREE_CODE (last_arg_fndecl) == FUNCTION_DECL
	  && DECL_BUILT_IN_CLASS (last_arg_fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (last_arg_fndecl) == BUILT_IN_VA_ARG_PACK)
	{
	  tree call = *expr_p;

	  --nargs;
	  *expr_p = build_call_array_loc (loc, TREE_TYPE (call),
					  CALL_EXPR_FN (call),
					  nargs, CALL_EXPR_ARGP (call));

	  /* Copy all CALL_EXPR flags, location and block, except
	     CALL_EXPR_VA_ARG_PACK flag.  */
	  CALL_EXPR_STATIC_CHAIN (*expr_p) = CALL_EXPR_STATIC_CHAIN (call);
	  CALL_EXPR_TAILCALL (*expr_p) = CALL_EXPR_TAILCALL (call);
	  CALL_EXPR_RETURN_SLOT_OPT (*expr_p)
	    = CALL_EXPR_RETURN_SLOT_OPT (call);
	  CALL_FROM_THUNK_P (*expr_p) = CALL_FROM_THUNK_P (call);
	  CALL_CANNOT_INLINE_P (*expr_p) = CALL_CANNOT_INLINE_P (call);
	  SET_EXPR_LOCATION (*expr_p, EXPR_LOCATION (call));
	  TREE_BLOCK (*expr_p) = TREE_BLOCK (call);

	  /* Set CALL_EXPR_VA_ARG_PACK.  */
	  CALL_EXPR_VA_ARG_PACK (*expr_p) = 1;
	}
    }

  /* Finally, gimplify the function arguments.  */
  if (nargs > 0)
    {
      for (i = (PUSH_ARGS_REVERSED ? nargs - 1 : 0);
           PUSH_ARGS_REVERSED ? i >= 0 : i < nargs;
           PUSH_ARGS_REVERSED ? i-- : i++)
        {
          enum gimplify_status t;

          /* Avoid gimplifying the second argument to va_start, which needs
             to be the plain PARM_DECL.  */
          if ((i != 1) || !builtin_va_start_p)
            {
              t = gimplify_arg (&CALL_EXPR_ARG (*expr_p, i), pre_p,
				EXPR_LOCATION (*expr_p));

              if (t == GS_ERROR)
                ret = GS_ERROR;
            }
        }
    }

  /* Verify the function result.  */
  if (want_value && fndecl
      && VOID_TYPE_P (TREE_TYPE (TREE_TYPE (fndecl))))
    {
      error_at (loc, "using result of function returning %<void%>");
      ret = GS_ERROR;
    }

  /* Try this again in case gimplification exposed something.  */
  if (ret != GS_ERROR)
    {
      tree new_tree = fold_call_expr (input_location, *expr_p, !want_value);

      if (new_tree && new_tree != *expr_p)
	{
	  /* There was a transformation of this call which computes the
	     same value, but in a more efficient way.  Return and try
	     again.  */
	  *expr_p = new_tree;
	  return GS_OK;
	}
    }
  else
    {
      *expr_p = error_mark_node;
      return GS_ERROR;
    }

  /* If the function is "const" or "pure", then clear TREE_SIDE_EFFECTS on its
     decl.  This allows us to eliminate redundant or useless
     calls to "const" functions.  */
  if (TREE_CODE (*expr_p) == CALL_EXPR)
    {
      int flags = call_expr_flags (*expr_p);
      if (flags & (ECF_CONST | ECF_PURE)
	  /* An infinite loop is considered a side effect.  */
	  && !(flags & (ECF_LOOPING_CONST_OR_PURE)))
	TREE_SIDE_EFFECTS (*expr_p) = 0;
    }

  /* If the value is not needed by the caller, emit a new GIMPLE_CALL
     and clear *EXPR_P.  Otherwise, leave *EXPR_P in its gimplified
     form and delegate the creation of a GIMPLE_CALL to
     gimplify_modify_expr.  This is always possible because when
     WANT_VALUE is true, the caller wants the result of this call into
     a temporary, which means that we will emit an INIT_EXPR in
     internal_get_tmp_var which will then be handled by
     gimplify_modify_expr.  */
  if (!want_value)
    {
      /* The CALL_EXPR in *EXPR_P is already in GIMPLE form, so all we
	 have to do is replicate it as a GIMPLE_CALL tuple.  */
      gimple_stmt_iterator gsi;
      call = gimple_build_call_from_tree (*expr_p);
      gimplify_seq_add_stmt (pre_p, call);
      gsi = gsi_last (*pre_p);
      fold_stmt (&gsi);
      *expr_p = NULL_TREE;
    }

  return ret;
}

/* Handle shortcut semantics in the predicate operand of a COND_EXPR by
   rewriting it into multiple COND_EXPRs, and possibly GOTO_EXPRs.

   TRUE_LABEL_P and FALSE_LABEL_P point to the labels to jump to if the
   condition is true or false, respectively.  If null, we should generate
   our own to skip over the evaluation of this specific expression.

   LOCUS is the source location of the COND_EXPR.

   This function is the tree equivalent of do_jump.
shortcut_cond_r should only be called by shortcut_cond_expr.  */

static tree
shortcut_cond_r (tree pred, tree *true_label_p, tree *false_label_p,
		 location_t locus)
{
  tree local_label = NULL_TREE;
  tree t, expr = NULL;

  /* OK, it's not a simple case; we need to pull apart the COND_EXPR to
     retain the shortcut semantics.  Just insert the gotos here;
     shortcut_cond_expr will append the real blocks later.  */
  if (TREE_CODE (pred) == TRUTH_ANDIF_EXPR)
    {
      location_t new_locus;

      /* Turn if (a && b) into

	 if (a); else goto no;
	 if (b) goto yes; else goto no;
	 (no:) */

      if (false_label_p == NULL)
	false_label_p = &local_label;

      /* Keep the original source location on the first 'if'.  */
      t = shortcut_cond_r (TREE_OPERAND (pred, 0), NULL, false_label_p, locus);
      append_to_statement_list (t, &expr);

      /* Set the source location of the && on the second 'if'.  */
      new_locus = EXPR_HAS_LOCATION (pred) ? EXPR_LOCATION (pred) : locus;
      t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p, false_label_p,
			   new_locus);
      append_to_statement_list (t, &expr);
    }
  else if (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
    {
      location_t new_locus;

      /* Turn if (a || b) into

	 if (a) goto yes;
	 if (b) goto yes; else goto no;
	 (yes:) */

      if (true_label_p == NULL)
	true_label_p = &local_label;

      /* Keep the original source location on the first 'if'.  */
      t = shortcut_cond_r (TREE_OPERAND (pred, 0), true_label_p, NULL, locus);
      append_to_statement_list (t, &expr);

      /* Set the source location of the || on the second 'if'.  */
      new_locus = EXPR_HAS_LOCATION (pred) ? EXPR_LOCATION (pred) : locus;
      t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p, false_label_p,
			   new_locus);
      append_to_statement_list (t, &expr);
    }
  else if (TREE_CODE (pred) == COND_EXPR)
    {
      location_t new_locus;

      /* As long as we're messing with gotos, turn if (a ? b : c) into
	 if (a)
	   if (b) goto yes; else goto no;
	 else
	   if (c) goto yes; else goto no;  */

      /* Keep the original source location on the first 'if'.  Set the source
	 location of the ? on the second 'if'.  */
      new_locus = EXPR_HAS_LOCATION (pred) ? EXPR_LOCATION (pred) : locus;
      expr = build3 (COND_EXPR, void_type_node, TREE_OPERAND (pred, 0),
		     shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
				      false_label_p, locus),
		     shortcut_cond_r (TREE_OPERAND (pred, 2), true_label_p,
				      false_label_p, new_locus));
    }
  else
    {
      /* Base case: a simple predicate becomes a two-way conditional jump.  */
      expr = build3 (COND_EXPR, void_type_node, pred,
		     build_and_jump (true_label_p),
		     build_and_jump (false_label_p));
      SET_EXPR_LOCATION (expr, locus);
    }

  /* A label created locally above must be emitted here, at the join point.  */
  if (local_label)
    {
      t = build1 (LABEL_EXPR, void_type_node, local_label);
      append_to_statement_list (t, &expr);
    }

  return expr;
}

/* Given a conditional expression EXPR with short-circuit boolean
   predicates using TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR, break the
   predicate apart into the equivalent sequence of conditionals.  */

static tree
shortcut_cond_expr (tree expr)
{
  tree pred = TREE_OPERAND (expr, 0);
  tree then_ = TREE_OPERAND (expr, 1);
  tree else_ = TREE_OPERAND (expr, 2);
  tree true_label, false_label, end_label, t;
  tree *true_label_p;
  tree *false_label_p;
  bool emit_end, emit_false, jump_over_else;
  bool then_se = then_ && TREE_SIDE_EFFECTS (then_);
  bool else_se = else_ && TREE_SIDE_EFFECTS (else_);

  /* First do simple transformations.  */
  if (!else_se)
    {
      /* If there is no 'else', turn
	   if (a && b) then c
	 into
	   if (a) if (b) then c.  */
      while (TREE_CODE (pred) == TRUTH_ANDIF_EXPR)
	{
	  /* Keep the original source location on the first 'if'.  */
	  location_t locus = EXPR_LOC_OR_HERE (expr);
	  TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1);
	  /* Set the source location of the && on the second 'if'.  */
	  if (EXPR_HAS_LOCATION (pred))
	    SET_EXPR_LOCATION (expr, EXPR_LOCATION (pred));
	  then_ = shortcut_cond_expr (expr);
	  then_se = then_ && TREE_SIDE_EFFECTS (then_);
	  pred = TREE_OPERAND (pred, 0);
	  expr = build3 (COND_EXPR, void_type_node, pred, then_, NULL_TREE);
	  SET_EXPR_LOCATION (expr, locus);
	}
    }

  if (!then_se)
    {
      /* If there is no 'then', turn
	   if (a || b); else d
	 into
	   if (a); else if (b); else d.  */
      while (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
	{
	  /* Keep the original source location on the first 'if'.  */
	  location_t locus = EXPR_LOC_OR_HERE (expr);
	  TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1);
	  /* Set the source location of the || on the second 'if'.  */
	  if (EXPR_HAS_LOCATION (pred))
	    SET_EXPR_LOCATION (expr, EXPR_LOCATION (pred));
	  else_ = shortcut_cond_expr (expr);
	  else_se = else_ && TREE_SIDE_EFFECTS (else_);
	  pred = TREE_OPERAND (pred, 0);
	  expr = build3 (COND_EXPR, void_type_node, pred, NULL_TREE, else_);
	  SET_EXPR_LOCATION (expr, locus);
	}
    }

  /* If we're done, great.  */
  if (TREE_CODE (pred) != TRUTH_ANDIF_EXPR
      && TREE_CODE (pred) != TRUTH_ORIF_EXPR)
    return expr;

  /* Otherwise we need to mess with gotos.  Change
       if (a) c; else d;
     to
       if (a); else goto no;
       c; goto end;
       no: d; end:
     and recursively gimplify the condition.  */

  true_label = false_label = end_label = NULL_TREE;

  /* If our arms just jump somewhere, hijack those labels so we don't
     generate jumps to jumps.  */

  if (then_
      && TREE_CODE (then_) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (then_)) == LABEL_DECL)
    {
      true_label = GOTO_DESTINATION (then_);
      then_ = NULL;
      then_se = false;
    }

  if (else_
      && TREE_CODE (else_) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (else_)) == LABEL_DECL)
    {
      false_label = GOTO_DESTINATION (else_);
      else_ = NULL;
      else_se = false;
    }

  /* If we aren't hijacking a label for the 'then' branch, it falls through.  */
  if (true_label)
    true_label_p = &true_label;
  else
    true_label_p = NULL;

  /* The 'else' branch also needs a label if it contains interesting code.  */
  if (false_label || else_se)
    false_label_p = &false_label;
  else
    false_label_p = NULL;

  /* If there was nothing else in our arms, just forward the label(s).  */
  if (!then_se && !else_se)
    return shortcut_cond_r (pred, true_label_p, false_label_p,
			    EXPR_LOC_OR_HERE (expr));

  /* If our last subexpression already has a terminal label, reuse it.  */
  if (else_se)
    t = expr_last (else_);
  else if (then_se)
    t = expr_last (then_);
  else
    t = NULL;
  if (t && TREE_CODE (t) == LABEL_EXPR)
    end_label = LABEL_EXPR_LABEL (t);

  /* If we don't care about jumping to the 'else' branch, jump to the end
     if the condition is false.  */
  if (!false_label_p)
    false_label_p = &end_label;

  /* We only want to emit these labels if we aren't hijacking them.  */
  emit_end = (end_label == NULL_TREE);
  emit_false = (false_label == NULL_TREE);

  /* We only emit the jump over the else clause if we have to--if the
     then clause may fall through.  Otherwise we can wind up with a
     useless jump and a useless label at the end of gimplified code,
     which will cause us to think that this conditional as a whole
     falls through even if it doesn't.  If we then inline a function
     which ends with such a condition, that can cause us to issue an
     inappropriate warning about control reaching the end of a
     non-void function.  */
  jump_over_else = block_may_fallthru (then_);

  pred = shortcut_cond_r (pred, true_label_p, false_label_p,
			  EXPR_LOC_OR_HERE (expr));

  expr = NULL;
  append_to_statement_list (pred, &expr);

  append_to_statement_list (then_, &expr);
  if (else_se)
    {
      if (jump_over_else)
	{
	  tree last = expr_last (expr);
	  t = build_and_jump (&end_label);
	  if (EXPR_HAS_LOCATION (last))
	    SET_EXPR_LOCATION (t, EXPR_LOCATION (last));
	  append_to_statement_list (t, &expr);
	}
      if (emit_false)
	{
	  t = build1 (LABEL_EXPR, void_type_node, false_label);
	  append_to_statement_list (t, &expr);
	}
      append_to_statement_list (else_, &expr);
    }
  if (emit_end && end_label)
    {
      t = build1 (LABEL_EXPR, void_type_node, end_label);
      append_to_statement_list (t, &expr);
    }

  return expr;
}

/* EXPR is used in a boolean context; make sure it has BOOLEAN_TYPE.  */
tree
gimple_boolify (tree expr)
{
  tree type = TREE_TYPE (expr);
  location_t loc = EXPR_LOCATION (expr);

  if (TREE_CODE (expr) == NE_EXPR
      && TREE_CODE (TREE_OPERAND (expr, 0)) == CALL_EXPR
      && integer_zerop (TREE_OPERAND (expr, 1)))
    {
      tree call = TREE_OPERAND (expr, 0);
      tree fn = get_callee_fndecl (call);

      /* For __builtin_expect ((long) (x), y) recurse into x as well
	 if x is truth_value_p.  */
      if (fn
	  && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fn) == BUILT_IN_EXPECT
	  && call_expr_nargs (call) == 2)
	{
	  tree arg = CALL_EXPR_ARG (call, 0);
	  if (arg)
	    {
	      if (TREE_CODE (arg) == NOP_EXPR
		  && TREE_TYPE (arg) == TREE_TYPE (call))
		arg = TREE_OPERAND (arg, 0);
	      if (truth_value_p (TREE_CODE (arg)))
		{
		  arg = gimple_boolify (arg);
		  CALL_EXPR_ARG (call, 0)
		    = fold_convert_loc (loc, TREE_TYPE (call), arg);
		}
	    }
	}
    }

  /* Already boolean; nothing to do.  */
  if (TREE_CODE (type) == BOOLEAN_TYPE)
    return expr;

  switch (TREE_CODE (expr))
    {
    case TRUTH_AND_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_XOR_EXPR:
    case TRUTH_ANDIF_EXPR:
    case TRUTH_ORIF_EXPR:
      /* Also boolify the arguments of truth exprs.  */
      TREE_OPERAND (expr, 1) = gimple_boolify (TREE_OPERAND (expr, 1));
      /* FALLTHRU */

    case TRUTH_NOT_EXPR:
      TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));
      /* FALLTHRU */

    case EQ_EXPR: case NE_EXPR:
    case LE_EXPR: case GE_EXPR: case LT_EXPR: case GT_EXPR:
      /* These expressions always produce boolean results.  */
      TREE_TYPE (expr) = boolean_type_node;
      return expr;

    default:
      /* Other expressions that get here must have boolean values, but
	 might need to be converted to the appropriate mode.  */
      return fold_convert_loc (loc, boolean_type_node, expr);
    }
}

/* Given a conditional expression *EXPR_P without side effects, gimplify
   its operands.  New statements are inserted to PRE_P.  */

static enum gimplify_status
gimplify_pure_cond_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p, cond;
  enum gimplify_status ret, tret;
  enum tree_code code;

  cond = gimple_boolify (COND_EXPR_COND (expr));

  /* We need to handle && and || specially, as their gimplification
     creates pure cond_expr, thus leading to an infinite cycle otherwise.  */
  code = TREE_CODE (cond);
  if (code == TRUTH_ANDIF_EXPR)
    TREE_SET_CODE (cond, TRUTH_AND_EXPR);
  else if (code == TRUTH_ORIF_EXPR)
    TREE_SET_CODE (cond, TRUTH_OR_EXPR);
  ret = gimplify_expr (&cond, pre_p, NULL, is_gimple_condexpr, fb_rvalue);
  COND_EXPR_COND (*expr_p) = cond;

  tret = gimplify_expr (&COND_EXPR_THEN (expr), pre_p, NULL,
			is_gimple_val, fb_rvalue);
  ret = MIN (ret, tret);
  tret = gimplify_expr (&COND_EXPR_ELSE (expr), pre_p, NULL,
			is_gimple_val, fb_rvalue);

  /* GS_ERROR < GS_OK, so MIN propagates the worst status seen.  */
  return MIN (ret, tret);
}

/* Returns true if evaluating EXPR could trap.
   EXPR is GENERIC, while tree_could_trap_p can be called
   only on GIMPLE.  */

static bool
generic_expr_could_trap_p (tree expr)
{
  unsigned i, n;

  if (!expr || is_gimple_val (expr))
    return false;

  if (!EXPR_P (expr) || tree_could_trap_p (expr))
    return true;

  /* Recurse: any trapping operand makes the whole expression trapping.  */
  n = TREE_OPERAND_LENGTH (expr);
  for (i = 0; i < n; i++)
    if (generic_expr_could_trap_p (TREE_OPERAND (expr, i)))
      return true;

  return false;
}

/*  Convert the conditional expression pointed to by EXPR_P '(p) ? a : b;'
    into

    if (p)			if (p)
      t1 = a;			  a;
    else		or	else
      t1 = b;			  b;
    t1;

    The second form is used when *EXPR_P is of type void.

    PRE_P points to the list where side effects that must happen before
      *EXPR_P should be stored.
*/

static enum gimplify_status
gimplify_cond_expr (tree *expr_p, gimple_seq *pre_p, fallback_t fallback)
{
  tree expr = *expr_p;
  tree type = TREE_TYPE (expr);
  location_t loc = EXPR_LOCATION (expr);
  tree tmp, arm1, arm2;
  enum gimplify_status ret;
  tree label_true, label_false, label_cont;
  bool have_then_clause_p, have_else_clause_p;
  gimple gimple_cond;
  enum tree_code pred_code;
  gimple_seq seq = NULL;

  /* If this COND_EXPR has a value, copy the values into a temporary within
     the arms.  */
  if (!VOID_TYPE_P (type))
    {
      tree then_ = TREE_OPERAND (expr, 1), else_ = TREE_OPERAND (expr, 2);
      tree result;

      /* If either an rvalue is ok or we do not require an lvalue, create the
	 temporary.  But we cannot do that if the type is addressable.  */
      if (((fallback & fb_rvalue) || !(fallback & fb_lvalue))
	  && !TREE_ADDRESSABLE (type))
	{
	  if (gimplify_ctxp->allow_rhs_cond_expr
	      /* If either branch has side effects or could trap, it can't be
		 evaluated unconditionally.  */
	      && !TREE_SIDE_EFFECTS (then_)
	      && !generic_expr_could_trap_p (then_)
	      && !TREE_SIDE_EFFECTS (else_)
	      && !generic_expr_could_trap_p (else_))
	    /* Cheap path: keep a pure COND_EXPR instead of branches.  */
	    return gimplify_pure_cond_expr (expr_p, pre_p);

	  tmp = create_tmp_var (type, "iftmp");
	  result = tmp;
	}

      /* Otherwise, only create and copy references to the values.  */
      else
	{
	  /* An lvalue is required (or the type is addressable): take the
	     address of each arm and dereference the chosen pointer.  */
	  type = build_pointer_type (type);

	  if (!VOID_TYPE_P (TREE_TYPE (then_)))
	    then_ = build_fold_addr_expr_loc (loc, then_);

	  if (!VOID_TYPE_P (TREE_TYPE (else_)))
	    else_ = build_fold_addr_expr_loc (loc, else_);

	  expr
	    = build3 (COND_EXPR, type, TREE_OPERAND (expr, 0), then_, else_);

	  tmp = create_tmp_var (type, "iftmp");
	  result = build_simple_mem_ref_loc (loc, tmp);
	}

      /* Build the new then clause, `tmp = then_;'.  But don't build the
	 assignment if the value is void; in C++ it can be if it's a throw.  */
      if (!VOID_TYPE_P (TREE_TYPE (then_)))
	TREE_OPERAND (expr, 1) = build2 (MODIFY_EXPR, type, tmp, then_);

      /* Similarly, build the new else clause, `tmp = else_;'.  */
      if (!VOID_TYPE_P (TREE_TYPE (else_)))
	TREE_OPERAND (expr, 2) = build2 (MODIFY_EXPR, type, tmp, else_);

      TREE_TYPE (expr) = void_type_node;
      recalculate_side_effects (expr);

      /* Move the COND_EXPR to the prequeue.  */
      gimplify_stmt (&expr, pre_p);

      *expr_p = result;
      return GS_ALL_DONE;
    }

  /* Make sure the condition has BOOLEAN_TYPE.  */
  TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));

  /* Break apart && and || conditions.  */
  if (TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ANDIF_EXPR
      || TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ORIF_EXPR)
    {
      expr = shortcut_cond_expr (expr);

      if (expr != *expr_p)
	{
	  *expr_p = expr;

	  /* We can't rely on gimplify_expr to re-gimplify the expanded
	     form properly, as cleanups might cause the target labels to be
	     wrapped in a TRY_FINALLY_EXPR.  To prevent that, we need to
	     set up a conditional context.  */
	  gimple_push_condition ();
	  gimplify_stmt (expr_p, &seq);
	  gimple_pop_condition (pre_p);
	  gimple_seq_add_seq (pre_p, seq);

	  return GS_ALL_DONE;
	}
    }

  /* Now do the normal gimplification.  */

  /* Gimplify condition.  */
  ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, NULL, is_gimple_condexpr,
		       fb_rvalue);
  if (ret == GS_ERROR)
    return GS_ERROR;
  gcc_assert (TREE_OPERAND (expr, 0) != NULL_TREE);

  gimple_push_condition ();

  /* If an arm is already a bare `goto LABEL' to a label in this function,
     reuse that label as the branch target instead of emitting an
     artificial label plus goto.  */
  have_then_clause_p = have_else_clause_p = false;
  if (TREE_OPERAND (expr, 1) != NULL
      && TREE_CODE (TREE_OPERAND (expr, 1)) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (TREE_OPERAND (expr, 1))) == LABEL_DECL
      && (DECL_CONTEXT (GOTO_DESTINATION (TREE_OPERAND (expr, 1)))
	  == current_function_decl)
      /* For -O0 avoid this optimization if the COND_EXPR and GOTO_EXPR
	 have different locations, otherwise we end up with incorrect
	 location information on the branches.  */
      && (optimize
	  || !EXPR_HAS_LOCATION (expr)
	  || !EXPR_HAS_LOCATION (TREE_OPERAND (expr, 1))
	  || EXPR_LOCATION (expr) == EXPR_LOCATION (TREE_OPERAND (expr, 1))))
    {
      label_true = GOTO_DESTINATION (TREE_OPERAND (expr, 1));
      have_then_clause_p = true;
    }
  else
    label_true = create_artificial_label (UNKNOWN_LOCATION);
  if (TREE_OPERAND (expr, 2) != NULL
      && TREE_CODE (TREE_OPERAND (expr, 2)) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (TREE_OPERAND (expr, 2))) == LABEL_DECL
      && (DECL_CONTEXT (GOTO_DESTINATION (TREE_OPERAND (expr, 2)))
	  == current_function_decl)
      /* For -O0 avoid this optimization if the COND_EXPR and GOTO_EXPR
	 have different locations, otherwise we end up with incorrect
	 location information on the branches.  */
      && (optimize
	  || !EXPR_HAS_LOCATION (expr)
	  || !EXPR_HAS_LOCATION (TREE_OPERAND (expr, 2))
	  || EXPR_LOCATION (expr) == EXPR_LOCATION (TREE_OPERAND (expr, 2))))
    {
      label_false = GOTO_DESTINATION (TREE_OPERAND (expr, 2));
      have_else_clause_p = true;
    }
  else
    label_false = create_artificial_label (UNKNOWN_LOCATION);

  gimple_cond_get_ops_from_tree (COND_EXPR_COND (expr), &pred_code, &arm1,
				 &arm2);

  gimple_cond = gimple_build_cond (pred_code, arm1, arm2, label_true,
				   label_false);

  gimplify_seq_add_stmt (&seq, gimple_cond);
  label_cont = NULL_TREE;
  if (!have_then_clause_p)
    {
      /* For if (...) {} else { code; } put label_true after
	 the else block.  */
      if (TREE_OPERAND (expr, 1) == NULL_TREE
	  && !have_else_clause_p
	  && TREE_OPERAND (expr, 2) != NULL_TREE)
	label_cont = label_true;
      else
	{
	  gimplify_seq_add_stmt (&seq, gimple_build_label (label_true));
	  have_then_clause_p = gimplify_stmt (&TREE_OPERAND (expr, 1), &seq);
	  /* For if (...) { code; } else {} or
	     if (...) { code; } else goto label; or
	     if (...) { code; return; } else { ... }
	     label_cont isn't needed.  */
	  if (!have_else_clause_p
	      && TREE_OPERAND (expr, 2) != NULL_TREE
	      && gimple_seq_may_fallthru (seq))
	    {
	      gimple g;
	      label_cont = create_artificial_label (UNKNOWN_LOCATION);

	      g = gimple_build_goto (label_cont);

	      /* GIMPLE_COND's are very low level; they have embedded
		 gotos.  This particular embedded goto should not be marked
		 with the location of the original COND_EXPR, as it would
		 correspond to the COND_EXPR's condition, not the ELSE or the
		 THEN arms.  To avoid marking it with the wrong location, flag
		 it as "no location".  */
	      gimple_set_do_not_emit_location (g);

	      gimplify_seq_add_stmt (&seq, g);
	    }
	}
    }
  if (!have_else_clause_p)
    {
      gimplify_seq_add_stmt (&seq, gimple_build_label (label_false));
      have_else_clause_p = gimplify_stmt (&TREE_OPERAND (expr, 2), &seq);
    }
  if (label_cont)
    gimplify_seq_add_stmt (&seq, gimple_build_label (label_cont));

  gimple_pop_condition (pre_p);
  gimple_seq_add_seq (pre_p, seq);

  if (ret == GS_ERROR)
    ; /* Do nothing.  */
  else if (have_then_clause_p || have_else_clause_p)
    ret = GS_ALL_DONE;
  else
    {
      /* Both arms are empty; replace the COND_EXPR with its predicate.  */
      expr = TREE_OPERAND (expr, 0);
      gimplify_stmt (&expr, pre_p);
    }

  *expr_p = NULL;
  return ret;
}

/* Prepare the node pointed to by EXPR_P, an is_gimple_addressable
   expression, to be marked addressable.

   We cannot rely on such an expression being directly markable if a temporary
   has been created by the gimplification.  In this case, we create another
   temporary and initialize it with a copy, which will become a store after we
   mark it addressable.  This can happen if the front-end passed us something
   that it could not mark addressable yet, like a Fortran pass-by-reference
   parameter (int) floatvar.  */

static void
prepare_gimple_addressable (tree *expr_p, gimple_seq *seq_p)
{
  /* Walk down to the base object of any component chain.  */
  while (handled_component_p (*expr_p))
    expr_p = &TREE_OPERAND (*expr_p, 0);
  /* A gimple register cannot have its address taken; replace it with an
     initialized memory temporary in place.  */
  if (is_gimple_reg (*expr_p))
    *expr_p = get_initialized_tmp_var (*expr_p, seq_p, NULL);
}

/* A subroutine of gimplify_modify_expr.
   Replace a MODIFY_EXPR with a call to __builtin_memcpy.  */

static enum gimplify_status
gimplify_modify_expr_to_memcpy (tree *expr_p, tree size, bool want_value,
    				gimple_seq *seq_p)
{
  tree t, to, to_ptr, from, from_ptr;
  gimple gs;
  location_t loc = EXPR_LOCATION (*expr_p);

  to = TREE_OPERAND (*expr_p, 0);
  from = TREE_OPERAND (*expr_p, 1);

  /* Mark the RHS addressable.  Beware that it may not be possible to do so
     directly if a temporary has been created by the gimplification.  */
  prepare_gimple_addressable (&from, seq_p);

  mark_addressable (from);
  from_ptr = build_fold_addr_expr_loc (loc, from);
  gimplify_arg (&from_ptr, seq_p, loc);

  mark_addressable (to);
  to_ptr = build_fold_addr_expr_loc (loc, to);
  gimplify_arg (&to_ptr, seq_p, loc);

  t = implicit_built_in_decls[BUILT_IN_MEMCPY];

  gs = gimple_build_call (t, 3, to_ptr, from_ptr, size);

  if (want_value)
    {
      /* tmp = memcpy() */
      t = create_tmp_var (TREE_TYPE (to_ptr), NULL);
      gimple_call_set_lhs (gs, t);
      gimplify_seq_add_stmt (seq_p, gs);

      /* memcpy returns the destination pointer; hand back *tmp as the
	 value of the original MODIFY_EXPR.  */
      *expr_p = build_simple_mem_ref (t);
      return GS_ALL_DONE;
    }

  gimplify_seq_add_stmt (seq_p, gs);
  *expr_p = NULL;
  return GS_ALL_DONE;
}

/* A subroutine of gimplify_modify_expr.  Replace a MODIFY_EXPR with
   a call to __builtin_memset.  In this case we know that the RHS is
   a CONSTRUCTOR with an empty element list.  */

static enum gimplify_status
gimplify_modify_expr_to_memset (tree *expr_p, tree size, bool want_value,
    				gimple_seq *seq_p)
{
  tree t, from, to, to_ptr;
  gimple gs;
  location_t loc = EXPR_LOCATION (*expr_p);

  /* Assert our assumptions, to abort instead of producing wrong code
     silently if they are not met.  Beware that the RHS CONSTRUCTOR might
     not be immediately exposed.  */
  from = TREE_OPERAND (*expr_p, 1);
  if (TREE_CODE (from) == WITH_SIZE_EXPR)
    from = TREE_OPERAND (from, 0);

  gcc_assert (TREE_CODE (from) == CONSTRUCTOR
	      && VEC_empty (constructor_elt, CONSTRUCTOR_ELTS (from)));

  /* Now proceed.  */
  to = TREE_OPERAND (*expr_p, 0);

  to_ptr = build_fold_addr_expr_loc (loc, to);
  gimplify_arg (&to_ptr, seq_p, loc);
  t = implicit_built_in_decls[BUILT_IN_MEMSET];

  gs = gimple_build_call (t, 3, to_ptr, integer_zero_node, size);

  if (want_value)
    {
      /* tmp = memset() */
      t = create_tmp_var (TREE_TYPE (to_ptr), NULL);
      gimple_call_set_lhs (gs, t);
      gimplify_seq_add_stmt (seq_p, gs);

      *expr_p = build1 (INDIRECT_REF, TREE_TYPE (to), t);
      return GS_ALL_DONE;
    }

  gimplify_seq_add_stmt (seq_p, gs);
  *expr_p = NULL;
  return GS_ALL_DONE;
}

/* A subroutine of gimplify_init_ctor_preeval.  Called via walk_tree,
   determine, cautiously, if a CONSTRUCTOR overlaps the lhs of an
   assignment.  Returns non-null if we detect a potential overlap.  */

struct gimplify_init_ctor_preeval_data
{
  /* The base decl of the lhs object.  May be NULL, in which case we
     have to assume the lhs is indirect.  */
  tree lhs_base_decl;

  /* The alias set of the lhs object.  */
  alias_set_type lhs_alias_set;
};

static tree
gimplify_init_ctor_preeval_1 (tree *tp, int *walk_subtrees, void *xdata)
{
  struct gimplify_init_ctor_preeval_data *data
    = (struct gimplify_init_ctor_preeval_data *) xdata;
  tree t = *tp;

  /* If we find the base object, obviously we have overlap.  */
  if (data->lhs_base_decl == t)
    return t;

  /* If the constructor component is indirect, determine if we have a
     potential overlap with the lhs.  The only bits of information we
     have to go on at this point are addressability and alias sets.  */
  if ((INDIRECT_REF_P (t)
       || TREE_CODE (t) == MEM_REF)
      && (!data->lhs_base_decl || TREE_ADDRESSABLE (data->lhs_base_decl))
      && alias_sets_conflict_p (data->lhs_alias_set, get_alias_set (t)))
    return t;

  /* If the constructor component is a call, determine if it can hide a
     potential overlap with the lhs through an INDIRECT_REF like above.
     ??? Ugh - this is completely broken.  In fact this whole analysis
     doesn't look conservative.  */
  if (TREE_CODE (t) == CALL_EXPR)
    {
      tree type, fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (t)));

      for (type = TYPE_ARG_TYPES (fntype); type; type = TREE_CHAIN (type))
	if (POINTER_TYPE_P (TREE_VALUE (type))
	    && (!data->lhs_base_decl || TREE_ADDRESSABLE (data->lhs_base_decl))
	    && alias_sets_conflict_p (data->lhs_alias_set,
				      get_alias_set
				        (TREE_TYPE (TREE_VALUE (type)))))
	  return t;
    }

  /* Types and decls contain no references; prune the walk below them.  */
  if (IS_TYPE_OR_DECL_P (t))
    *walk_subtrees = 0;
  return NULL;
}

/* A subroutine of gimplify_init_constructor.  Pre-evaluate EXPR,
   force values that overlap with the lhs (as described by *DATA)
   into temporaries.  */

static void
gimplify_init_ctor_preeval (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
			    struct gimplify_init_ctor_preeval_data *data)
{
  enum gimplify_status one;

  /* If the value is constant, then there's nothing to pre-evaluate.  */
  if (TREE_CONSTANT (*expr_p))
    {
      /* Ensure it does not have side effects, it might contain a reference to
	 the object we're initializing.  */
      gcc_assert (!TREE_SIDE_EFFECTS (*expr_p));
      return;
    }

  /* If the type has non-trivial constructors, we can't pre-evaluate.  */
  if (TREE_ADDRESSABLE (TREE_TYPE (*expr_p)))
    return;

  /* Recurse for nested constructors.  */
  if (TREE_CODE (*expr_p) == CONSTRUCTOR)
    {
      unsigned HOST_WIDE_INT ix;
      constructor_elt *ce;
      VEC(constructor_elt,gc) *v = CONSTRUCTOR_ELTS (*expr_p);

      FOR_EACH_VEC_ELT (constructor_elt, v, ix, ce)
	gimplify_init_ctor_preeval (&ce->value, pre_p, post_p, data);

      return;
    }

  /* If this is a variable sized type, we must remember the size.  */
  maybe_with_size_expr (expr_p);

  /* Gimplify the constructor element to something appropriate for the rhs
     of a MODIFY_EXPR.  Given that we know the LHS is an aggregate, we know
     the gimplifier will consider this a store to memory.  Doing this
     gimplification now means that we won't have to deal with complicated
     language-specific trees, nor trees like SAVE_EXPR that can induce
     exponential search behavior.  */
  one = gimplify_expr (expr_p, pre_p, post_p, is_gimple_mem_rhs, fb_rvalue);
  if (one == GS_ERROR)
    {
      *expr_p = NULL;
      return;
    }

  /* If we gimplified to a bare decl, we can be sure that it doesn't overlap
     with the lhs, since "a = { .x=a }" doesn't make sense.  This will
     always be true for all scalars, since is_gimple_mem_rhs insists on a
     temporary variable for them.  */
  if (DECL_P (*expr_p))
    return;

  /* If this is of variable size, we have no choice but to assume it doesn't
     overlap since we can't make a temporary for it.  */
  if (TREE_CODE (TYPE_SIZE (TREE_TYPE (*expr_p))) != INTEGER_CST)
    return;

  /* Otherwise, we must search for overlap ...  */
  if (!walk_tree (expr_p, gimplify_init_ctor_preeval_1, data, NULL))
    return;

  /* ... and if found, force the value into a temporary.  */
  *expr_p = get_formal_tmp_var (*expr_p, pre_p);
}

/* A subroutine of gimplify_init_ctor_eval.  Create a loop for
   a RANGE_EXPR in a CONSTRUCTOR for an array.

      var = lower;
    loop_entry:
      object[var] = value;
      if (var == upper)
	goto loop_exit;
      var = var + 1;
      goto loop_entry;
    loop_exit:

   We increment var _after_ the loop exit check because we might otherwise
   fail if upper == TYPE_MAX_VALUE (type for upper).

   Note that we never have to deal with SAVE_EXPRs here, because this has
   already been taken care of for us, in gimplify_init_ctor_preeval().  */

static void gimplify_init_ctor_eval (tree, VEC(constructor_elt,gc) *,
				     gimple_seq *, bool);

static void
gimplify_init_ctor_eval_range (tree object, tree lower, tree upper,
			       tree value, tree array_elt_type,
			       gimple_seq *pre_p, bool cleared)
{
  tree loop_entry_label, loop_exit_label, fall_thru_label;
  tree var, var_type, cref, tmp;

  loop_entry_label = create_artificial_label (UNKNOWN_LOCATION);
  loop_exit_label = create_artificial_label (UNKNOWN_LOCATION);
  fall_thru_label = create_artificial_label (UNKNOWN_LOCATION);

  /* Create and initialize the index variable.  */
  var_type = TREE_TYPE (upper);
  var = create_tmp_var (var_type, NULL);
  gimplify_seq_add_stmt (pre_p, gimple_build_assign (var, lower));

  /* Add the loop entry label.  */
  gimplify_seq_add_stmt (pre_p, gimple_build_label (loop_entry_label));

  /* Build the reference.  */
  cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
		 var, NULL_TREE, NULL_TREE);

  /* If we are a constructor, just call gimplify_init_ctor_eval to do
     the store.  Otherwise just assign value to the reference.  */

  if (TREE_CODE (value) == CONSTRUCTOR)
    /* NB we might have to call ourself recursively through
       gimplify_init_ctor_eval if the value is a constructor.  */
    gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
			     pre_p, cleared);
  else
    gimplify_seq_add_stmt (pre_p, gimple_build_assign (cref, value));

  /* We exit the loop when the index var is equal to the upper bound.  */
  gimplify_seq_add_stmt (pre_p,
			 gimple_build_cond (EQ_EXPR, var, upper,
					    loop_exit_label, fall_thru_label));

  gimplify_seq_add_stmt (pre_p, gimple_build_label (fall_thru_label));

  /* Otherwise, increment the index var...  */
  tmp = build2 (PLUS_EXPR, var_type, var,
		fold_convert (var_type, integer_one_node));
  gimplify_seq_add_stmt (pre_p, gimple_build_assign (var, tmp));

  /* ...and jump back to the loop entry.  */
  gimplify_seq_add_stmt (pre_p, gimple_build_goto (loop_entry_label));

  /* Add the loop exit label.  */
  gimplify_seq_add_stmt (pre_p, gimple_build_label (loop_exit_label));
}

/* Return true if FDECL is accessing a field that is zero sized.  */

static bool
zero_sized_field_decl (const_tree fdecl)
{
  if (TREE_CODE (fdecl) == FIELD_DECL && DECL_SIZE (fdecl)
      && integer_zerop (DECL_SIZE (fdecl)))
    return true;
  return false;
}

/* Return true if TYPE is zero sized.  */

static bool
zero_sized_type (const_tree type)
{
  if (AGGREGATE_TYPE_P (type) && TYPE_SIZE (type)
      && integer_zerop (TYPE_SIZE (type)))
    return true;
  return false;
}

/* A subroutine of gimplify_init_constructor.  Generate individual
   MODIFY_EXPRs for a CONSTRUCTOR.
   OBJECT is the LHS against which the
   assignments should happen.  ELTS is the CONSTRUCTOR_ELTS of the
   CONSTRUCTOR.  CLEARED is true if the entire LHS object has been
   zeroed first.  */

static void
gimplify_init_ctor_eval (tree object, VEC(constructor_elt,gc) *elts,
			 gimple_seq *pre_p, bool cleared)
{
  tree array_elt_type = NULL;
  unsigned HOST_WIDE_INT ix;
  tree purpose, value;

  if (TREE_CODE (TREE_TYPE (object)) == ARRAY_TYPE)
    array_elt_type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (object)));

  FOR_EACH_CONSTRUCTOR_ELT (elts, ix, purpose, value)
    {
      tree cref;

      /* NULL values are created above for gimplification errors.  */
      if (value == NULL)
	continue;

      /* A zero value is redundant if the object was already cleared.  */
      if (cleared && initializer_zerop (value))
	continue;

      /* ??? Here's to hoping the front end fills in all of the indices,
	 so we don't have to figure out what's missing ourselves.  */
      gcc_assert (purpose);

      /* Skip zero-sized fields, unless value has side-effects.  This can
	 happen with calls to functions returning a zero-sized type, which
	 we shouldn't discard.  As a number of downstream passes don't
	 expect sets of zero-sized fields, we rely on the gimplification of
	 the MODIFY_EXPR we make below to drop the assignment statement.  */
      if (! TREE_SIDE_EFFECTS (value) && zero_sized_field_decl (purpose))
	continue;

      /* If we have a RANGE_EXPR, we have to build a loop to assign the
	 whole range.  */
      if (TREE_CODE (purpose) == RANGE_EXPR)
	{
	  tree lower = TREE_OPERAND (purpose, 0);
	  tree upper = TREE_OPERAND (purpose, 1);

	  /* If the lower bound is equal to upper, just treat it as if
	     upper was the index.  */
	  if (simple_cst_equal (lower, upper))
	    purpose = upper;
	  else
	    {
	      gimplify_init_ctor_eval_range (object, lower, upper, value,
					     array_elt_type, pre_p, cleared);
	      continue;
	    }
	}

      if (array_elt_type)
	{
	  /* Do not use bitsizetype for ARRAY_REF indices.  */
	  if (TYPE_DOMAIN (TREE_TYPE (object)))
	    purpose
	      = fold_convert (TREE_TYPE (TYPE_DOMAIN (TREE_TYPE (object))),
			      purpose);
	  cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
			 purpose, NULL_TREE, NULL_TREE);
	}
      else
	{
	  gcc_assert (TREE_CODE (purpose) == FIELD_DECL);
	  cref = build3 (COMPONENT_REF, TREE_TYPE (purpose),
			 unshare_expr (object), purpose, NULL_TREE);
	}

      if (TREE_CODE (value) == CONSTRUCTOR
	  && TREE_CODE (TREE_TYPE (value)) != VECTOR_TYPE)
	/* Recurse into nested aggregate constructors; vector constructors
	   are kept whole and assigned below.  */
	gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
				 pre_p, cleared);
      else
	{
	  tree init = build2 (INIT_EXPR, TREE_TYPE (cref), cref, value);
	  gimplify_and_add (init, pre_p);
	  /* INIT has been consumed by gimplification; release it to GC.  */
	  ggc_free (init);
	}
    }
}

/* Returns the appropriate RHS predicate for this LHS.  */

gimple_predicate
rhs_predicate_for (tree lhs)
{
  if (is_gimple_reg (lhs))
    return is_gimple_reg_rhs_or_call;
  else
    return is_gimple_mem_rhs_or_call;
}

/* Gimplify a C99 compound literal expression.  This just means adding
   the DECL_EXPR before the current statement and using its anonymous
   decl instead.  */

static enum gimplify_status
gimplify_compound_literal_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree decl_s = COMPOUND_LITERAL_EXPR_DECL_EXPR (*expr_p);
  tree decl = DECL_EXPR_DECL (decl_s);
  /* Mark the decl as addressable if the compound literal
     expression is addressable now, otherwise it is marked too late
     after we gimplify the initialization expression.  */
  if (TREE_ADDRESSABLE (*expr_p))
    TREE_ADDRESSABLE (decl) = 1;

  /* Preliminarily mark non-addressed complex variables as eligible
     for promotion to gimple registers.  We'll transform their uses
     as we find them.  */
  if ((TREE_CODE (TREE_TYPE (decl)) == COMPLEX_TYPE
       || TREE_CODE (TREE_TYPE (decl)) == VECTOR_TYPE)
      && !TREE_THIS_VOLATILE (decl)
      && !needs_to_live_in_memory (decl))
    DECL_GIMPLE_REG_P (decl) = 1;

  /* This decl isn't mentioned in the enclosing block, so add it to the
     list of temps.  FIXME it seems a bit of a kludge to say that
     anonymous artificial vars aren't pushed, but everything else is.  */
  if (DECL_NAME (decl) == NULL_TREE && !DECL_SEEN_IN_BIND_EXPR_P (decl))
    gimple_add_tmp_var (decl);

  gimplify_and_add (decl_s, pre_p);
  *expr_p = decl;
  return GS_OK;
}

/* Optimize embedded COMPOUND_LITERAL_EXPRs within a CONSTRUCTOR,
   return a new CONSTRUCTOR if something changed.  */

static tree
optimize_compound_literals_in_ctor (tree orig_ctor)
{
  tree ctor = orig_ctor;
  VEC(constructor_elt,gc) *elts = CONSTRUCTOR_ELTS (ctor);
  unsigned int idx, num = VEC_length (constructor_elt, elts);

  for (idx = 0; idx < num; idx++)
    {
      tree value = VEC_index (constructor_elt, elts, idx)->value;
      tree newval = value;
      if (TREE_CODE (value) == CONSTRUCTOR)
	newval = optimize_compound_literals_in_ctor (value);
      else if (TREE_CODE (value) == COMPOUND_LITERAL_EXPR)
	{
	  tree decl_s = COMPOUND_LITERAL_EXPR_DECL_EXPR (value);
	  tree decl = DECL_EXPR_DECL (decl_s);
	  tree init = DECL_INITIAL (decl);

	  /* Replace the literal only when neither the literal nor its decl
	     has its address taken and an initializer exists.  */
	  if (!TREE_ADDRESSABLE (value)
	      && !TREE_ADDRESSABLE (decl)
	      && init)
	    newval = optimize_compound_literals_in_ctor (init);
	}
      if (newval == value)
	continue;

      /* Copy-on-write: only clone the CONSTRUCTOR once, the first time a
	 replacement actually happens.  */
      if (ctor == orig_ctor)
	{
	  ctor = copy_node (orig_ctor);
	  CONSTRUCTOR_ELTS (ctor) = VEC_copy (constructor_elt, gc, elts);
	  elts = CONSTRUCTOR_ELTS (ctor);
	}
      VEC_index (constructor_elt, elts, idx)->value = newval;
    }

  return ctor;
}

/* A subroutine of gimplify_modify_expr.  Break out elements of a
   CONSTRUCTOR used as an initializer into separate MODIFY_EXPRs.

   Note that we still need to clear any elements that don't have explicit
   initializers, so if not all elements are initialized we keep the
   original MODIFY_EXPR, we just remove all of the constructor elements.

   If NOTIFY_TEMP_CREATION is true, do not gimplify, just return
   GS_ERROR if we would have to create a temporary when gimplifying
   this constructor.  Otherwise, return GS_OK.

   If NOTIFY_TEMP_CREATION is false, just do the gimplification.
*/

static enum gimplify_status
gimplify_init_constructor (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
			   bool want_value, bool notify_temp_creation)
{
  tree object, ctor, type;
  enum gimplify_status ret;
  VEC(constructor_elt,gc) *elts;

  gcc_assert (TREE_CODE (TREE_OPERAND (*expr_p, 1)) == CONSTRUCTOR);

  if (!notify_temp_creation)
    {
      ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
			   is_gimple_lvalue, fb_lvalue);
      if (ret == GS_ERROR)
	return ret;
    }

  object = TREE_OPERAND (*expr_p, 0);
  ctor = TREE_OPERAND (*expr_p, 1) =
    optimize_compound_literals_in_ctor (TREE_OPERAND (*expr_p, 1));
  type = TREE_TYPE (ctor);
  elts = CONSTRUCTOR_ELTS (ctor);
  ret = GS_ALL_DONE;

  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
    case ARRAY_TYPE:
      {
	struct gimplify_init_ctor_preeval_data preeval_data;
	HOST_WIDE_INT num_type_elements, num_ctor_elements;
	HOST_WIDE_INT num_nonzero_elements;
	bool cleared, valid_const_initializer;

	/* Aggregate types must lower constructors to initialization of
	   individual elements.  The exception is that a CONSTRUCTOR node
	   with no elements indicates zero-initialization of the whole.  */
	if (VEC_empty (constructor_elt, elts))
	  {
	    if (notify_temp_creation)
	      return GS_OK;
	    break;
	  }

	/* Fetch information about the constructor to direct later processing.
	   We might want to make static versions of it in various cases, and
	   can only do so if it known to be a valid constant initializer.  */
	valid_const_initializer
	  = categorize_ctor_elements (ctor, &num_nonzero_elements,
				      &num_ctor_elements, &cleared);

	/* If a const aggregate variable is being initialized, then it
	   should never be a lose to promote the variable to be static.  */
	if (valid_const_initializer
	    && num_nonzero_elements > 1
	    && TREE_READONLY (object)
	    && TREE_CODE (object) == VAR_DECL
	    && (flag_merge_constants >= 2 || !TREE_ADDRESSABLE (object)))
	  {
	    if (notify_temp_creation)
	      return GS_ERROR;
	    DECL_INITIAL (object) = ctor;
	    TREE_STATIC (object) = 1;
	    if (!DECL_NAME (object))
	      DECL_NAME (object) = create_tmp_var_name ("C");
	    walk_tree (&DECL_INITIAL (object), force_labels_r, NULL, NULL);

	    /* ??? C++ doesn't automatically append a .<number> to the
	       assembler name, and even when it does, it looks a FE private
	       data structures to figure out what that number should be,
	       which are not set for this variable.  I suppose this is
	       important for local statics for inline functions, which aren't
	       "local" in the object file sense.  So in order to get a unique
	       TU-local symbol, we must invoke the lhd version now.  */
	    lhd_set_decl_assembler_name (object);

	    *expr_p = NULL_TREE;
	    break;
	  }

	/* If there are "lots" of initialized elements, even discounting
	   those that are not address constants (and thus *must* be
	   computed at runtime), then partition the constructor into
	   constant and non-constant parts.  Block copy the constant
	   parts in, then generate code for the non-constant parts.  */
	/* TODO.  There's code in cp/typeck.c to do this.  */

	num_type_elements = count_type_elements (type, true);

	/* If count_type_elements could not determine number of type elements
	   for a constant-sized object, assume clearing is needed.
	   Don't do this for variable-sized objects, as store_constructor
	   will ignore the clearing of variable-sized objects.  */
	if (num_type_elements < 0 && int_size_in_bytes (type) >= 0)
	  cleared = true;
	/* If there are "lots" of zeros, then block clear the object first.  */
	else if (num_type_elements - num_nonzero_elements
		 > CLEAR_RATIO (optimize_function_for_speed_p (cfun))
		 && num_nonzero_elements < num_type_elements/4)
	  cleared = true;
	/* ??? This bit ought not be needed.  For any element not present
	   in the initializer, we should simply set them to zero.  Except
	   we'd need to *find* the elements that are not present, and that
	   requires trickery to avoid quadratic compile-time behavior in
	   large cases or excessive memory use in small cases.  */
	else if (num_ctor_elements < num_type_elements)
	  cleared = true;

	/* If there are "lots" of initialized elements, and all of them
	   are valid address constants, then the entire initializer can
	   be dropped to memory, and then memcpy'd out.  Don't do this
	   for sparse arrays, though, as it's more efficient to follow
	   the standard CONSTRUCTOR behavior of memset followed by
	   individual element initialization.  Also don't do this for small
	   all-zero initializers (which aren't big enough to merit
	   clearing), and don't try to make bitwise copies of
	   TREE_ADDRESSABLE types.  */
	if (valid_const_initializer
	    && !(cleared || num_nonzero_elements == 0)
	    && !TREE_ADDRESSABLE (type))
	  {
	    HOST_WIDE_INT size = int_size_in_bytes (type);
	    unsigned int align;

	    /* ??? We can still get unbounded array types, at least
	       from the C++ front end.  This seems wrong, but attempt
	       to work around it for now.  */
	    if (size < 0)
	      {
		size = int_size_in_bytes (TREE_TYPE (object));
		if (size >= 0)
		  TREE_TYPE (ctor) = type = TREE_TYPE (object);
	      }

	    /* Find the maximum alignment we can assume for the object.  */
	    /* ??? Make use of DECL_OFFSET_ALIGN.  */
	    if (DECL_P (object))
	      align = DECL_ALIGN (object);
	    else
	      align = TYPE_ALIGN (type);

	    if (size > 0
		&& num_nonzero_elements > 1
		&& !can_move_by_pieces (size, align))
	      {
		if (notify_temp_creation)
		  return GS_ERROR;

		walk_tree (&ctor, force_labels_r, NULL, NULL);
		ctor = tree_output_constant_def (ctor);
		if (!useless_type_conversion_p (type, TREE_TYPE (ctor)))
		  ctor = build1 (VIEW_CONVERT_EXPR, type, ctor);
		TREE_OPERAND (*expr_p, 1) = ctor;

		/* This is no longer an assignment of a CONSTRUCTOR, but
		   we still may have processing to do on the LHS.  So
		   pretend we didn't do anything here to let that happen.  */
		return GS_UNHANDLED;
	      }
	  }

	/* If the target is volatile, we have non-zero elements and more than
	   one field to assign, initialize the target from a temporary.  */
	if (TREE_THIS_VOLATILE (object)
	    && !TREE_ADDRESSABLE (type)
	    && num_nonzero_elements > 0
	    && VEC_length (constructor_elt, elts) > 1)
	  {
	    tree temp = create_tmp_var (TYPE_MAIN_VARIANT (type), NULL);
	    TREE_OPERAND (*expr_p, 0) = temp;
	    *expr_p = build2 (COMPOUND_EXPR, TREE_TYPE (*expr_p),
			      *expr_p,
			      build2 (MODIFY_EXPR, void_type_node,
				      object, temp));
	    return GS_OK;
	  }

	if (notify_temp_creation)
	  return GS_OK;

	/* If there are nonzero elements and if needed, pre-evaluate to capture
	   elements overlapping with the lhs into temporaries.  We must do this
	   before clearing to fetch the values before they are zeroed-out.  */
	if (num_nonzero_elements > 0 && TREE_CODE (*expr_p) != INIT_EXPR)
	  {
	    preeval_data.lhs_base_decl = get_base_address (object);
	    if (!DECL_P (preeval_data.lhs_base_decl))
	      preeval_data.lhs_base_decl = NULL;
	    preeval_data.lhs_alias_set = get_alias_set (object);

	    gimplify_init_ctor_preeval (&TREE_OPERAND (*expr_p, 1),
					pre_p, post_p, &preeval_data);
	  }

	if (cleared)
	  {
	    /* Zap the CONSTRUCTOR element list, which simplifies this case.
	       Note that we still have to gimplify, in order to handle the
	       case of variable sized types.  Avoid shared tree structures.  */
	    CONSTRUCTOR_ELTS (ctor) = NULL;
	    TREE_SIDE_EFFECTS (ctor) = 0;
	    object = unshare_expr (object);
	    gimplify_stmt (expr_p, pre_p);
	  }

	/* If we have not block cleared the object, or if there are nonzero
	   elements in the constructor, add assignments to the individual
	   scalar fields of the object.  */
	if (!cleared || num_nonzero_elements > 0)
	  gimplify_init_ctor_eval (object, elts, pre_p, cleared);

	*expr_p = NULL_TREE;
      }
      break;

    case COMPLEX_TYPE:
      {
	tree r, i;

	if (notify_temp_creation)
	  return GS_OK;

	/* Extract the real and imaginary parts out of the ctor.  */
	gcc_assert (VEC_length (constructor_elt, elts) == 2);
	r = VEC_index (constructor_elt, elts, 0)->value;
	i = VEC_index (constructor_elt, elts, 1)->value;
	if (r == NULL || i == NULL)
	  {
	    tree zero = build_zero_cst (TREE_TYPE (type));
	    if (r == NULL)
	      r = zero;
	    if (i == NULL)
	      i = zero;
	  }

	/* Complex types have either COMPLEX_CST or COMPLEX_EXPR to
	   represent creation of a complex value.  */
	if (TREE_CONSTANT (r) && TREE_CONSTANT (i))
	  {
	    ctor = build_complex (type, r, i);
	    TREE_OPERAND (*expr_p, 1) = ctor;
	  }
	else
	  {
	    ctor = build2 (COMPLEX_EXPR, type, r, i);
	    TREE_OPERAND (*expr_p, 1) = ctor;
	    ret = gimplify_expr (&TREE_OPERAND (*expr_p, 1),
				 pre_p,
				 post_p,
				 rhs_predicate_for (TREE_OPERAND (*expr_p, 0)),
				 fb_rvalue);
	  }
      }
      break;

    case VECTOR_TYPE:
      {
	unsigned HOST_WIDE_INT ix;
	constructor_elt *ce;

	if (notify_temp_creation)
	  return GS_OK;

	/* Go ahead and simplify constant constructors to VECTOR_CST.  */
	if (TREE_CONSTANT (ctor))
	  {
	    bool constant_p = true;
	    tree value;

	    /* Even when ctor is constant, it might contain non-*_CST
	       elements, such as addresses or trapping values like
	       1.0/0.0 - 1.0/0.0.  Such expressions don't belong
	       in VECTOR_CST nodes.  */
	    FOR_EACH_CONSTRUCTOR_VALUE (elts, ix, value)
	      if (!CONSTANT_CLASS_P (value))
		{
		  constant_p = false;
		  break;
		}

	    if (constant_p)
	      {
		TREE_OPERAND (*expr_p, 1) = build_vector_from_ctor (type, elts);
		break;
	      }

	    /* Don't reduce an initializer constant even if we can't
	       make a VECTOR_CST.  It won't do anything for us, and it'll
	       prevent us from representing it as a single constant.  */
	    if (initializer_constant_valid_p (ctor, type))
	      break;

	    TREE_CONSTANT (ctor) = 0;
	  }

	/* Vector types use CONSTRUCTOR all the way through gimple
	  compilation as a general initializer.  */
	FOR_EACH_VEC_ELT (constructor_elt, elts, ix, ce)
	  {
	    enum gimplify_status tret;
	    tret = gimplify_expr (&ce->value, pre_p, post_p,
				  is_gimple_val, fb_rvalue);
	    if (tret == GS_ERROR)
	      ret = GS_ERROR;
	  }
	/* A memory (non-register) destination needs the CONSTRUCTOR forced
	   into a formal temporary first.  */
	if (!is_gimple_reg (TREE_OPERAND (*expr_p, 0)))
	  TREE_OPERAND (*expr_p, 1) = get_formal_tmp_var (ctor, pre_p);
      }
      break;

    default:
      /* So how did we get a CONSTRUCTOR for a scalar type?  */
      gcc_unreachable ();
    }

  if (ret == GS_ERROR)
    return GS_ERROR;
  else if (want_value)
    {
      *expr_p = object;
      return GS_OK;
    }
  else
    {
      /* If we have gimplified both sides of the initializer but have
	 not emitted an assignment, do so now.  */
      if (*expr_p)
	{
	  tree lhs = TREE_OPERAND (*expr_p, 0);
	  tree rhs = TREE_OPERAND (*expr_p, 1);
	  gimple init = gimple_build_assign (lhs, rhs);
	  gimplify_seq_add_stmt (pre_p, init);
	  *expr_p = NULL;
	}

      return GS_ALL_DONE;
    }
}

/* Given a pointer value OP0, return a simplified version of an
   indirection through OP0, or NULL_TREE if no simplification is
   possible.  Note that the resulting type may be different from
   the type pointed to in the sense that it is still compatible
   from the langhooks point of view.
*/

/* See the comment immediately above: fold an indirection through
   pointer-valued tree T, returning the simplified reference or
   NULL_TREE when no folding applies.  The returned type may differ
   from T's pointed-to type but must be langhook-compatible with it.  */

tree
gimple_fold_indirect_ref (tree t)
{
  tree ptype = TREE_TYPE (t), type = TREE_TYPE (ptype);
  tree sub = t;
  tree subtype;

  /* Look through no-op conversions; we can only fold an indirection
     through something of pointer type.  */
  STRIP_NOPS (sub);
  subtype = TREE_TYPE (sub);
  if (!POINTER_TYPE_P (subtype))
    return NULL_TREE;

  if (TREE_CODE (sub) == ADDR_EXPR)
    {
      tree op = TREE_OPERAND (sub, 0);
      tree optype = TREE_TYPE (op);
      /* *&p => p */
      if (useless_type_conversion_p (type, optype))
        return op;

      /* *(foo *)&fooarray => fooarray[0] */
      if (TREE_CODE (optype) == ARRAY_TYPE
          && TREE_CODE (TYPE_SIZE (TREE_TYPE (optype))) == INTEGER_CST
          && useless_type_conversion_p (type, TREE_TYPE (optype)))
        {
          tree type_domain = TYPE_DOMAIN (optype);
          tree min_val = size_zero_node;
          /* The array's lower bound need not be zero.  */
          if (type_domain && TYPE_MIN_VALUE (type_domain))
            min_val = TYPE_MIN_VALUE (type_domain);
          if (TREE_CODE (min_val) == INTEGER_CST)
            return build4 (ARRAY_REF, type, op, min_val,
                           NULL_TREE, NULL_TREE);
        }
      /* *(foo *)&complexfoo => __real__ complexfoo */
      else if (TREE_CODE (optype) == COMPLEX_TYPE
               && useless_type_conversion_p (type, TREE_TYPE (optype)))
        return fold_build1 (REALPART_EXPR, type, op);
      /* *(foo *)&vectorfoo => BIT_FIELD_REF<vectorfoo,...> */
      else if (TREE_CODE (optype) == VECTOR_TYPE
               && useless_type_conversion_p (type, TREE_TYPE (optype)))
        {
          tree part_width = TYPE_SIZE (type);
          tree index = bitsize_int (0);
          return fold_build3 (BIT_FIELD_REF, type, op, part_width, index);
        }
    }

  /* *(p + CST) -> ...  */
  if (TREE_CODE (sub) == POINTER_PLUS_EXPR
      && TREE_CODE (TREE_OPERAND (sub, 1)) == INTEGER_CST)
    {
      tree addr = TREE_OPERAND (sub, 0);
      tree off = TREE_OPERAND (sub, 1);
      tree addrtype;

      STRIP_NOPS (addr);
      addrtype = TREE_TYPE (addr);

      /* ((foo*)&vectorfoo)[1] -> BIT_FIELD_REF<vectorfoo,...> */
      if (TREE_CODE (addr) == ADDR_EXPR
          && TREE_CODE (TREE_TYPE (addrtype)) == VECTOR_TYPE
          && useless_type_conversion_p (type,
                                        TREE_TYPE (TREE_TYPE (addrtype))))
        {
          HOST_WIDE_INT offset = tree_low_cst (off, 0);
          tree part_width = TYPE_SIZE (type);
          unsigned HOST_WIDE_INT part_widthi
            = tree_low_cst (part_width, 0) / BITS_PER_UNIT;
          unsigned HOST_WIDE_INT indexi = offset * BITS_PER_UNIT;
          tree index = bitsize_int (indexi);
          /* Only fold when the byte offset lands inside the vector.  */
          if (offset / part_widthi
              <= TYPE_VECTOR_SUBPARTS (TREE_TYPE (addrtype)))
            return fold_build3 (BIT_FIELD_REF, type, TREE_OPERAND (addr, 0),
                                part_width, index);
        }

      /* ((foo*)&complexfoo)[1] -> __imag__ complexfoo */
      if (TREE_CODE (addr) == ADDR_EXPR
          && TREE_CODE (TREE_TYPE (addrtype)) == COMPLEX_TYPE
          && useless_type_conversion_p (type,
                                        TREE_TYPE (TREE_TYPE (addrtype))))
        {
          tree size = TYPE_SIZE_UNIT (type);
          /* The offset must be exactly one element, i.e. the imag part.  */
          if (tree_int_cst_equal (size, off))
            return fold_build1 (IMAGPART_EXPR, type, TREE_OPERAND (addr, 0));
        }

      /* *(p + CST) -> MEM_REF <p, CST>.  */
      if (TREE_CODE (addr) != ADDR_EXPR
          || DECL_P (TREE_OPERAND (addr, 0)))
        return fold_build2 (MEM_REF, type, addr,
                            build_int_cst_wide (ptype,
                                                TREE_INT_CST_LOW (off),
                                                TREE_INT_CST_HIGH (off)));
    }

  /* *(foo *)fooarrptr => (*fooarrptr)[0] */
  if (TREE_CODE (TREE_TYPE (subtype)) == ARRAY_TYPE
      && TREE_CODE (TYPE_SIZE (TREE_TYPE (TREE_TYPE (subtype)))) == INTEGER_CST
      && useless_type_conversion_p (type, TREE_TYPE (TREE_TYPE (subtype))))
    {
      tree type_domain;
      tree min_val = size_zero_node;
      tree osub = sub;
      /* Recursively fold the inner indirection first.  */
      sub = gimple_fold_indirect_ref (sub);
      if (!sub)
        sub = build1 (INDIRECT_REF, TREE_TYPE (subtype), osub);
      type_domain = TYPE_DOMAIN (TREE_TYPE (sub));
      if (type_domain && TYPE_MIN_VALUE (type_domain))
        min_val = TYPE_MIN_VALUE (type_domain);
      if (TREE_CODE (min_val) == INTEGER_CST)
        return build4 (ARRAY_REF, type, sub, min_val, NULL_TREE, NULL_TREE);
    }

  return NULL_TREE;
}

/* Given a pointer value OP0, return a simplified version of an
   indirection through OP0, or NULL_TREE if no simplification is
   possible.  This may only be applied to a rhs of an expression.
   Note that the resulting type may be different from the type pointed
   to in the sense that it is still compatible from the langhooks
   point of view.  */

static tree
gimple_fold_indirect_ref_rhs (tree t)
{
  return gimple_fold_indirect_ref (t);
}

/* Subroutine of gimplify_modify_expr to do simplifications of
   MODIFY_EXPRs based on the code of the RHS.  We loop for as long as
   something changes.  */

static enum gimplify_status
gimplify_modify_expr_rhs (tree *expr_p, tree *from_p, tree *to_p,
                          gimple_seq *pre_p, gimple_seq *post_p,
                          bool want_value)
{
  enum gimplify_status ret = GS_UNHANDLED;
  bool changed;

  do
    {
      changed = false;
      switch (TREE_CODE (*from_p))
        {
        case VAR_DECL:
          /* If we're assigning from a read-only variable initialized with
             a constructor, do the direct assignment from the constructor,
             but only if neither source nor target are volatile since this
             latter assignment might end up being done on a per-field
             basis.  */
          if (DECL_INITIAL (*from_p)
              && TREE_READONLY (*from_p)
              && !TREE_THIS_VOLATILE (*from_p)
              && !TREE_THIS_VOLATILE (*to_p)
              && TREE_CODE (DECL_INITIAL (*from_p)) == CONSTRUCTOR)
            {
              tree old_from = *from_p;
              enum gimplify_status subret;

              /* Move the constructor into the RHS.  */
              *from_p = unshare_expr (DECL_INITIAL (*from_p));

              /* Let's see if gimplify_init_constructor will need to put
                 it in memory.  */
              subret = gimplify_init_constructor (expr_p, NULL, NULL,
                                                  false, true);
              if (subret == GS_ERROR)
                {
                  /* If so, revert the change.
                  */
                  *from_p = old_from;
                }
              else
                {
                  ret = GS_OK;
                  changed = true;
                }
            }
          break;
        case INDIRECT_REF:
          {
            /* If we have code like

                 *(const A*)(A*)&x

               where the type of "x" is a (possibly cv-qualified variant
               of "A"), treat the entire expression as identical to "x".
               This kind of code arises in C++ when an object is bound
               to a const reference, and if "x" is a TARGET_EXPR we want
               to take advantage of the optimization below.  */
            bool volatile_p = TREE_THIS_VOLATILE (*from_p);
            tree t = gimple_fold_indirect_ref_rhs (TREE_OPERAND (*from_p, 0));
            if (t)
              {
                /* Preserve the volatility of the original indirection
                   on the folded replacement.  */
                if (TREE_THIS_VOLATILE (t) != volatile_p)
                  {
                    if (TREE_CODE_CLASS (TREE_CODE (t)) == tcc_declaration)
                      t = build_simple_mem_ref_loc (EXPR_LOCATION (*from_p),
                                                    build_fold_addr_expr (t));
                    if (REFERENCE_CLASS_P (t))
                      TREE_THIS_VOLATILE (t) = volatile_p;
                  }
                *from_p = t;
                ret = GS_OK;
                changed = true;
              }
            break;
          }

        case TARGET_EXPR:
          {
            /* If we are initializing something from a TARGET_EXPR, strip
               the TARGET_EXPR and initialize it directly, if possible.
               This can't be done if the initializer is void, since that
               implies that the temporary is set in some non-trivial way.

               ??? What about code that pulls out the temp and uses it
               elsewhere? I think that such code never uses the TARGET_EXPR
               as an initializer.  If I'm wrong, we'll die because the temp
               won't have any RTL.  In that case, I guess we'll need to
               replace references somehow.  */
            tree init = TARGET_EXPR_INITIAL (*from_p);

            if (init
                && !VOID_TYPE_P (TREE_TYPE (init)))
              {
                *from_p = init;
                ret = GS_OK;
                changed = true;
              }
          }
          break;

        case COMPOUND_EXPR:
          /* Remove any COMPOUND_EXPR in the RHS so the following cases
             will be caught.  */
          gimplify_compound_expr (from_p, pre_p, true);
          ret = GS_OK;
          changed = true;
          break;

        case CONSTRUCTOR:
          /* If we already made some changes, let the front end have a
             crack at this before we break it down.  */
          if (ret != GS_UNHANDLED)
            break;
          /* If we're initializing from a CONSTRUCTOR, break this into
             individual MODIFY_EXPRs.  */
          return gimplify_init_constructor (expr_p, pre_p, post_p, want_value,
                                            false);

        case COND_EXPR:
          /* If we're assigning to a non-register type, push the assignment
             down into the branches.  This is mandatory for ADDRESSABLE
             types, since we cannot generate temporaries for such, but it
             saves a copy in other cases as well.  */
          if (!is_gimple_reg_type (TREE_TYPE (*from_p)))
            {
              /* This code should mirror the code in gimplify_cond_expr. */
              enum tree_code code = TREE_CODE (*expr_p);
              tree cond = *from_p;
              tree result = *to_p;

              ret = gimplify_expr (&result, pre_p, post_p,
                                   is_gimple_lvalue, fb_lvalue);
              if (ret != GS_ERROR)
                ret = GS_OK;

              /* Move the assignment into each non-void arm of the
                 COND_EXPR, then make the whole conditional void.  */
              if (TREE_TYPE (TREE_OPERAND (cond, 1)) != void_type_node)
                TREE_OPERAND (cond, 1)
                  = build2 (code, void_type_node, result,
                            TREE_OPERAND (cond, 1));
              if (TREE_TYPE (TREE_OPERAND (cond, 2)) != void_type_node)
                TREE_OPERAND (cond, 2)
                  = build2 (code, void_type_node, unshare_expr (result),
                            TREE_OPERAND (cond, 2));

              TREE_TYPE (cond) = void_type_node;
              recalculate_side_effects (cond);

              if (want_value)
                {
                  gimplify_and_add (cond, pre_p);
                  *expr_p = unshare_expr (result);
                }
              else
                *expr_p = cond;
              return ret;
            }
          break;

        case CALL_EXPR:
          /* For calls that return in memory, give *to_p as the CALL_EXPR's
             return slot so that we don't generate a temporary.  */
          if (!CALL_EXPR_RETURN_SLOT_OPT (*from_p)
              && aggregate_value_p (*from_p, *from_p))
            {
              bool use_target;

              if (!(rhs_predicate_for (*to_p))(*from_p))
                /* If we need a temporary, *to_p isn't accurate.  */
                use_target = false;
              else if (TREE_CODE (*to_p) == RESULT_DECL
                       && DECL_NAME (*to_p) == NULL_TREE
                       && needs_to_live_in_memory (*to_p))
                /* It's OK to use the return slot directly unless it's an
                   NRV.  */
                use_target = true;
              else if (is_gimple_reg_type (TREE_TYPE (*to_p))
                       || (DECL_P (*to_p) && DECL_REGISTER (*to_p)))
                /* Don't force regs into memory.  */
                use_target = false;
              else if (TREE_CODE (*expr_p) == INIT_EXPR)
                /* It's OK to use the target directly if it's being
                   initialized.  */
                use_target = true;
              else if (!is_gimple_non_addressable (*to_p))
                /* Don't use the original target if it's already addressable;
                   if its address escapes, and the called function uses the
                   NRV optimization, a conforming program could see *to_p
                   change before the called function returns; see c++/19317.
                   When optimizing, the return_slot pass marks more functions
                   as safe after we have escape info.  */
                use_target = false;
              else
                use_target = true;

              if (use_target)
                {
                  CALL_EXPR_RETURN_SLOT_OPT (*from_p) = 1;
                  mark_addressable (*to_p);
                }
            }
          break;

        case WITH_SIZE_EXPR:
          /* Likewise for calls that return an aggregate of non-constant
             size, since we would not be able to generate a temporary at
             all.  */
          if (TREE_CODE (TREE_OPERAND (*from_p, 0)) == CALL_EXPR)
            {
              *from_p = TREE_OPERAND (*from_p, 0);
              /* We don't change ret in this case because the
                 WITH_SIZE_EXPR might have been added in
                 gimplify_modify_expr, so returning GS_OK would lead to an
                 infinite loop.  */
              changed = true;
            }
          break;

          /* If we're initializing from a container, push the initialization
             inside it.  */
        case CLEANUP_POINT_EXPR:
        case BIND_EXPR:
        case STATEMENT_LIST:
          {
            tree wrap = *from_p;
            tree t;

            ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_min_lval,
                                 fb_lvalue);
            if (ret != GS_ERROR)
              ret = GS_OK;

            t = voidify_wrapper_expr (wrap, *expr_p);
            gcc_assert (t == *expr_p);

            if (want_value)
              {
                gimplify_and_add (wrap, pre_p);
                *expr_p = unshare_expr (*to_p);
              }
            else
              *expr_p = wrap;
            return GS_OK;
          }

        case COMPOUND_LITERAL_EXPR:
          {
            tree complit = TREE_OPERAND (*expr_p, 1);
            tree decl_s = COMPOUND_LITERAL_EXPR_DECL_EXPR (complit);
            tree decl = DECL_EXPR_DECL (decl_s);
            tree init = DECL_INITIAL (decl);

            /* struct T x = (struct T) { 0, 1, 2 } can be optimized
               into struct T x = { 0, 1, 2 } if the address of the
               compound literal has never been taken.  */
            if (!TREE_ADDRESSABLE (complit)
                && !TREE_ADDRESSABLE (decl)
                && init)
              {
                *expr_p = copy_node (*expr_p);
                TREE_OPERAND (*expr_p, 1) = init;
                return GS_OK;
              }
          }

        default:
          break;
        }
    }
  while (changed);

  return ret;
}

/* Promote partial stores to COMPLEX variables to total stores.  *EXPR_P is
   a MODIFY_EXPR with a lhs of a REAL/IMAGPART_EXPR of a variable with
   DECL_GIMPLE_REG_P set.

   IMPORTANT NOTE: This promotion is performed by introducing a load of the
   other, unmodified part of the complex object just before the total store.
   As a consequence, if the object is still uninitialized, an undefined value
   will be loaded into a register, which may result in a spurious exception
   if the register is floating-point and the value happens to be a signaling
   NaN for example.  Then the fully-fledged complex operations lowering pass
   followed by a DCE pass are necessary in order to fix things up.  */

static enum gimplify_status
gimplify_modify_expr_complex_part (tree *expr_p, gimple_seq *pre_p,
                                   bool want_value)
{
  enum tree_code code, ocode;
  tree lhs, rhs, new_rhs, other, realpart, imagpart;

  lhs = TREE_OPERAND (*expr_p, 0);
  rhs = TREE_OPERAND (*expr_p, 1);
  code = TREE_CODE (lhs);
  lhs = TREE_OPERAND (lhs, 0);

  /* OCODE selects the part NOT being stored; load it so the whole
     complex value can be written at once.  */
  ocode = code == REALPART_EXPR ? IMAGPART_EXPR : REALPART_EXPR;
  other = build1 (ocode, TREE_TYPE (rhs), lhs);
  other = get_formal_tmp_var (other, pre_p);

  realpart = code == REALPART_EXPR ? rhs : other;
  imagpart = code == REALPART_EXPR ? other : rhs;

  if (TREE_CONSTANT (realpart) && TREE_CONSTANT (imagpart))
    new_rhs = build_complex (TREE_TYPE (lhs), realpart, imagpart);
  else
    new_rhs = build2 (COMPLEX_EXPR, TREE_TYPE (lhs), realpart, imagpart);

  gimplify_seq_add_stmt (pre_p, gimple_build_assign (lhs, new_rhs));
  *expr_p = (want_value) ? rhs : NULL_TREE;

  return GS_ALL_DONE;
}

/* Gimplify the MODIFY_EXPR node pointed to by EXPR_P.

      modify_expr
              : varname '=' rhs
              | '*' ID '=' rhs

    PRE_P points to the list where side effects that must happen before
        *EXPR_P should be stored.
    POST_P points to the list where side effects that must happen after
        *EXPR_P should be stored.

    WANT_VALUE is nonzero iff we want to use the value of this expression
        in another expression.  */

static enum gimplify_status
gimplify_modify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
                      bool want_value)
{
  tree *from_p = &TREE_OPERAND (*expr_p, 1);
  tree *to_p = &TREE_OPERAND (*expr_p, 0);
  enum gimplify_status ret = GS_UNHANDLED;
  gimple assign;
  location_t loc = EXPR_LOCATION (*expr_p);

  gcc_assert (TREE_CODE (*expr_p) == MODIFY_EXPR
              || TREE_CODE (*expr_p) == INIT_EXPR);

  /* Insert pointer conversions required by the middle-end that are not
     required by the frontend.  This fixes middle-end type checking for
     for example gcc.dg/redecl-6.c.  */
  if (POINTER_TYPE_P (TREE_TYPE (*to_p)))
    {
      STRIP_USELESS_TYPE_CONVERSION (*from_p);
      if (!useless_type_conversion_p (TREE_TYPE (*to_p), TREE_TYPE (*from_p)))
        *from_p = fold_convert_loc (loc, TREE_TYPE (*to_p), *from_p);
    }

  /* See if any simplifications can be done based on what the RHS is.  */
  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
                                  want_value);
  if (ret != GS_UNHANDLED)
    return ret;

  /* For zero sized types only gimplify the left hand side and right hand
     side as statements and throw away the assignment.  Do this after
     gimplify_modify_expr_rhs so we handle TARGET_EXPRs of addressable
     types properly.  */
  if (zero_sized_type (TREE_TYPE (*from_p)) && !want_value)
    {
      gimplify_stmt (from_p, pre_p);
      gimplify_stmt (to_p, pre_p);
      *expr_p = NULL_TREE;
      return GS_ALL_DONE;
    }

  /* If the value being copied is of variable width, compute the length
     of the copy into a WITH_SIZE_EXPR.   Note that we need to do this
     before gimplifying any of the operands so that we can resolve any
     PLACEHOLDER_EXPRs in the size.  Also note that the RTL expander uses
     the size of the expression to be copied, not of the destination, so
     that is what we must do here.  */
  maybe_with_size_expr (from_p);

  ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
  if (ret == GS_ERROR)
    return ret;

  /* As a special case, we have to temporarily allow for assignments
     with a CALL_EXPR on the RHS.  Since in GIMPLE a function call is
     a toplevel statement, when gimplifying the GENERIC expression
     MODIFY_EXPR <a, CALL_EXPR <foo>>, we cannot create the tuple
     GIMPLE_ASSIGN <a, GIMPLE_CALL <foo>>.

     Instead, we need to create the tuple GIMPLE_CALL <a, foo>.  To prevent
     gimplify_expr from trying to create a new temporary for foo's LHS, we
     tell it that it should only gimplify until it reaches the CALL_EXPR.
     On return from gimplify_expr, the newly created GIMPLE_CALL <foo> will
     be the last statement in *PRE_P and all we need to do here is set 'a'
     to be its LHS.  */
  ret = gimplify_expr (from_p, pre_p, post_p, rhs_predicate_for (*to_p),
                       fb_rvalue);
  if (ret == GS_ERROR)
    return ret;

  /* Now see if the above changed *from_p to something we handle
     specially.  */
  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
                                  want_value);
  if (ret != GS_UNHANDLED)
    return ret;

  /* If we've got a variable sized assignment between two lvalues (i.e. does
     not involve a call), then we can make things a bit more straightforward
     by converting the assignment to memcpy or memset.  */
  if (TREE_CODE (*from_p) == WITH_SIZE_EXPR)
    {
      tree from = TREE_OPERAND (*from_p, 0);
      tree size = TREE_OPERAND (*from_p, 1);

      if (TREE_CODE (from) == CONSTRUCTOR)
        return gimplify_modify_expr_to_memset (expr_p, size, want_value, pre_p);

      if (is_gimple_addressable (from))
        {
          *from_p = from;
          return gimplify_modify_expr_to_memcpy (expr_p, size, want_value,
                                                 pre_p);
        }
    }

  /* Transform partial stores to non-addressable complex variables into
     total stores.  This allows us to use real instead of virtual operands
     for these variables, which improves optimization.  */
  if ((TREE_CODE (*to_p) == REALPART_EXPR
       || TREE_CODE (*to_p) == IMAGPART_EXPR)
      && is_gimple_reg (TREE_OPERAND (*to_p, 0)))
    return gimplify_modify_expr_complex_part (expr_p, pre_p, want_value);

  /* Try to alleviate the effects of the gimplification creating artificial
     temporaries (see for example is_gimple_reg_rhs) on the debug info.  */
  if (!gimplify_ctxp->into_ssa
      && TREE_CODE (*from_p) == VAR_DECL
      && DECL_IGNORED_P (*from_p)
      && DECL_P (*to_p)
      && !DECL_IGNORED_P (*to_p))
    {
      /* Give the artificial temporary the destination's name and link it
         to the destination via a debug expr so debuggers can track it.  */
      if (!DECL_NAME (*from_p) && DECL_NAME (*to_p))
        DECL_NAME (*from_p)
          = create_tmp_var_name (IDENTIFIER_POINTER (DECL_NAME (*to_p)));
      DECL_DEBUG_EXPR_IS_FROM (*from_p) = 1;
      SET_DECL_DEBUG_EXPR (*from_p, *to_p);
   }

  if (want_value && TREE_THIS_VOLATILE (*to_p))
    *from_p = get_initialized_tmp_var (*from_p, pre_p, post_p);

  if (TREE_CODE (*from_p) == CALL_EXPR)
    {
      /* Since the RHS is a CALL_EXPR, we need to create a GIMPLE_CALL
         instead of a GIMPLE_ASSIGN.  */
      assign = gimple_build_call_from_tree (*from_p);
      if (!gimple_call_noreturn_p (assign))
        gimple_call_set_lhs (assign, *to_p);
    }
  else
    {
      assign = gimple_build_assign (*to_p, *from_p);
      gimple_set_location (assign, EXPR_LOCATION (*expr_p));
    }

  gimplify_seq_add_stmt (pre_p, assign);

  if (gimplify_ctxp->into_ssa && is_gimple_reg (*to_p))
    {
      /* If we've somehow already got an SSA_NAME on the LHS, then
         we've probably modified it twice.  Not good.  */
      gcc_assert (TREE_CODE (*to_p) != SSA_NAME);
      *to_p = make_ssa_name (*to_p, assign);
      gimple_set_lhs (assign, *to_p);
    }

  if (want_value)
    {
      *expr_p = TREE_THIS_VOLATILE (*to_p) ? *from_p : unshare_expr (*to_p);
      return GS_OK;
    }
  else
    *expr_p = NULL;

  return GS_ALL_DONE;
}

/* Gimplify a comparison between two variable-sized objects.  Do this
   with a call to BUILT_IN_MEMCMP.
*/

static enum gimplify_status
gimplify_variable_sized_compare (tree *expr_p)
{
  location_t loc = EXPR_LOCATION (*expr_p);
  tree op0 = TREE_OPERAND (*expr_p, 0);
  tree op1 = TREE_OPERAND (*expr_p, 1);
  tree t, arg, dest, src, expr;

  /* ARG is the byte size of OP0, with any placeholders in its size
     expression resolved against OP0 itself.  */
  arg = TYPE_SIZE_UNIT (TREE_TYPE (op0));
  arg = unshare_expr (arg);
  arg = SUBSTITUTE_PLACEHOLDER_IN_EXPR (arg, op0);
  src = build_fold_addr_expr_loc (loc, op1);
  dest = build_fold_addr_expr_loc (loc, op0);
  t = implicit_built_in_decls[BUILT_IN_MEMCMP];
  t = build_call_expr_loc (loc, t, 3, dest, src, arg);

  /* Rewrite OP0 <cmp> OP1 as memcmp (&OP0, &OP1, size) <cmp> 0,
     keeping the original comparison code.  */
  expr
    = build2 (TREE_CODE (*expr_p), TREE_TYPE (*expr_p), t, integer_zero_node);
  SET_EXPR_LOCATION (expr, loc);
  *expr_p = expr;

  return GS_OK;
}

/* Gimplify a comparison between two aggregate objects of integral scalar
   mode as a comparison between the bitwise equivalent scalar values.  */

static enum gimplify_status
gimplify_scalar_mode_aggregate_compare (tree *expr_p)
{
  location_t loc = EXPR_LOCATION (*expr_p);
  tree op0 = TREE_OPERAND (*expr_p, 0);
  tree op1 = TREE_OPERAND (*expr_p, 1);

  tree type = TREE_TYPE (op0);
  /* An unsigned scalar type with the same machine mode as the aggregate;
     VIEW_CONVERT_EXPR reinterprets the bits without conversion.  */
  tree scalar_type = lang_hooks.types.type_for_mode (TYPE_MODE (type), 1);

  op0 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, scalar_type, op0);
  op1 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, scalar_type, op1);

  *expr_p
    = fold_build2_loc (loc, TREE_CODE (*expr_p), TREE_TYPE (*expr_p), op0, op1);

  return GS_OK;
}

/* Gimplify TRUTH_ANDIF_EXPR and TRUTH_ORIF_EXPR expressions.  EXPR_P
   points to the expression to gimplify.

   Expressions of the form 'a && b' are gimplified to:

        a && b ? true : false

   LOCUS is the source location to be put on the generated COND_EXPR.
   gimplify_cond_expr will do the rest.  */

static enum gimplify_status
gimplify_boolean_expr (tree *expr_p, location_t locus)
{
  /* Preserve the original type of the expression.  */
  tree type = TREE_TYPE (*expr_p);

  *expr_p = build3 (COND_EXPR, type, *expr_p,
                    fold_convert_loc (locus, type, boolean_true_node),
                    fold_convert_loc (locus, type, boolean_false_node));

  SET_EXPR_LOCATION (*expr_p, locus);

  return GS_OK;
}

/* Gimplifies an expression sequence.  This function gimplifies each
   expression and re-writes the original expression with the last
   expression of the sequence in GIMPLE form.

   PRE_P points to the list where the side effects for all the expressions
   in the sequence will be emitted.

   WANT_VALUE is true when the result of the last COMPOUND_EXPR is used.  */

static enum gimplify_status
gimplify_compound_expr (tree *expr_p, gimple_seq *pre_p, bool want_value)
{
  tree t = *expr_p;

  do
    {
      tree *sub_p = &TREE_OPERAND (t, 0);

      /* Recurse into nested COMPOUND_EXPRs on the lhs; every expression
         but the last is evaluated purely for side effects.  */
      if (TREE_CODE (*sub_p) == COMPOUND_EXPR)
        gimplify_compound_expr (sub_p, pre_p, false);
      else
        gimplify_stmt (sub_p, pre_p);

      t = TREE_OPERAND (t, 1);
    }
  while (TREE_CODE (t) == COMPOUND_EXPR);

  *expr_p = t;
  if (want_value)
    return GS_OK;
  else
    {
      gimplify_stmt (expr_p, pre_p);
      return GS_ALL_DONE;
    }
}

/* Gimplify a SAVE_EXPR node.  EXPR_P points to the expression to
   gimplify.  After gimplification, EXPR_P will point to a new temporary
   that holds the original value of the SAVE_EXPR node.

   PRE_P points to the list where side effects that must happen before
   *EXPR_P should be stored.  */

static enum gimplify_status
gimplify_save_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
  enum gimplify_status ret = GS_ALL_DONE;
  tree val;

  gcc_assert (TREE_CODE (*expr_p) == SAVE_EXPR);
  val = TREE_OPERAND (*expr_p, 0);

  /* If the SAVE_EXPR has not been resolved, then evaluate it once.  */
  if (!SAVE_EXPR_RESOLVED_P (*expr_p))
    {
      /* The operand may be a void-valued expression such as SAVE_EXPRs
         generated by the Java frontend for class initialization.  It is
         being executed only for its side-effects.
      */
      if (TREE_TYPE (val) == void_type_node)
        {
          ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
                               is_gimple_stmt, fb_none);
          val = NULL;
        }
      else
        val = get_initialized_tmp_var (val, pre_p, post_p);

      /* Record the resolved value so later occurrences of this SAVE_EXPR
         reuse the same temporary instead of re-evaluating.  */
      TREE_OPERAND (*expr_p, 0) = val;
      SAVE_EXPR_RESOLVED_P (*expr_p) = 1;
    }

  *expr_p = val;

  return ret;
}

/* Re-write the ADDR_EXPR node pointed to by EXPR_P

      unary_expr
              : ...
              | '&' varname
              ...

    PRE_P points to the list where side effects that must happen before
        *EXPR_P should be stored.

    POST_P points to the list where side effects that must happen after
        *EXPR_P should be stored.  */

static enum gimplify_status
gimplify_addr_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
  tree expr = *expr_p;
  tree op0 = TREE_OPERAND (expr, 0);
  enum gimplify_status ret;
  location_t loc = EXPR_LOCATION (*expr_p);

  switch (TREE_CODE (op0))
    {
    case INDIRECT_REF:
    do_indirect_ref:
      /* Check if we are dealing with an expression of the form '&*ptr'.
         While the front end folds away '&*ptr' into 'ptr', these
         expressions may be generated internally by the compiler (e.g.,
         builtins like __builtin_va_end).  */
      /* Caution: the silent array decomposition semantics we allow for
         ADDR_EXPR means we can't always discard the pair.  */
      /* Gimplification of the ADDR_EXPR operand may drop
         cv-qualification conversions, so make sure we add them if
         needed.  */
      {
        tree op00 = TREE_OPERAND (op0, 0);
        tree t_expr = TREE_TYPE (expr);
        tree t_op00 = TREE_TYPE (op00);

        if (!useless_type_conversion_p (t_expr, t_op00))
          op00 = fold_convert_loc (loc, TREE_TYPE (expr), op00);
        *expr_p = op00;
        ret = GS_OK;
      }
      break;

    case VIEW_CONVERT_EXPR:
      /* Take the address of our operand and then convert it to the type of
         this ADDR_EXPR.

         ??? The interactions of VIEW_CONVERT_EXPR and aliasing is not at
         all clear.  The impact of this transformation is even less clear.  */

      /* If the operand is a useless conversion, look through it.  Doing so
         guarantees that the ADDR_EXPR and its operand will remain of the
         same type.  */
      if (tree_ssa_useless_type_conversion (TREE_OPERAND (op0, 0)))
        op0 = TREE_OPERAND (op0, 0);

      *expr_p = fold_convert_loc (loc, TREE_TYPE (expr),
                                  build_fold_addr_expr_loc (loc,
                                                        TREE_OPERAND (op0, 0)));
      ret = GS_OK;
      break;

    default:
      /* We use fb_either here because the C frontend sometimes takes
         the address of a call that returns a struct; see
         gcc.dg/c99-array-lval-1.c.  The gimplifier will correctly make
         the implied temporary explicit.  */

      /* Make the operand addressable.  */
      ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, post_p,
                           is_gimple_addressable, fb_either);
      if (ret == GS_ERROR)
        break;

      /* Then mark it.  Beware that it may not be possible to do so directly
         if a temporary has been created by the gimplification.  */
      prepare_gimple_addressable (&TREE_OPERAND (expr, 0), pre_p);

      op0 = TREE_OPERAND (expr, 0);

      /* For various reasons, the gimplification of the expression
         may have made a new INDIRECT_REF.  */
      if (TREE_CODE (op0) == INDIRECT_REF)
        goto do_indirect_ref;

      mark_addressable (TREE_OPERAND (expr, 0));

      /* The FEs may end up building ADDR_EXPRs early on a decl with
         an incomplete type.  Re-build ADDR_EXPRs in canonical form
         here.  */
      if (!types_compatible_p (TREE_TYPE (op0), TREE_TYPE (TREE_TYPE (expr))))
        *expr_p = build_fold_addr_expr (op0);

      /* Make sure TREE_CONSTANT and TREE_SIDE_EFFECTS are set properly.  */
      recompute_tree_invariant_for_addr_expr (*expr_p);

      /* If we re-built the ADDR_EXPR add a conversion to the original type
         if required.  */
      if (!useless_type_conversion_p (TREE_TYPE (expr), TREE_TYPE (*expr_p)))
        *expr_p = fold_convert (TREE_TYPE (expr), *expr_p);

      break;
    }

  return ret;
}

/* Gimplify the operands of an ASM_EXPR.  Input operands should be a gimple
   value; output operands should be a gimple lvalue.
*/

static enum gimplify_status
gimplify_asm_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
  tree expr;
  int noutputs;
  const char **oconstraints;
  int i;
  tree link;
  const char *constraint;
  bool allows_mem, allows_reg, is_inout;
  enum gimplify_status ret, tret;
  gimple stmt;
  VEC(tree, gc) *inputs;
  VEC(tree, gc) *outputs;
  VEC(tree, gc) *clobbers;
  VEC(tree, gc) *labels;
  tree link_next;

  expr = *expr_p;
  noutputs = list_length (ASM_OUTPUTS (expr));
  oconstraints = (const char **) alloca ((noutputs) * sizeof (const char *));

  inputs = outputs = clobbers = labels = NULL;

  ret = GS_ALL_DONE;
  link_next = NULL_TREE;
  /* First pass: gimplify each output operand as an lvalue, recording its
     constraint string in OCONSTRAINTS for the input pass below.  */
  for (i = 0, link = ASM_OUTPUTS (expr); link; ++i, link = link_next)
    {
      bool ok;
      size_t constraint_len;

      link_next = TREE_CHAIN (link);

      oconstraints[i]
        = constraint
        = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
      constraint_len = strlen (constraint);
      if (constraint_len == 0)
        continue;

      ok = parse_output_constraint (&constraint, i, 0, 0,
                                    &allows_mem, &allows_reg, &is_inout);
      if (!ok)
        {
          ret = GS_ERROR;
          is_inout = false;
        }

      if (!allows_reg && allows_mem)
        mark_addressable (TREE_VALUE (link));

      tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
                            is_inout ? is_gimple_min_lval : is_gimple_lvalue,
                            fb_lvalue | fb_mayfail);
      if (tret == GS_ERROR)
        {
          error ("invalid lvalue in asm output %d", i);
          ret = tret;
        }

      VEC_safe_push (tree, gc, outputs, link);
      TREE_CHAIN (link) = NULL_TREE;

      if (is_inout)
        {
          /* An input/output operand.  To give the optimizers more
             flexibility, split it into separate input and output
             operands.  */
          tree input;
          char buf[10];

          /* Turn the in/out constraint into an output constraint.  */
          char *p = xstrdup (constraint);
          p[0] = '=';
          TREE_VALUE (TREE_PURPOSE (link)) = build_string (constraint_len, p);

          /* And add a matching input constraint.  */
          if (allows_reg)
            {
              sprintf (buf, "%d", i);

              /* If there are multiple alternatives in the constraint,
                 handle each of them individually.  Those that allow register
                 will be replaced with operand number, the others will stay
                 unchanged.  */
              if (strchr (p, ',') != NULL)
                {
                  size_t len = 0, buflen = strlen (buf);
                  char *beg, *end, *str, *dst;

                  /* First scan: compute an upper bound on the length of
                     the rewritten constraint string.  */
                  for (beg = p + 1;;)
                    {
                      end = strchr (beg, ',');
                      if (end == NULL)
                        end = strchr (beg, '\0');
                      if ((size_t) (end - beg) < buflen)
                        len += buflen + 1;
                      else
                        len += end - beg + 1;
                      if (*end)
                        beg = end + 1;
                      else
                        break;
                    }

                  str = (char *) alloca (len);
                  /* Second scan: rewrite each alternative, substituting
                     the operand number for register-allowing ones.  */
                  for (beg = p + 1, dst = str;;)
                    {
                      const char *tem;
                      bool mem_p, reg_p, inout_p;

                      end = strchr (beg, ',');
                      if (end)
                        *end = '\0';
                      beg[-1] = '=';
                      tem = beg - 1;
                      parse_output_constraint (&tem, i, 0, 0,
                                               &mem_p, &reg_p, &inout_p);
                      if (dst != str)
                        *dst++ = ',';
                      if (reg_p)
                        {
                          memcpy (dst, buf, buflen);
                          dst += buflen;
                        }
                      else
                        {
                          if (end)
                            len = end - beg;
                          else
                            len = strlen (beg);
                          memcpy (dst, beg, len);
                          dst += len;
                        }
                      if (end)
                        beg = end + 1;
                      else
                        break;
                    }
                  *dst = '\0';
                  input = build_string (dst - str, str);
                }
              else
                input = build_string (strlen (buf), buf);
            }
          else
            input = build_string (constraint_len - 1, constraint + 1);

          free (p);

          input = build_tree_list (build_tree_list (NULL_TREE, input),
                                   unshare_expr (TREE_VALUE (link)));
          ASM_INPUTS (expr) = chainon (ASM_INPUTS (expr), input);
        }
    }

  link_next = NULL_TREE;
  /* Second pass: gimplify the input operands.  Note I continues counting
     past the outputs so diagnostics report the overall operand number.  */
  for (link = ASM_INPUTS (expr); link; ++i, link = link_next)
    {
      link_next = TREE_CHAIN (link);
      constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
      parse_input_constraint (&constraint, 0, 0, noutputs, 0,
                              oconstraints, &allows_mem, &allows_reg);

      /* If we can't make copies, we can only accept memory.  */
      if (TREE_ADDRESSABLE (TREE_TYPE (TREE_VALUE (link))))
        {
          if (allows_mem)
            allows_reg = 0;
          else
            {
              error ("impossible constraint in %<asm%>");
              error ("non-memory input %d must stay in memory", i);
              return GS_ERROR;
            }
        }

      /* If the operand is a memory input, it should be an lvalue.  */
      if (!allows_reg && allows_mem)
        {
          tree inputv = TREE_VALUE (link);
          STRIP_NOPS (inputv);
          /* Pre/post increment and decrement expressions cannot serve as
             memory inputs; flag them as errors.  */
          if (TREE_CODE (inputv) == PREDECREMENT_EXPR
              || TREE_CODE (inputv) == PREINCREMENT_EXPR
              || TREE_CODE (inputv) == POSTDECREMENT_EXPR
              || TREE_CODE (inputv) == POSTINCREMENT_EXPR)
            TREE_VALUE (link) = error_mark_node;
          tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
                                is_gimple_lvalue, fb_lvalue | fb_mayfail);
          mark_addressable (TREE_VALUE (link));
          if (tret == GS_ERROR)
            {
              if (EXPR_HAS_LOCATION (TREE_VALUE (link)))
                input_location = EXPR_LOCATION (TREE_VALUE (link));
              error ("memory input %d is not directly addressable", i);
              ret = tret;
            }
        }
      else
        {
          tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
                                is_gimple_asm_val, fb_rvalue);
          if (tret == GS_ERROR)
            ret = tret;
        }

      TREE_CHAIN (link) = NULL_TREE;
      VEC_safe_push (tree, gc, inputs, link);
    }

  for (link = ASM_CLOBBERS (expr); link; ++i, link = TREE_CHAIN (link))
    VEC_safe_push (tree, gc, clobbers, link);

  for (link = ASM_LABELS (expr); link; ++i, link = TREE_CHAIN (link))
    VEC_safe_push (tree, gc, labels, link);

  /* Do not add ASMs with errors to the gimple IL stream.  */
  if (ret != GS_ERROR)
    {
      stmt = gimple_build_asm_vec (TREE_STRING_POINTER (ASM_STRING (expr)),
                                   inputs, outputs, clobbers, labels);

      gimple_asm_set_volatile (stmt, ASM_VOLATILE_P (expr));
      gimple_asm_set_input (stmt, ASM_INPUT_P (expr));

      gimplify_seq_add_stmt (pre_p, stmt);
    }

  return ret;
}

/* Gimplify a CLEANUP_POINT_EXPR.  Currently this works by adding
   GIMPLE_WITH_CLEANUP_EXPRs to the prequeue as we encounter cleanups while
   gimplifying the body, and converting them to TRY_FINALLY_EXPRs when we
   return to this function.

   FIXME should we complexify the prequeue handling instead?  Or use flags
   for all the cleanups and let the optimizer tighten them up?  The current
   code seems pretty fragile; it will break on a cleanup within any
   non-conditional nesting.
   But any such nesting would be broken, anyway; we can't write a
   TRY_FINALLY_EXPR that starts inside a nesting construct and continues
   out of it.  We can do that at the RTL level, though, so having an
   optimizer to tighten up try/finally regions would be a Good Thing.  */

static enum gimplify_status
gimplify_cleanup_point_expr (tree *expr_p, gimple_seq *pre_p)
{
  gimple_stmt_iterator iter;
  gimple_seq body_sequence = NULL;

  tree temp = voidify_wrapper_expr (*expr_p, NULL);

  /* We only care about the number of conditions between the innermost
     CLEANUP_POINT_EXPR and the cleanup.  So save and reset the count and
     any cleanups collected outside the CLEANUP_POINT_EXPR.  */
  int old_conds = gimplify_ctxp->conditions;
  gimple_seq old_cleanups = gimplify_ctxp->conditional_cleanups;
  gimplify_ctxp->conditions = 0;
  gimplify_ctxp->conditional_cleanups = NULL;

  gimplify_stmt (&TREE_OPERAND (*expr_p, 0), &body_sequence);

  gimplify_ctxp->conditions = old_conds;
  gimplify_ctxp->conditional_cleanups = old_cleanups;

  /* Walk the gimplified body and convert each cleanup marker into a
     GIMPLE_TRY wrapping the statements that follow it.  */
  for (iter = gsi_start (body_sequence); !gsi_end_p (iter); )
    {
      gimple wce = gsi_stmt (iter);

      if (gimple_code (wce) == GIMPLE_WITH_CLEANUP_EXPR)
        {
          if (gsi_one_before_end_p (iter))
            {
              /* Note that gsi_insert_seq_before and gsi_remove do not
                 scan operands, unlike some other sequence mutators.  */
              if (!gimple_wce_cleanup_eh_only (wce))
                gsi_insert_seq_before_without_update (&iter,
                                                      gimple_wce_cleanup (wce),
                                                      GSI_SAME_STMT);
              gsi_remove (&iter, true);
              break;
            }
          else
            {
              gimple gtry;
              gimple_seq seq;
              enum gimple_try_flags kind;

              if (gimple_wce_cleanup_eh_only (wce))
                kind = GIMPLE_TRY_CATCH;
              else
                kind = GIMPLE_TRY_FINALLY;
              seq = gsi_split_seq_after (iter);

              gtry = gimple_build_try (seq, gimple_wce_cleanup (wce), kind);
              /* Do not use gsi_replace here, as it may scan operands.
                 We want to do a simple structural modification only.  */
              *gsi_stmt_ptr (&iter) = gtry;
              iter = gsi_start (seq);
            }
        }
      else
        gsi_next (&iter);
    }

  gimplify_seq_add_seq (pre_p, body_sequence);
  if (temp)
    {
      *expr_p = temp;
      return GS_OK;
    }
  else
    {
      *expr_p = NULL;
      return GS_ALL_DONE;
    }
}

/* Insert a cleanup marker for gimplify_cleanup_point_expr.  CLEANUP
   is the cleanup action required.  EH_ONLY is true if the cleanup should
   only be executed if an exception is thrown, not on normal exit.  */

static void
gimple_push_cleanup (tree var, tree cleanup, bool eh_only, gimple_seq *pre_p)
{
  gimple wce;
  gimple_seq cleanup_stmts = NULL;

  /* Errors can result in improperly nested cleanups.  Which results in
     confusion when trying to resolve the GIMPLE_WITH_CLEANUP_EXPR.  */
  if (seen_error ())
    return;

  if (gimple_conditional_context ())
    {
      /* If we're in a conditional context, this is more complex.  We only
         want to run the cleanup if we actually ran the initialization that
         necessitates it, but we want to run it after the end of the
         conditional context.  So we wrap the try/finally around the
         condition and use a flag to determine whether or not to actually
         run the destructor.  Thus

           test ? f(A()) : 0

         becomes (approximately)

           flag = 0;
           try {
             if (test) { A::A(temp); flag = 1; val = f(temp); }
             else { val = 0; }
           } finally {
             if (flag) A::~A(temp);
           }
           val
      */
      tree flag = create_tmp_var (boolean_type_node, "cleanup");
      gimple ffalse = gimple_build_assign (flag, boolean_false_node);
      gimple ftrue = gimple_build_assign (flag, boolean_true_node);

      cleanup = build3 (COND_EXPR, void_type_node, flag, cleanup, NULL);
      gimplify_stmt (&cleanup, &cleanup_stmts);
      wce = gimple_build_wce (cleanup_stmts);

      gimplify_seq_add_stmt (&gimplify_ctxp->conditional_cleanups, ffalse);
      gimplify_seq_add_stmt (&gimplify_ctxp->conditional_cleanups, wce);
      gimplify_seq_add_stmt (pre_p, ftrue);

      /* Because of this manipulation, and the EH edges that jump
         threading cannot redirect, the temporary (VAR) will appear
         to be used uninitialized.  Don't warn.  */
      TREE_NO_WARNING (var) = 1;
    }
  else
    {
      gimplify_stmt (&cleanup, &cleanup_stmts);
      wce = gimple_build_wce (cleanup_stmts);
      gimple_wce_set_cleanup_eh_only (wce, eh_only);
      gimplify_seq_add_stmt (pre_p, wce);
    }
}

/* Gimplify a TARGET_EXPR which doesn't appear on the rhs of an INIT_EXPR.  */

static enum gimplify_status
gimplify_target_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
  tree targ = *expr_p;
  tree temp = TARGET_EXPR_SLOT (targ);
  tree init = TARGET_EXPR_INITIAL (targ);
  enum gimplify_status ret;

  if (init)
    {
      /* TARGET_EXPR temps aren't part of the enclosing block, so add it
         to the temps list.  Handle also variable length TARGET_EXPRs.  */
      if (TREE_CODE (DECL_SIZE (temp)) != INTEGER_CST)
        {
          if (!TYPE_SIZES_GIMPLIFIED (TREE_TYPE (temp)))
            gimplify_type_sizes (TREE_TYPE (temp), pre_p);
          gimplify_vla_decl (temp, pre_p);
        }
      else
        gimple_add_tmp_var (temp);

      /* If TARGET_EXPR_INITIAL is void, then the mere evaluation of the
         expression is supposed to initialize the slot.  */
      if (VOID_TYPE_P (TREE_TYPE (init)))
        ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none);
      else
        {
          tree init_expr = build2 (INIT_EXPR, void_type_node, temp, init);
          init = init_expr;
          ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none);
          init = NULL;
          ggc_free (init_expr);
        }
      if (ret == GS_ERROR)
        {
          /* PR c++/28266 Make sure this is expanded only once. */
          TARGET_EXPR_INITIAL (targ) = NULL_TREE;
          return GS_ERROR;
        }
      if (init)
        gimplify_and_add (init, pre_p);

      /* If needed, push the cleanup for the temp.  */
      if (TARGET_EXPR_CLEANUP (targ))
        gimple_push_cleanup (temp, TARGET_EXPR_CLEANUP (targ),
                             CLEANUP_EH_ONLY (targ), pre_p);

      /* Only expand this once.  */
      TREE_OPERAND (targ, 3) = init;
      TARGET_EXPR_INITIAL (targ) = NULL_TREE;
    }
  else
    /* We should have expanded this before.  */
    gcc_assert (DECL_SEEN_IN_BIND_EXPR_P (temp));

  *expr_p = temp;
  return GS_OK;
}

/* Gimplification of expression trees.  */

/* Gimplify an expression which appears at statement context.
   The corresponding GIMPLE statements are added to *SEQ_P.  If *SEQ_P is
   NULL, a new sequence is allocated.

   Return true if we actually added a statement to the queue.  */

bool
gimplify_stmt (tree *stmt_p, gimple_seq *seq_p)
{
  gimple_seq_node last;

  if (!*seq_p)
    *seq_p = gimple_seq_alloc ();

  /* Compare the sequence's last node before and after gimplification
     to detect whether anything was actually appended.  */
  last = gimple_seq_last (*seq_p);
  gimplify_expr (stmt_p, seq_p, NULL, is_gimple_stmt, fb_none);
  return last != gimple_seq_last (*seq_p);
}

/* A subroutine of gimplify_omp_atomic.  The front end is supposed to have
   stabilized the lhs of the atomic operation as *ADDR.  Return true if
   EXPR is this stabilized form.  */

static bool
goa_lhs_expr_p (tree expr, tree addr)
{
  /* Also include casts to other type variants.  The C front end is fond
     of adding these for e.g. volatile variables.  This is like
     STRIP_TYPE_NOPS but includes the main variant lookup.  */
  STRIP_USELESS_TYPE_CONVERSION (expr);

  if (TREE_CODE (expr) == INDIRECT_REF)
    {
      expr = TREE_OPERAND (expr, 0);
      /* Peel matching conversion layers off both EXPR and ADDR in
	 lock-step, as long as the types stay compatible.  */
      while (expr != addr
	     && (CONVERT_EXPR_P (expr)
		 || TREE_CODE (expr) == NON_LVALUE_EXPR)
	     && TREE_CODE (expr) == TREE_CODE (addr)
	     && types_compatible_p (TREE_TYPE (expr), TREE_TYPE (addr)))
	{
	  expr = TREE_OPERAND (expr, 0);
	  addr = TREE_OPERAND (addr, 0);
	}
      if (expr == addr)
	return true;
      return (TREE_CODE (addr) == ADDR_EXPR
	      && TREE_CODE (expr) == ADDR_EXPR
	      && TREE_OPERAND (addr, 0) == TREE_OPERAND (expr, 0));
    }
  if (TREE_CODE (addr) == ADDR_EXPR && expr == TREE_OPERAND (addr, 0))
    return true;
  return false;
}

/* Walk *EXPR_P and replace appearances of *LHS_ADDR with LHS_VAR.  If an
   expression does not involve the lhs, evaluate it into a temporary.
   Return 1 if the lhs appeared as a subexpression, 0 if it did not,
   or -1 if an error was encountered.  */

static int
goa_stabilize_expr (tree *expr_p, gimple_seq *pre_p, tree lhs_addr,
		    tree lhs_var)
{
  tree expr = *expr_p;
  int saw_lhs;

  if (goa_lhs_expr_p (expr, lhs_addr))
    {
      *expr_p = lhs_var;
      return 1;
    }
  if (is_gimple_val (expr))
    return 0;

  saw_lhs = 0;
  switch (TREE_CODE_CLASS (TREE_CODE (expr)))
    {
    case tcc_binary:
    case tcc_comparison:
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 1), pre_p, lhs_addr,
				     lhs_var);
      /* FALLTHRU -- binary nodes also recurse into operand 0 below.  */
    case tcc_unary:
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 0), pre_p, lhs_addr,
				     lhs_var);
      break;
    case tcc_expression:
      switch (TREE_CODE (expr))
	{
	case TRUTH_ANDIF_EXPR:
	case TRUTH_ORIF_EXPR:
	case TRUTH_AND_EXPR:
	case TRUTH_OR_EXPR:
	case TRUTH_XOR_EXPR:
	  saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 1), pre_p,
					 lhs_addr, lhs_var);
	  /* FALLTHRU -- the binary truth nodes also visit operand 0.  */
	case TRUTH_NOT_EXPR:
	  saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 0), pre_p,
					 lhs_addr, lhs_var);
	  break;
	default:
	  break;
	}
      break;
    default:
      break;
    }

  /* If the lhs never appeared in this subtree, force the whole subtree
     into a GIMPLE value (a temporary if needed) so it is evaluated only
     once, outside the atomic operation.  */
  if (saw_lhs == 0)
    {
      enum gimplify_status gs;
      gs = gimplify_expr (expr_p, pre_p, NULL, is_gimple_val, fb_rvalue);
      if (gs != GS_ALL_DONE)
	saw_lhs = -1;
    }

  return saw_lhs;
}

/* Converts the GENERIC expression tree *EXPR_P to GIMPLE.  If the
   expression produces a value to be used as an operand inside a GIMPLE
   statement, the value will be stored back in *EXPR_P.  This value will
   be a tree of class tcc_declaration, tcc_constant, tcc_reference or
   an SSA_NAME.  The corresponding sequence of GIMPLE statements is
   emitted in PRE_P and POST_P.  Additionally, this process may overwrite
   parts of the input expression during gimplification.  Ideally, it
   should be possible to do non-destructive gimplification.

   EXPR_P points to the GENERIC expression to convert to GIMPLE.  If
      the expression needs to evaluate to a value to be used as an operand
      in a GIMPLE statement, this value will be stored in *EXPR_P on exit.
      This happens when the caller specifies one of fb_lvalue or fb_rvalue
      fallback flags.
PRE_P will contain the sequence of GIMPLE statements corresponding to the evaluation of EXPR and all the side-effects that must be executed before the main expression. On exit, the last statement of PRE_P is the core statement being gimplified. For instance, when gimplifying 'if (++a)' the last statement in PRE_P will be 'if (t.1)' where t.1 is the result of pre-incrementing 'a'. POST_P will contain the sequence of GIMPLE statements corresponding to the evaluation of all the side-effects that must be executed after the main expression. If this is NULL, the post side-effects are stored at the end of PRE_P. The reason why the output is split in two is to handle post side-effects explicitly. In some cases, an expression may have inner and outer post side-effects which need to be emitted in an order different from the one given by the recursive traversal. For instance, for the expression (*p--)++ the post side-effects of '--' must actually occur *after* the post side-effects of '++'. However, gimplification will first visit the inner expression, so if a separate POST sequence was not used, the resulting sequence would be: 1 t.1 = *p 2 p = p - 1 3 t.2 = t.1 + 1 4 *p = t.2 However, the post-decrement operation in line #2 must not be evaluated until after the store to *p at line #4, so the correct sequence should be: 1 t.1 = *p 2 t.2 = t.1 + 1 3 *p = t.2 4 p = p - 1 So, by specifying a separate post queue, it is possible to emit the post side-effects in the correct order. If POST_P is NULL, an internal queue will be used. Before returning to the caller, the sequence POST_P is appended to the main output sequence PRE_P. GIMPLE_TEST_F points to a function that takes a tree T and returns nonzero if T is in the GIMPLE form requested by the caller. The GIMPLE predicates are in gimple.c. FALLBACK tells the function what sort of a temporary we want if gimplification cannot produce an expression that complies with GIMPLE_TEST_F. 
fb_none means that no temporary should be generated fb_rvalue means that an rvalue is OK to generate fb_lvalue means that an lvalue is OK to generate fb_either means that either is OK, but an lvalue is preferable. fb_mayfail means that gimplification may fail (in which case GS_ERROR will be returned) The return value is either GS_ERROR or GS_ALL_DONE, since this function iterates until EXPR is completely gimplified or an error occurs. */ enum gimplify_status gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p, bool (*gimple_test_f) (tree), fallback_t fallback) { tree tmp; gimple_seq internal_pre = NULL; gimple_seq internal_post = NULL; tree save_expr; bool is_statement; location_t saved_location; enum gimplify_status ret; gimple_stmt_iterator pre_last_gsi, post_last_gsi; save_expr = *expr_p; if (save_expr == NULL_TREE) return GS_ALL_DONE; /* If we are gimplifying a top-level statement, PRE_P must be valid. */ is_statement = gimple_test_f == is_gimple_stmt; if (is_statement) gcc_assert (pre_p); /* Consistency checks. */ if (gimple_test_f == is_gimple_reg) gcc_assert (fallback & (fb_rvalue | fb_lvalue)); else if (gimple_test_f == is_gimple_val || gimple_test_f == is_gimple_call_addr || gimple_test_f == is_gimple_condexpr || gimple_test_f == is_gimple_mem_rhs || gimple_test_f == is_gimple_mem_rhs_or_call || gimple_test_f == is_gimple_reg_rhs || gimple_test_f == is_gimple_reg_rhs_or_call || gimple_test_f == is_gimple_asm_val || gimple_test_f == is_gimple_mem_ref_addr) gcc_assert (fallback & fb_rvalue); else if (gimple_test_f == is_gimple_min_lval || gimple_test_f == is_gimple_lvalue) gcc_assert (fallback & fb_lvalue); else if (gimple_test_f == is_gimple_addressable) gcc_assert (fallback & fb_either); else if (gimple_test_f == is_gimple_stmt) gcc_assert (fallback == fb_none); else { /* We should have recognized the GIMPLE_TEST_F predicate to know what kind of fallback to use in case a temporary is needed to hold the value or address of *EXPR_P. 
*/ gcc_unreachable (); } /* We used to check the predicate here and return immediately if it succeeds. This is wrong; the design is for gimplification to be idempotent, and for the predicates to only test for valid forms, not whether they are fully simplified. */ if (pre_p == NULL) pre_p = &internal_pre; if (post_p == NULL) post_p = &internal_post; /* Remember the last statements added to PRE_P and POST_P. Every new statement added by the gimplification helpers needs to be annotated with location information. To centralize the responsibility, we remember the last statement that had been added to both queues before gimplifying *EXPR_P. If gimplification produces new statements in PRE_P and POST_P, those statements will be annotated with the same location information as *EXPR_P. */ pre_last_gsi = gsi_last (*pre_p); post_last_gsi = gsi_last (*post_p); saved_location = input_location; if (save_expr != error_mark_node && EXPR_HAS_LOCATION (*expr_p)) input_location = EXPR_LOCATION (*expr_p); /* Loop over the specific gimplifiers until the toplevel node remains the same. */ do { /* Strip away as many useless type conversions as possible at the toplevel. */ STRIP_USELESS_TYPE_CONVERSION (*expr_p); /* Remember the expr. */ save_expr = *expr_p; /* Die, die, die, my darling. */ if (save_expr == error_mark_node || (TREE_TYPE (save_expr) && TREE_TYPE (save_expr) == error_mark_node)) { ret = GS_ERROR; break; } /* Do any language-specific gimplification. */ ret = ((enum gimplify_status) lang_hooks.gimplify_expr (expr_p, pre_p, post_p)); if (ret == GS_OK) { if (*expr_p == NULL_TREE) break; if (*expr_p != save_expr) continue; } else if (ret != GS_UNHANDLED) break; /* Make sure that all the cases set 'ret' appropriately. */ ret = GS_UNHANDLED; switch (TREE_CODE (*expr_p)) { /* First deal with the special cases. 
*/ case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: ret = gimplify_self_mod_expr (expr_p, pre_p, post_p, fallback != fb_none); break; case ARRAY_REF: case ARRAY_RANGE_REF: case REALPART_EXPR: case IMAGPART_EXPR: case COMPONENT_REF: case VIEW_CONVERT_EXPR: ret = gimplify_compound_lval (expr_p, pre_p, post_p, fallback ? fallback : fb_rvalue); break; case COND_EXPR: ret = gimplify_cond_expr (expr_p, pre_p, fallback); /* C99 code may assign to an array in a structure value of a conditional expression, and this has undefined behavior only on execution, so create a temporary if an lvalue is required. */ if (fallback == fb_lvalue) { *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p); mark_addressable (*expr_p); ret = GS_OK; } break; case STATIC_CHAIN_EXPR: /* Modula-3: This gets converted fairly early, in tree-nested.c. */ ret = GS_ALL_DONE; break; case CALL_EXPR: ret = gimplify_call_expr (expr_p, pre_p, fallback != fb_none); /* C99 code may assign to an array in a structure returned from a function, and this has undefined behavior only on execution, so create a temporary if an lvalue is required. */ if (fallback == fb_lvalue) { *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p); mark_addressable (*expr_p); ret = GS_OK; } break; case TREE_LIST: gcc_unreachable (); case COMPOUND_EXPR: ret = gimplify_compound_expr (expr_p, pre_p, fallback != fb_none); break; case COMPOUND_LITERAL_EXPR: ret = gimplify_compound_literal_expr (expr_p, pre_p); break; case MODIFY_EXPR: case INIT_EXPR: ret = gimplify_modify_expr (expr_p, pre_p, post_p, fallback != fb_none); break; case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: /* Pass the source location of the outer expression. 
*/ ret = gimplify_boolean_expr (expr_p, saved_location); break; case TRUTH_NOT_EXPR: if (TREE_CODE (TREE_TYPE (*expr_p)) != BOOLEAN_TYPE) { tree type = TREE_TYPE (*expr_p); *expr_p = fold_convert (type, gimple_boolify (*expr_p)); ret = GS_OK; break; } ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); break; case ADDR_EXPR: ret = gimplify_addr_expr (expr_p, pre_p, post_p); break; case VA_ARG_EXPR: ret = gimplify_va_arg_expr (expr_p, pre_p, post_p); break; CASE_CONVERT: if (IS_EMPTY_STMT (*expr_p)) { ret = GS_ALL_DONE; break; } if (VOID_TYPE_P (TREE_TYPE (*expr_p)) || fallback == fb_none) { /* Just strip a conversion to void (or in void context) and try again. */ *expr_p = TREE_OPERAND (*expr_p, 0); ret = GS_OK; break; } ret = gimplify_conversion (expr_p); if (ret == GS_ERROR) break; if (*expr_p != save_expr) break; /* FALLTHRU */ case FIX_TRUNC_EXPR: /* unary_expr: ... | '(' cast ')' val | ... */ ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); break; case INDIRECT_REF: { bool volatilep = TREE_THIS_VOLATILE (*expr_p); bool notrap = TREE_THIS_NOTRAP (*expr_p); tree saved_ptr_type = TREE_TYPE (TREE_OPERAND (*expr_p, 0)); *expr_p = fold_indirect_ref_loc (input_location, *expr_p); if (*expr_p != save_expr) { ret = GS_OK; break; } ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_reg, fb_rvalue); if (ret == GS_ERROR) break; recalculate_side_effects (*expr_p); *expr_p = fold_build2_loc (input_location, MEM_REF, TREE_TYPE (*expr_p), TREE_OPERAND (*expr_p, 0), build_int_cst (saved_ptr_type, 0)); TREE_THIS_VOLATILE (*expr_p) = volatilep; TREE_THIS_NOTRAP (*expr_p) = notrap; ret = GS_OK; break; } /* We arrive here through the various re-gimplifcation paths. */ case MEM_REF: /* First try re-folding the whole thing. 
*/ tmp = fold_binary (MEM_REF, TREE_TYPE (*expr_p), TREE_OPERAND (*expr_p, 0), TREE_OPERAND (*expr_p, 1)); if (tmp) { *expr_p = tmp; recalculate_side_effects (*expr_p); ret = GS_OK; break; } ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_mem_ref_addr, fb_rvalue); if (ret == GS_ERROR) break; recalculate_side_effects (*expr_p); ret = GS_ALL_DONE; break; /* Constants need not be gimplified. */ case INTEGER_CST: case REAL_CST: case FIXED_CST: case STRING_CST: case COMPLEX_CST: case VECTOR_CST: ret = GS_ALL_DONE; break; case CONST_DECL: /* If we require an lvalue, such as for ADDR_EXPR, retain the CONST_DECL node. Otherwise the decl is replaceable by its value. */ /* ??? Should be == fb_lvalue, but ADDR_EXPR passes fb_either. */ if (fallback & fb_lvalue) ret = GS_ALL_DONE; else { *expr_p = DECL_INITIAL (*expr_p); ret = GS_OK; } break; case DECL_EXPR: ret = gimplify_decl_expr (expr_p, pre_p); break; case BIND_EXPR: ret = gimplify_bind_expr (expr_p, pre_p); break; case LOOP_EXPR: ret = gimplify_loop_expr (expr_p, pre_p); break; case SWITCH_EXPR: ret = gimplify_switch_expr (expr_p, pre_p); break; case EXIT_EXPR: ret = gimplify_exit_expr (expr_p); break; case GOTO_EXPR: /* If the target is not LABEL, then it is a computed jump and the target needs to be gimplified. 
*/ if (TREE_CODE (GOTO_DESTINATION (*expr_p)) != LABEL_DECL) { ret = gimplify_expr (&GOTO_DESTINATION (*expr_p), pre_p, NULL, is_gimple_val, fb_rvalue); if (ret == GS_ERROR) break; } gimplify_seq_add_stmt (pre_p, gimple_build_goto (GOTO_DESTINATION (*expr_p))); ret = GS_ALL_DONE; break; case PREDICT_EXPR: gimplify_seq_add_stmt (pre_p, gimple_build_predict (PREDICT_EXPR_PREDICTOR (*expr_p), PREDICT_EXPR_OUTCOME (*expr_p))); ret = GS_ALL_DONE; break; case LABEL_EXPR: ret = GS_ALL_DONE; gcc_assert (decl_function_context (LABEL_EXPR_LABEL (*expr_p)) == current_function_decl); gimplify_seq_add_stmt (pre_p, gimple_build_label (LABEL_EXPR_LABEL (*expr_p))); break; case CASE_LABEL_EXPR: ret = gimplify_case_label_expr (expr_p, pre_p); break; case RETURN_EXPR: ret = gimplify_return_expr (*expr_p, pre_p); break; case CONSTRUCTOR: /* Don't reduce this in place; let gimplify_init_constructor work its magic. Buf if we're just elaborating this for side effects, just gimplify any element that has side-effects. */ if (fallback == fb_none) { unsigned HOST_WIDE_INT ix; tree val; tree temp = NULL_TREE; FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (*expr_p), ix, val) if (TREE_SIDE_EFFECTS (val)) append_to_statement_list (val, &temp); *expr_p = temp; ret = temp ? GS_OK : GS_ALL_DONE; } /* C99 code may assign to an array in a constructed structure or union, and this has undefined behavior only on execution, so create a temporary if an lvalue is required. */ else if (fallback == fb_lvalue) { *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p); mark_addressable (*expr_p); ret = GS_OK; } else ret = GS_ALL_DONE; break; /* The following are special cases that are not handled by the original GIMPLE grammar. */ /* SAVE_EXPR nodes are converted into a GIMPLE identifier and eliminated. 
*/ case SAVE_EXPR: ret = gimplify_save_expr (expr_p, pre_p, post_p); break; case BIT_FIELD_REF: { enum gimplify_status r0, r1, r2; r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_lvalue, fb_either); r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); r2 = gimplify_expr (&TREE_OPERAND (*expr_p, 2), pre_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); ret = MIN (r0, MIN (r1, r2)); } break; case TARGET_MEM_REF: { enum gimplify_status r0 = GS_ALL_DONE, r1 = GS_ALL_DONE; if (TMR_BASE (*expr_p)) r0 = gimplify_expr (&TMR_BASE (*expr_p), pre_p, post_p, is_gimple_mem_ref_addr, fb_either); if (TMR_INDEX (*expr_p)) r1 = gimplify_expr (&TMR_INDEX (*expr_p), pre_p, post_p, is_gimple_val, fb_rvalue); if (TMR_INDEX2 (*expr_p)) r1 = gimplify_expr (&TMR_INDEX2 (*expr_p), pre_p, post_p, is_gimple_val, fb_rvalue); /* TMR_STEP and TMR_OFFSET are always integer constants. */ ret = MIN (r0, r1); } break; case NON_LVALUE_EXPR: /* This should have been stripped above. */ gcc_unreachable (); case ASM_EXPR: ret = gimplify_asm_expr (expr_p, pre_p, post_p); break; case TRY_FINALLY_EXPR: case TRY_CATCH_EXPR: { gimple_seq eval, cleanup; gimple try_; eval = cleanup = NULL; gimplify_and_add (TREE_OPERAND (*expr_p, 0), &eval); gimplify_and_add (TREE_OPERAND (*expr_p, 1), &cleanup); /* Don't create bogus GIMPLE_TRY with empty cleanup. */ if (gimple_seq_empty_p (cleanup)) { gimple_seq_add_seq (pre_p, eval); ret = GS_ALL_DONE; break; } try_ = gimple_build_try (eval, cleanup, TREE_CODE (*expr_p) == TRY_FINALLY_EXPR ? 
GIMPLE_TRY_FINALLY : GIMPLE_TRY_CATCH); if (TREE_CODE (*expr_p) == TRY_CATCH_EXPR) gimple_try_set_catch_is_cleanup (try_, TRY_CATCH_IS_CLEANUP (*expr_p)); gimplify_seq_add_stmt (pre_p, try_); ret = GS_ALL_DONE; break; } case CLEANUP_POINT_EXPR: ret = gimplify_cleanup_point_expr (expr_p, pre_p); break; case TARGET_EXPR: ret = gimplify_target_expr (expr_p, pre_p, post_p); break; case CATCH_EXPR: { gimple c; gimple_seq handler = NULL; gimplify_and_add (CATCH_BODY (*expr_p), &handler); c = gimple_build_catch (CATCH_TYPES (*expr_p), handler); gimplify_seq_add_stmt (pre_p, c); ret = GS_ALL_DONE; break; } case EH_FILTER_EXPR: { gimple ehf; gimple_seq failure = NULL; gimplify_and_add (EH_FILTER_FAILURE (*expr_p), &failure); ehf = gimple_build_eh_filter (EH_FILTER_TYPES (*expr_p), failure); gimple_set_no_warning (ehf, TREE_NO_WARNING (*expr_p)); gimplify_seq_add_stmt (pre_p, ehf); ret = GS_ALL_DONE; break; } case OBJ_TYPE_REF: { enum gimplify_status r0, r1; r0 = gimplify_expr (&OBJ_TYPE_REF_OBJECT (*expr_p), pre_p, post_p, is_gimple_val, fb_rvalue); r1 = gimplify_expr (&OBJ_TYPE_REF_EXPR (*expr_p), pre_p, post_p, is_gimple_val, fb_rvalue); TREE_SIDE_EFFECTS (*expr_p) = 0; ret = MIN (r0, r1); } break; case LABEL_DECL: /* We get here when taking the address of a label. We mark the label as "forced"; meaning it can never be removed and it is a potential target for any computed goto. */ FORCED_LABEL (*expr_p) = 1; ret = GS_ALL_DONE; break; case STATEMENT_LIST: ret = gimplify_statement_list (expr_p, pre_p); break; case WITH_SIZE_EXPR: { gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p == &internal_post ? NULL : post_p, gimple_test_f, fallback); gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); ret = GS_ALL_DONE; } break; case VAR_DECL: case PARM_DECL: ret = gimplify_var_or_parm_decl (expr_p); break; case RESULT_DECL: ret = GS_ALL_DONE; break; case SSA_NAME: /* Allow callbacks into the gimplifier during optimization. 
*/ ret = GS_ALL_DONE; break; case OMP_PARALLEL: gcc_unreachable (); ret = GS_ALL_DONE; break; case OMP_TASK: gcc_unreachable (); ret = GS_ALL_DONE; break; case OMP_FOR: gcc_unreachable (); break; case OMP_SECTIONS: case OMP_SINGLE: gcc_unreachable (); ret = GS_ALL_DONE; break; case OMP_SECTION: case OMP_MASTER: case OMP_ORDERED: case OMP_CRITICAL: case OMP_ATOMIC: gcc_unreachable (); break; case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case TRUTH_XOR_EXPR: /* Classified as tcc_expression. */ goto expr_2; case FMA_EXPR: /* Classified as tcc_expression. */ goto expr_3; case POINTER_PLUS_EXPR: /* Convert ((type *)A)+offset into &A->field_of_type_and_offset. The second is gimple immediate saving a need for extra statement. */ if (TREE_CODE (TREE_OPERAND (*expr_p, 1)) == INTEGER_CST && (tmp = maybe_fold_offset_to_address (EXPR_LOCATION (*expr_p), TREE_OPERAND (*expr_p, 0), TREE_OPERAND (*expr_p, 1), TREE_TYPE (*expr_p)))) { *expr_p = tmp; ret = GS_OK; break; } /* Convert (void *)&a + 4 into (void *)&a[1]. */ if (TREE_CODE (TREE_OPERAND (*expr_p, 0)) == NOP_EXPR && TREE_CODE (TREE_OPERAND (*expr_p, 1)) == INTEGER_CST && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (*expr_p, 0),0))) && (tmp = maybe_fold_offset_to_address (EXPR_LOCATION (*expr_p), TREE_OPERAND (TREE_OPERAND (*expr_p, 0), 0), TREE_OPERAND (*expr_p, 1), TREE_TYPE (TREE_OPERAND (TREE_OPERAND (*expr_p, 0), 0))))) { *expr_p = fold_convert (TREE_TYPE (*expr_p), tmp); ret = GS_OK; break; } /* FALLTHRU */ default: switch (TREE_CODE_CLASS (TREE_CODE (*expr_p))) { case tcc_comparison: /* Handle comparison of objects of non scalar mode aggregates with a call to memcmp. It would be nice to only have to do this for variable-sized objects, but then we'd have to allow the same nest of reference nodes we allow for MODIFY_EXPR and that's too complex. Compare scalar mode aggregates as scalar mode values. Using memcmp for them would be very inefficient at best, and is plain wrong if bitfields are involved. 
*/ { tree type = TREE_TYPE (TREE_OPERAND (*expr_p, 1)); if (!AGGREGATE_TYPE_P (type)) goto expr_2; else if (TYPE_MODE (type) != BLKmode) ret = gimplify_scalar_mode_aggregate_compare (expr_p); else ret = gimplify_variable_sized_compare (expr_p); break; } /* If *EXPR_P does not need to be special-cased, handle it according to its class. */ case tcc_unary: ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); break; case tcc_binary: expr_2: { enum gimplify_status r0, r1; r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); ret = MIN (r0, r1); break; } expr_3: { enum gimplify_status r0, r1, r2; r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); r2 = gimplify_expr (&TREE_OPERAND (*expr_p, 2), pre_p, post_p, is_gimple_val, fb_rvalue); ret = MIN (MIN (r0, r1), r2); break; } case tcc_declaration: case tcc_constant: ret = GS_ALL_DONE; goto dont_recalculate; default: gcc_unreachable (); } recalculate_side_effects (*expr_p); dont_recalculate: break; } gcc_assert (*expr_p || ret != GS_OK); } while (ret == GS_OK); /* If we encountered an error_mark somewhere nested inside, either stub out the statement or propagate the error back out. */ if (ret == GS_ERROR) { if (is_statement) *expr_p = NULL; goto out; } /* This was only valid as a return value from the langhook, which we handled. Make sure it doesn't escape from any other context. */ gcc_assert (ret != GS_UNHANDLED); if (fallback == fb_none && *expr_p && !is_gimple_stmt (*expr_p)) { /* We aren't looking for a value, and we don't have a valid statement. If it doesn't have side-effects, throw it away. 
*/ if (!TREE_SIDE_EFFECTS (*expr_p)) *expr_p = NULL; else if (!TREE_THIS_VOLATILE (*expr_p)) { /* This is probably a _REF that contains something nested that has side effects. Recurse through the operands to find it. */ enum tree_code code = TREE_CODE (*expr_p); switch (code) { case COMPONENT_REF: case REALPART_EXPR: case IMAGPART_EXPR: case VIEW_CONVERT_EXPR: gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, gimple_test_f, fallback); break; case ARRAY_REF: case ARRAY_RANGE_REF: gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, gimple_test_f, fallback); gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, gimple_test_f, fallback); break; default: /* Anything else with side-effects must be converted to a valid statement before we get here. */ gcc_unreachable (); } *expr_p = NULL; } else if (COMPLETE_TYPE_P (TREE_TYPE (*expr_p)) && TYPE_MODE (TREE_TYPE (*expr_p)) != BLKmode) { /* Historically, the compiler has treated a bare reference to a non-BLKmode volatile lvalue as forcing a load. */ tree type = TYPE_MAIN_VARIANT (TREE_TYPE (*expr_p)); /* Normally, we do not want to create a temporary for a TREE_ADDRESSABLE type because such a type should not be copied by bitwise-assignment. However, we make an exception here, as all we are doing here is ensuring that we read the bytes that make up the type. We use create_tmp_var_raw because create_tmp_var will abort when given a TREE_ADDRESSABLE type. */ tree tmp = create_tmp_var_raw (type, "vol"); gimple_add_tmp_var (tmp); gimplify_assign (tmp, *expr_p, pre_p); *expr_p = NULL; } else /* We can't do anything useful with a volatile reference to an incomplete type, so just throw it away. Likewise for a BLKmode type, since any implicit inner load should already have been turned into an explicit one by the gimplification process. */ *expr_p = NULL; } /* If we are gimplifying at the statement level, we're done. Tack everything together and return. 
*/ if (fallback == fb_none || is_statement) { /* Since *EXPR_P has been converted into a GIMPLE tuple, clear it out for GC to reclaim it. */ *expr_p = NULL_TREE; if (!gimple_seq_empty_p (internal_pre) || !gimple_seq_empty_p (internal_post)) { gimplify_seq_add_seq (&internal_pre, internal_post); gimplify_seq_add_seq (pre_p, internal_pre); } /* The result of gimplifying *EXPR_P is going to be the last few statements in *PRE_P and *POST_P. Add location information to all the statements that were added by the gimplification helpers. */ if (!gimple_seq_empty_p (*pre_p)) annotate_all_with_location_after (*pre_p, pre_last_gsi, input_location); if (!gimple_seq_empty_p (*post_p)) annotate_all_with_location_after (*post_p, post_last_gsi, input_location); goto out; } #if 1 /* ENABLE_GIMPLE_CHECKING */ if (*expr_p) { enum tree_code code = TREE_CODE (*expr_p); /* These expressions should already be in gimple IR form. */ gcc_assert (code != MODIFY_EXPR && code != ASM_EXPR && code != BIND_EXPR && code != CATCH_EXPR && (code != COND_EXPR || gimplify_ctxp->allow_rhs_cond_expr) && code != EH_FILTER_EXPR && code != GOTO_EXPR && code != LABEL_EXPR && code != LOOP_EXPR && code != SWITCH_EXPR && code != TRY_FINALLY_EXPR && code != OMP_CRITICAL && code != OMP_FOR && code != OMP_MASTER && code != OMP_ORDERED && code != OMP_PARALLEL && code != OMP_SECTIONS && code != OMP_SECTION && code != OMP_SINGLE); } #endif /* Otherwise we're gimplifying a subexpression, so the resulting value is interesting. If it's a valid operand that matches GIMPLE_TEST_F, we're done. Unless we are handling some post-effects internally; if that's the case, we need to copy into a temporary before adding the post-effects to POST_P. */ if (gimple_seq_empty_p (internal_post) && (*gimple_test_f) (*expr_p)) goto out; /* Otherwise, we need to create a new temporary for the gimplified expression. */ /* We can't return an lvalue if we have an internal postqueue. 
The object the lvalue refers to would (probably) be modified by the postqueue; we need to copy the value out first, which means an rvalue. */ if ((fallback & fb_lvalue) && gimple_seq_empty_p (internal_post) && is_gimple_addressable (*expr_p)) { /* An lvalue will do. Take the address of the expression, store it in a temporary, and replace the expression with an INDIRECT_REF of that temporary. */ tmp = build_fold_addr_expr_loc (input_location, *expr_p); gimplify_expr (&tmp, pre_p, post_p, is_gimple_reg, fb_rvalue); *expr_p = build_simple_mem_ref (tmp); } else if ((fallback & fb_rvalue) && is_gimple_reg_rhs_or_call (*expr_p)) { /* An rvalue will do. Assign the gimplified expression into a new temporary TMP and replace the original expression with TMP. First, make sure that the expression has a type so that it can be assigned into a temporary. */ gcc_assert (!VOID_TYPE_P (TREE_TYPE (*expr_p))); if (!gimple_seq_empty_p (internal_post) || (fallback & fb_lvalue)) /* The postqueue might change the value of the expression between the initialization and use of the temporary, so we can't use a formal temp. FIXME do we care? */ { *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p); if (TREE_CODE (TREE_TYPE (*expr_p)) == COMPLEX_TYPE || TREE_CODE (TREE_TYPE (*expr_p)) == VECTOR_TYPE) DECL_GIMPLE_REG_P (*expr_p) = 1; } else *expr_p = get_formal_tmp_var (*expr_p, pre_p); } else { #if 1 /* ENABLE_GIMPLE_CHECKING */ if (!(fallback & fb_mayfail)) { fprintf (stderr, "gimplification failed:\n"); print_generic_expr (stderr, *expr_p, 0); debug_tree (*expr_p); internal_error ("gimplification failed"); } #endif gcc_assert (fallback & fb_mayfail); /* If this is an asm statement, and the user asked for the impossible, don't die. Fail and let gimplify_asm_expr issue an error. */ ret = GS_ERROR; goto out; } /* Make sure the temporary matches our predicate. 
   */
  gcc_assert ((*gimple_test_f) (*expr_p));

  /* Any side effects deferred to the internal post queue are annotated
     with the current location and flushed onto *PRE_P now that the
     value has been copied out.  */
  if (!gimple_seq_empty_p (internal_post))
    {
      annotate_all_with_location (internal_post, input_location);
      gimplify_seq_add_seq (pre_p, internal_post);
    }

 out:
  input_location = saved_location;
  return ret;
}

/* Look through TYPE for variable-sized objects and gimplify each such
   size that we find.  Add to LIST_P any statements generated.  */

void
gimplify_type_sizes (tree type, gimple_seq *list_p)
{
  tree field, t;

  if (type == NULL || type == error_mark_node)
    return;

  /* We first do the main variant, then copy into any other variants.  */
  type = TYPE_MAIN_VARIANT (type);

  /* Avoid infinite recursion.  */
  if (TYPE_SIZES_GIMPLIFIED (type))
    return;

  TYPE_SIZES_GIMPLIFIED (type) = 1;

  switch (TREE_CODE (type))
    {
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case REAL_TYPE:
    case FIXED_POINT_TYPE:
      gimplify_one_sizepos (&TYPE_MIN_VALUE (type), list_p);
      gimplify_one_sizepos (&TYPE_MAX_VALUE (type), list_p);

      /* The gimplified bounds are shared with every variant of the type.  */
      for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
	{
	  TYPE_MIN_VALUE (t) = TYPE_MIN_VALUE (type);
	  TYPE_MAX_VALUE (t) = TYPE_MAX_VALUE (type);
	}
      break;

    case ARRAY_TYPE:
      /* These types may not have declarations, so handle them here.  */
      gimplify_type_sizes (TREE_TYPE (type), list_p);
      gimplify_type_sizes (TYPE_DOMAIN (type), list_p);
      /* Ensure VLA bounds aren't removed, for -O0 they should be variables
	 with assigned stack slots, for -O1+ -g they should be tracked
	 by VTA.
   */
      /* Only clear DECL_IGNORED_P on artificial bound VAR_DECLs of arrays
	 whose TYPE_DECL (if any) the front end did not mark as ignored.  */
      if (!(TYPE_NAME (type)
	    && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
	    && DECL_IGNORED_P (TYPE_NAME (type)))
	  && TYPE_DOMAIN (type)
	  && INTEGRAL_TYPE_P (TYPE_DOMAIN (type)))
	{
	  t = TYPE_MIN_VALUE (TYPE_DOMAIN (type));
	  if (t && TREE_CODE (t) == VAR_DECL && DECL_ARTIFICIAL (t))
	    DECL_IGNORED_P (t) = 0;
	  t = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
	  if (t && TREE_CODE (t) == VAR_DECL && DECL_ARTIFICIAL (t))
	    DECL_IGNORED_P (t) = 0;
	}
      break;

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      /* Gimplify the offset/size of every field, then recurse into the
	 field's own type.  */
      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	if (TREE_CODE (field) == FIELD_DECL)
	  {
	    gimplify_one_sizepos (&DECL_FIELD_OFFSET (field), list_p);
	    gimplify_one_sizepos (&DECL_SIZE (field), list_p);
	    gimplify_one_sizepos (&DECL_SIZE_UNIT (field), list_p);
	    gimplify_type_sizes (TREE_TYPE (field), list_p);
	  }
      break;

    case POINTER_TYPE:
    case REFERENCE_TYPE:
	/* We used to recurse on the pointed-to type here, which turned out to
	   be incorrect because its definition might refer to variables not
	   yet initialized at this point if a forward declaration is involved.

	   It was actually useful for anonymous pointed-to types to ensure
	   that the sizes evaluation dominates every possible later use of the
	   values.  Restricting to such types here would be safe since there
	   is no possible forward declaration around, but would introduce an
	   undesirable middle-end semantic to anonymity.  We then defer to
	   front-ends the responsibility of ensuring that the sizes are
	   evaluated both early and late enough, e.g. by attaching artificial
	   type declarations to the tree.  */
      break;

    default:
      break;
    }

  gimplify_one_sizepos (&TYPE_SIZE (type), list_p);
  gimplify_one_sizepos (&TYPE_SIZE_UNIT (type), list_p);

  /* Propagate the gimplified sizes to the other variants and mark them
     done so they are not processed again.  */
  for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
    {
      TYPE_SIZE (t) = TYPE_SIZE (type);
      TYPE_SIZE_UNIT (t) = TYPE_SIZE_UNIT (type);
      TYPE_SIZES_GIMPLIFIED (t) = 1;
    }
}

/* A subroutine of gimplify_type_sizes to make sure that *EXPR_P,
   a size or position, has had all of its SAVE_EXPRs evaluated.
   We add any required statements to *STMT_P.  */

void
gimplify_one_sizepos (tree *expr_p, gimple_seq *stmt_p)
{
  tree type, expr = *expr_p;

  /* We don't do anything if the value isn't there, is constant, or contains
     A PLACEHOLDER_EXPR.  We also don't want to do anything if it's already
     a VAR_DECL.  If it's a VAR_DECL from another function, the gimplifier
     will want to replace it with a new variable, but that will cause problems
     if this type is from outside the function.  It's OK to have that here.  */
  if (expr == NULL_TREE || TREE_CONSTANT (expr)
      || TREE_CODE (expr) == VAR_DECL
      || CONTAINS_PLACEHOLDER_P (expr))
    return;

  /* Unshare before gimplifying so the size tree is not shared with
     other users of the type.  */
  type = TREE_TYPE (expr);
  *expr_p = unshare_expr (expr);

  gimplify_expr (expr_p, stmt_p, NULL, is_gimple_val, fb_rvalue);
  expr = *expr_p;

  /* Verify that we've an exact type match with the original expression.
     In particular, we do not wish to drop a "sizetype" in favour of a
     type of similar dimensions.  We don't want to pollute the generic
     type-stripping code with this knowledge because it doesn't matter
     for the bulk of GENERIC/GIMPLE.  It only matters that TYPE_SIZE_UNIT
     and friends retain their "sizetype-ness".  */
  if (TREE_TYPE (expr) != type
      && TREE_CODE (type) == INTEGER_TYPE
      && TYPE_IS_SIZETYPE (type))
    {
      tree tmp;
      gimple stmt;

      /* Restore the sizetype by assigning a NOP_EXPR conversion of the
	 gimplified value into a fresh temporary of the original type.  */
      *expr_p = create_tmp_var (type, NULL);
      tmp = build1 (NOP_EXPR, type, expr);
      stmt = gimplify_assign (*expr_p, tmp, stmt_p);
      gimple_set_location (stmt, EXPR_LOC_OR_HERE (expr));
    }
}

/* Gimplify the body of statements pointed to by BODY_P and return a
   GIMPLE_BIND containing the sequence of GIMPLE statements
   corresponding to BODY_P.  FNDECL is the function decl containing
   *BODY_P.  */

gimple
gimplify_body (tree *body_p, tree fndecl, bool do_parms)
{
  location_t saved_location = input_location;
  gimple_seq parm_stmts, seq;
  gimple outer_bind;
  struct gimplify_ctx gctx;

  timevar_push (TV_TREE_GIMPLIFY);

  /* Initialize for optimize_insn_for_s{ize,peed}_p possibly called during
     gimplification.
*/ default_rtl_profile (); gcc_assert (gimplify_ctxp == NULL); push_gimplify_context (&gctx); /* Unshare most shared trees in the body and in that of any nested functions. It would seem we don't have to do this for nested functions because they are supposed to be output and then the outer function gimplified first, but the g++ front end doesn't always do it that way. */ unshare_body (body_p, fndecl); unvisit_body (body_p, fndecl); if (cgraph_node (fndecl)->origin) nonlocal_vlas = pointer_set_create (); /* Make sure input_location isn't set to something weird. */ input_location = DECL_SOURCE_LOCATION (fndecl); /* Resolve callee-copies. This has to be done before processing the body so that DECL_VALUE_EXPR gets processed correctly. */ parm_stmts = (do_parms) ? gimplify_parameters () : NULL; /* Gimplify the function's body. */ seq = NULL; gimplify_stmt (body_p, &seq); outer_bind = gimple_seq_first_stmt (seq); if (!outer_bind) { outer_bind = gimple_build_nop (); gimplify_seq_add_stmt (&seq, outer_bind); } /* The body must contain exactly one statement, a GIMPLE_BIND. If this is not the case, wrap everything in a GIMPLE_BIND to make it so. */ if (gimple_code (outer_bind) == GIMPLE_BIND && gimple_seq_first (seq) == gimple_seq_last (seq)) ; else outer_bind = gimple_build_bind (NULL_TREE, seq, NULL); *body_p = NULL_TREE; /* If we had callee-copies statements, insert them at the beginning of the function and clear DECL_VALUE_EXPR_P on the parameters. 
*/ if (!gimple_seq_empty_p (parm_stmts)) { tree parm; gimplify_seq_add_seq (&parm_stmts, gimple_bind_body (outer_bind)); gimple_bind_set_body (outer_bind, parm_stmts); for (parm = DECL_ARGUMENTS (current_function_decl); parm; parm = DECL_CHAIN (parm)) if (DECL_HAS_VALUE_EXPR_P (parm)) { DECL_HAS_VALUE_EXPR_P (parm) = 0; DECL_IGNORED_P (parm) = 0; } } if (nonlocal_vlas) { pointer_set_destroy (nonlocal_vlas); nonlocal_vlas = NULL; } pop_gimplify_context (outer_bind); gcc_assert (gimplify_ctxp == NULL); #ifdef ENABLE_TYPES_CHECKING if (!seen_error ()) verify_types_in_gimple_seq (gimple_bind_body (outer_bind)); #endif timevar_pop (TV_TREE_GIMPLIFY); input_location = saved_location; return outer_bind; } typedef char *char_p; /* For DEF_VEC_P. */ DEF_VEC_P(char_p); DEF_VEC_ALLOC_P(char_p,heap); /* Return whether we should exclude FNDECL from instrumentation. */ static bool flag_instrument_functions_exclude_p (tree fndecl) { VEC(char_p,heap) *vec; vec = (VEC(char_p,heap) *) flag_instrument_functions_exclude_functions; if (VEC_length (char_p, vec) > 0) { const char *name; int i; char *s; name = lang_hooks.decl_printable_name (fndecl, 0); FOR_EACH_VEC_ELT (char_p, vec, i, s) if (strstr (name, s) != NULL) return true; } vec = (VEC(char_p,heap) *) flag_instrument_functions_exclude_files; if (VEC_length (char_p, vec) > 0) { const char *name; int i; char *s; name = DECL_SOURCE_FILE (fndecl); FOR_EACH_VEC_ELT (char_p, vec, i, s) if (strstr (name, s) != NULL) return true; } return false; } /* Entry point to the gimplification pass. FNDECL is the FUNCTION_DECL node for the function we want to gimplify. Returns the sequence of GIMPLE statements corresponding to the body of FNDECL. 
*/ void gimplify_function_tree (tree fndecl) { tree oldfn, parm, ret; gimple_seq seq; gimple bind; gcc_assert (!gimple_body (fndecl)); oldfn = current_function_decl; current_function_decl = fndecl; if (DECL_STRUCT_FUNCTION (fndecl)) push_cfun (DECL_STRUCT_FUNCTION (fndecl)); else push_struct_function (fndecl); for (parm = DECL_ARGUMENTS (fndecl); parm ; parm = DECL_CHAIN (parm)) { /* Preliminarily mark non-addressed complex variables as eligible for promotion to gimple registers. We'll transform their uses as we find them. */ if ((TREE_CODE (TREE_TYPE (parm)) == COMPLEX_TYPE || TREE_CODE (TREE_TYPE (parm)) == VECTOR_TYPE) && !TREE_THIS_VOLATILE (parm) && !needs_to_live_in_memory (parm)) DECL_GIMPLE_REG_P (parm) = 1; } ret = DECL_RESULT (fndecl); if ((TREE_CODE (TREE_TYPE (ret)) == COMPLEX_TYPE || TREE_CODE (TREE_TYPE (ret)) == VECTOR_TYPE) && !needs_to_live_in_memory (ret)) DECL_GIMPLE_REG_P (ret) = 1; bind = gimplify_body (&DECL_SAVED_TREE (fndecl), fndecl, true); /* The tree body of the function is no longer needed, replace it with the new GIMPLE body. */ seq = gimple_seq_alloc (); gimple_seq_add_stmt (&seq, bind); gimple_set_body (fndecl, seq); /* If we're instrumenting function entry/exit, then prepend the call to the entry hook and wrap the whole function in a TRY_FINALLY_EXPR to catch the exit hook. */ /* ??? Add some way to ignore exceptions for this TFE. 
*/ if (flag_instrument_function_entry_exit && !DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (fndecl) && !flag_instrument_functions_exclude_p (fndecl)) { tree x; gimple new_bind; gimple tf; gimple_seq cleanup = NULL, body = NULL; tree tmp_var; gimple call; x = implicit_built_in_decls[BUILT_IN_RETURN_ADDRESS]; call = gimple_build_call (x, 1, integer_zero_node); tmp_var = create_tmp_var (ptr_type_node, "return_addr"); gimple_call_set_lhs (call, tmp_var); gimplify_seq_add_stmt (&cleanup, call); x = implicit_built_in_decls[BUILT_IN_PROFILE_FUNC_EXIT]; call = gimple_build_call (x, 2, build_fold_addr_expr (current_function_decl), tmp_var); gimplify_seq_add_stmt (&cleanup, call); tf = gimple_build_try (seq, cleanup, GIMPLE_TRY_FINALLY); x = implicit_built_in_decls[BUILT_IN_RETURN_ADDRESS]; call = gimple_build_call (x, 1, integer_zero_node); tmp_var = create_tmp_var (ptr_type_node, "return_addr"); gimple_call_set_lhs (call, tmp_var); gimplify_seq_add_stmt (&body, call); x = implicit_built_in_decls[BUILT_IN_PROFILE_FUNC_ENTER]; call = gimple_build_call (x, 2, build_fold_addr_expr (current_function_decl), tmp_var); gimplify_seq_add_stmt (&body, call); gimplify_seq_add_stmt (&body, tf); new_bind = gimple_build_bind (NULL, body, gimple_bind_block (bind)); /* Clear the block for BIND, since it is no longer directly inside the function, but within a try block. */ gimple_bind_set_block (bind, NULL); /* Replace the current function body with the body wrapped in the try/finally TF. */ seq = gimple_seq_alloc (); gimple_seq_add_stmt (&seq, new_bind); gimple_set_body (fndecl, seq); } DECL_SAVED_TREE (fndecl) = NULL_TREE; cfun->curr_properties = PROP_gimple_any; current_function_decl = oldfn; pop_cfun (); } /* Some transformations like inlining may invalidate the GIMPLE form for operands. This function traverses all the operands in STMT and gimplifies anything that is not a valid gimple operand. Any new GIMPLE statements are inserted before *GSI_P. 
*/ void gimple_regimplify_operands (gimple stmt, gimple_stmt_iterator *gsi_p) { size_t i, num_ops; tree orig_lhs = NULL_TREE, lhs, t; gimple_seq pre = NULL; gimple post_stmt = NULL; struct gimplify_ctx gctx; push_gimplify_context (&gctx); gimplify_ctxp->into_ssa = gimple_in_ssa_p (cfun); switch (gimple_code (stmt)) { case GIMPLE_COND: gimplify_expr (gimple_cond_lhs_ptr (stmt), &pre, NULL, is_gimple_val, fb_rvalue); gimplify_expr (gimple_cond_rhs_ptr (stmt), &pre, NULL, is_gimple_val, fb_rvalue); break; case GIMPLE_SWITCH: gimplify_expr (gimple_switch_index_ptr (stmt), &pre, NULL, is_gimple_val, fb_rvalue); break; case GIMPLE_OMP_ATOMIC_LOAD: gcc_unreachable (); break; case GIMPLE_ASM: { size_t i, noutputs = gimple_asm_noutputs (stmt); const char *constraint, **oconstraints; bool allows_mem, allows_reg, is_inout; oconstraints = (const char **) alloca ((noutputs) * sizeof (const char *)); for (i = 0; i < noutputs; i++) { tree op = gimple_asm_output_op (stmt, i); constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (op))); oconstraints[i] = constraint; parse_output_constraint (&constraint, i, 0, 0, &allows_mem, &allows_reg, &is_inout); gimplify_expr (&TREE_VALUE (op), &pre, NULL, is_inout ? is_gimple_min_lval : is_gimple_lvalue, fb_lvalue | fb_mayfail); } for (i = 0; i < gimple_asm_ninputs (stmt); i++) { tree op = gimple_asm_input_op (stmt, i); constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (op))); parse_input_constraint (&constraint, 0, 0, noutputs, 0, oconstraints, &allows_mem, &allows_reg); if (TREE_ADDRESSABLE (TREE_TYPE (TREE_VALUE (op))) && allows_mem) allows_reg = 0; if (!allows_reg && allows_mem) gimplify_expr (&TREE_VALUE (op), &pre, NULL, is_gimple_lvalue, fb_lvalue | fb_mayfail); else gimplify_expr (&TREE_VALUE (op), &pre, NULL, is_gimple_asm_val, fb_rvalue); } } break; default: /* NOTE: We start gimplifying operands from last to first to make sure that side-effects on the RHS of calls, assignments and ASMs are executed before the LHS. 
The ordering is not important for other statements. */ num_ops = gimple_num_ops (stmt); orig_lhs = gimple_get_lhs (stmt); for (i = num_ops; i > 0; i--) { tree op = gimple_op (stmt, i - 1); if (op == NULL_TREE) continue; if (i == 1 && (is_gimple_call (stmt) || is_gimple_assign (stmt))) gimplify_expr (&op, &pre, NULL, is_gimple_lvalue, fb_lvalue); else if (i == 2 && is_gimple_assign (stmt) && num_ops == 2 && get_gimple_rhs_class (gimple_expr_code (stmt)) == GIMPLE_SINGLE_RHS) gimplify_expr (&op, &pre, NULL, rhs_predicate_for (gimple_assign_lhs (stmt)), fb_rvalue); else if (i == 2 && is_gimple_call (stmt)) { if (TREE_CODE (op) == FUNCTION_DECL) continue; gimplify_expr (&op, &pre, NULL, is_gimple_call_addr, fb_rvalue); } else gimplify_expr (&op, &pre, NULL, is_gimple_val, fb_rvalue); gimple_set_op (stmt, i - 1, op); } lhs = gimple_get_lhs (stmt); /* If the LHS changed it in a way that requires a simple RHS, create temporary. */ if (lhs && !is_gimple_reg (lhs)) { bool need_temp = false; if (is_gimple_assign (stmt) && num_ops == 2 && get_gimple_rhs_class (gimple_expr_code (stmt)) == GIMPLE_SINGLE_RHS) gimplify_expr (gimple_assign_rhs1_ptr (stmt), &pre, NULL, rhs_predicate_for (gimple_assign_lhs (stmt)), fb_rvalue); else if (is_gimple_reg (lhs)) { if (is_gimple_reg_type (TREE_TYPE (lhs))) { if (is_gimple_call (stmt)) { i = gimple_call_flags (stmt); if ((i & ECF_LOOPING_CONST_OR_PURE) || !(i & (ECF_CONST | ECF_PURE))) need_temp = true; } if (stmt_can_throw_internal (stmt)) need_temp = true; } } else { if (is_gimple_reg_type (TREE_TYPE (lhs))) need_temp = true; else if (TYPE_MODE (TREE_TYPE (lhs)) != BLKmode) { if (is_gimple_call (stmt)) { tree fndecl = gimple_call_fndecl (stmt); if (!aggregate_value_p (TREE_TYPE (lhs), fndecl) && !(fndecl && DECL_RESULT (fndecl) && DECL_BY_REFERENCE (DECL_RESULT (fndecl)))) need_temp = true; } else need_temp = true; } } if (need_temp) { tree temp = create_tmp_reg (TREE_TYPE (lhs), NULL); if (TREE_CODE (orig_lhs) == SSA_NAME) orig_lhs = 
SSA_NAME_VAR (orig_lhs); if (gimple_in_ssa_p (cfun)) temp = make_ssa_name (temp, NULL); gimple_set_lhs (stmt, temp); post_stmt = gimple_build_assign (lhs, temp); if (TREE_CODE (lhs) == SSA_NAME) SSA_NAME_DEF_STMT (lhs) = post_stmt; } } break; } if (gimple_referenced_vars (cfun)) for (t = gimplify_ctxp->temps; t ; t = TREE_CHAIN (t)) add_referenced_var (t); if (!gimple_seq_empty_p (pre)) { if (gimple_in_ssa_p (cfun)) { gimple_stmt_iterator i; for (i = gsi_start (pre); !gsi_end_p (i); gsi_next (&i)) mark_symbols_for_renaming (gsi_stmt (i)); } gsi_insert_seq_before (gsi_p, pre, GSI_SAME_STMT); } if (post_stmt) gsi_insert_after (gsi_p, post_stmt, GSI_NEW_STMT); pop_gimplify_context (NULL); } /* Expands EXPR to list of gimple statements STMTS. GIMPLE_TEST_F specifies the predicate that will hold for the result. If VAR is not NULL, make the base variable of the final destination be VAR if suitable. */ tree force_gimple_operand_1 (tree expr, gimple_seq *stmts, gimple_predicate gimple_test_f, tree var) { tree t; enum gimplify_status ret; struct gimplify_ctx gctx; *stmts = NULL; /* gimple_test_f might be more strict than is_gimple_val, make sure we pass both. Just checking gimple_test_f doesn't work because most gimple predicates do not work recursively. */ if (is_gimple_val (expr) && (*gimple_test_f) (expr)) return expr; push_gimplify_context (&gctx); gimplify_ctxp->into_ssa = gimple_in_ssa_p (cfun); gimplify_ctxp->allow_rhs_cond_expr = true; if (var) expr = build2 (MODIFY_EXPR, TREE_TYPE (var), var, expr); if (TREE_CODE (expr) != MODIFY_EXPR && TREE_TYPE (expr) == void_type_node) { gimplify_and_add (expr, stmts); expr = NULL_TREE; } else { ret = gimplify_expr (&expr, stmts, NULL, gimple_test_f, fb_rvalue); gcc_assert (ret != GS_ERROR); } if (gimple_referenced_vars (cfun)) for (t = gimplify_ctxp->temps; t ; t = DECL_CHAIN (t)) add_referenced_var (t); pop_gimplify_context (NULL); return expr; } /* Expands EXPR to list of gimple statements STMTS. 
   If SIMPLE is true, force the result to be either ssa_name or an invariant,
   otherwise just force it to be a rhs expression.  If VAR is not NULL, make the
   base variable of the final destination be VAR if suitable.  */

/* Convenience wrapper: picks the gimple predicate from SIMPLE and
   delegates to force_gimple_operand_1.  */

tree
force_gimple_operand (tree expr, gimple_seq *stmts, bool simple, tree var)
{
  return force_gimple_operand_1 (expr, stmts,
				 simple ? is_gimple_val : is_gimple_reg_rhs,
				 var);
}

/* Invokes force_gimple_operand_1 for EXPR with parameters GIMPLE_TEST_F
   and VAR.  If some statements are produced, emits them at GSI.
   If BEFORE is true, the statements are appended before GSI, otherwise
   they are appended after it.  M specifies the way GSI moves after
   insertion (GSI_SAME_STMT or GSI_CONTINUE_LINKING are the usual values).  */

tree
force_gimple_operand_gsi_1 (gimple_stmt_iterator *gsi, tree expr,
			    gimple_predicate gimple_test_f,
			    tree var, bool before,
			    enum gsi_iterator_update m)
{
  gimple_seq stmts;

  expr = force_gimple_operand_1 (expr, &stmts, gimple_test_f, var);

  if (!gimple_seq_empty_p (stmts))
    {
      /* In SSA form the new statements' symbols must be marked for
	 renaming before insertion.  */
      if (gimple_in_ssa_p (cfun))
	{
	  gimple_stmt_iterator i;

	  for (i = gsi_start (stmts); !gsi_end_p (i); gsi_next (&i))
	    mark_symbols_for_renaming (gsi_stmt (i));
	}

      if (before)
	gsi_insert_seq_before (gsi, stmts, m);
      else
	gsi_insert_seq_after (gsi, stmts, m);
    }

  return expr;
}

/* Invokes force_gimple_operand_1 for EXPR with parameter VAR.
   If SIMPLE is true, force the result to be either ssa_name or an invariant,
   otherwise just force it to be a rhs expression.  If some statements are
   produced, emits them at GSI.  If BEFORE is true, the statements are
   appended before GSI, otherwise they are appended after it.  M specifies
   the way GSI moves after insertion (GSI_SAME_STMT or GSI_CONTINUE_LINKING
   are the usual values).  */

tree
force_gimple_operand_gsi (gimple_stmt_iterator *gsi, tree expr,
			  bool simple_p, tree var, bool before,
			  enum gsi_iterator_update m)
{
  return force_gimple_operand_gsi_1 (gsi, expr,
				     simple_p
				     ? is_gimple_val : is_gimple_reg_rhs,
				     var, before, m);
}

EXTERN_C_END

#include "gt-gimplify.h"
mkldnn_os.h
/*******************************************************************************
* Copyright 2017 NEC Labs America
* MODIFICATIONS Copyright 2019 NEC Labs America
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
/** \file
 * Handle various compiler/OS restrictions.
 *
 * Provides portable spellings for the `restrict` keyword, `alignas`,
 * and a family of optimization-hint pragma macros (VREG, IVDEP,
 * UNROLL, ShortLoop, OMP/OMPSIMD, ...) for SX, NEC VE (ncc/nc++),
 * Intel, MSVC and GCC-compatible compilers.  On unknown platforms the
 * hint macros expand to nothing, so code using them stays compilable.
 */
#ifndef _MKLDNN_OS_H_
#define _MKLDNN_OS_H_

//#include "os_common.hpp" // not available -- we use mkldnn public API only.

#if 1
#if defined(__ve)
#define strnlen strnlen_s
#endif

/* How is the restrict keyword handled? (disallow it as you encounter
 * errors, please)
 *
 * FIX: the clang check previously tested `__clang` (a macro clang never
 * defines -- the predefined macro is `__clang__`), which both disabled the
 * clang branch and swallowed the NEC VE mapping below it, so ncc/nc++ never
 * got the intended restrict --> __restrict translation.  */
#if defined(_SX)
  /* SX accepts restrict as-is.  */
#elif defined(__clang__)
  /* clang accepts restrict (C) / __restrict (C++) as-is.  */
#elif defined(__ve)
  /* NEC ncc/nc++ use __restrict in their C headers; map the keyword.  */
# if defined(__cplusplus)
#  ifndef __restrict
#   define restrict __restrict /* nc++ restrict-->__restrict */
#  endif
# else
#  if !defined(restrict)
#   define restrict __restrict /* ncc uses __restrict in C headers */
#  endif
# endif
#elif defined(__INTEL_COMPILER) || defined(__GNUC__)
# define restrict /*no-restrict*/
#elif defined(WIN32)
  /* ??? untested */
#else
  /* ??? unknown compiler -- leave restrict alone */
#endif // restrict keyword handling

/* Any restrictions on the alignas attribute?
 * On VE, clamp alignment requests to 16 bytes (the macro does not recurse:
 * a function-like macro's own name is not re-expanded).  */
#ifdef __ve
#define alignas(x) alignas((x) > 16 ? 16 : (x))
#endif

#endif

// ENABLE_OPT_PRAGMAS -- set to 0 to debug pragma-related incorrect assumptions
#if !defined(ENABLE_OPT_PRAGMAS)
//#warning "Unknown system: optimization pragmas NOT USED"
#define ENABLE_OPT_PRAGMAS 1
#endif

// ENABLE_OMP defaults to 1
#if !defined(ENABLE_OMP)
#if defined(_SX)
#elif defined(__ve)
  /* At the Dec. 25th 2017 release, ncc may support OMP.  */
#elif defined(__INTEL_COMPILER)
#elif defined(__GNUC__)
#else
#endif
#if !defined(ENABLE_OMP)
#define ENABLE_OMP 1
#endif
#endif

// -------- compiler-specific pragmas --------
// __ve compile does something with pragma omp, but it is not officially
// supported, so we use C++11 _Pragma to emit pragmas from macros and
// customize pragmas to particular compilers.
//
// Allocation directives:
//   VREG          : hint that array fits into one simd register
//                   There may be many conditions on array access!
//   ALLOC_ON_VREG : hint that array fits into multiple simd registers
//   ALLOC_ON_ADB  : hint that array should be "cached" in special memory bank.
//
// Loop directives apply to an IMMEDIATELY FOLLOWING loop:
//   ShortLoop : hint that for-loop limit is less than max simd register length
//   RETAIN    : hint that array should be kept accessible (cached)
//   IVDEP     : pretend all ptrs are independent (restrict)
//
// NOTE: SX pre-loop macros must be SINGLE ones, because sxcc REQUIRES
// multiple #pragma cdir to be combined, comma-separated.  So you can only
// use ONE pre-loop macro.  If 2 macros, compiler docs say **both** will be
// ignored!  FIXME: SX alloc_on_vreg 2nd arg must be a compile-time constant.
// ALLOC_ON_VREG cannot "decay" into RETAIN, because syntax is different.
// -----------------------------------
#define BENCHDNN_MPRAGMA(str) _Pragma(str)
#define BENCHDNN_STRINGIZE(...) #__VA_ARGS__
/* Stringize the arguments and emit them as a #pragma line.  */
#define PragmaQuote(...) BENCHDNN_MPRAGMA(BENCHDNN_STRINGIZE(__VA_ARGS__))

#if ENABLE_OPT_PRAGMAS && defined(_SX)
/* SX preprocessor generates _Pragma(XXX); sxc++ might be ignoring *some*,
 * based on failure to produce some warning messages.  */
# define VREG(...)          PragmaQuote(cdir vreg(__VA_ARGS__))
# define ALLOC_ON_VREG(...) PragmaQuote(cdir alloc_on_vreg(__VA_ARGS__))
# define ALLOC_ON_ADB(...)  PragmaQuote(cdir alloc_on_adb(__VA_ARGS__))
/* Is there a pre-for-loop RETAIN for SX?  For now, kludge as on_adb.  */
# define RETAIN(...)        PragmaQuote(cdir on_adb(__VA_ARGS__))
# define RETAIN1st(var,...) PragmaQuote(cdir on_adb(var))
# define ShortLoop()        _Pragma("cdir shortloop")
# define ShortLoopTest()    /*?*/
# define IVDEP()            _Pragma("cdir nodep")
# define UNROLL(x)
# define PRAGMA_UNROLL
#elif ENABLE_OPT_PRAGMAS && defined(__ve)
# define VREG(...)          PragmaQuote(_NEC vreg(__VA_ARGS__))
# define ALLOC_ON_VREG(...)
# define ALLOC_ON_ADB(...)
# define RETAIN(...)        PragmaQuote(_NEC retain(__VA_ARGS__))
# define RETAIN1st(var,...) PragmaQuote(_NEC retain(var))
# define ShortLoop()        _Pragma("_NEC shortloop")
# define ShortLoopTest()    _Pragma("_NEC shortloop_reduction")
# define IVDEP()            _Pragma("_NEC ivdep")
# define UNROLL(x)          PragmaQuote(_NEC unroll(x))
# define PRAGMA_UNROLL      PragmaQuote(_NEC unroll(4))
#elif ENABLE_OPT_PRAGMAS && defined(__INTEL_COMPILER)
/* The restrict keyword requires the "-restrict" CFLAG; __restrict__ works
 * anyway.  FIX: #undef first -- the keyword-handling section above may have
 * already defined `restrict` with a different body, and redefining a macro
 * with a different replacement list is ill-formed.  */
# undef restrict
# define restrict __restrict__
# define IVDEP()            _Pragma("ivdep")
# define UNROLL(x)          PragmaQuote(unroll(x))
# define PRAGMA_UNROLL      PragmaQuote(unroll)
// TODO:
# define VREG(...)
# define ALLOC_ON_VREG(...)
# define ALLOC_ON_ADB(...)
# define RETAIN(...)
# define ShortLoop()
# define ShortLoopTest()
#elif ENABLE_OPT_PRAGMAS && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
/* Taken from MSVC code in mkldnn_thread.hpp (MSVC supports OMP 2.0 only).  */
# define collapse(x)
# define UNROLL(x)
# define PRAGMA_UNROLL
# define VREG(...)
# define ALLOC_ON_VREG(...)
# define ALLOC_ON_ADB(...)
# define RETAIN(...)
# define ShortLoop()
# define ShortLoopTest()
/* FIX: IVDEP() was missing on MSVC, leaving every use unresolved.  */
# define IVDEP()
#elif ENABLE_OPT_PRAGMAS && defined(__GNUC__)
# define VREG(...)
# define ALLOC_ON_VREG(...)
# define ALLOC_ON_ADB(...)
# define RETAIN(...)
# define ShortLoop()
# define ShortLoopTest()
# define IVDEP()            _Pragma("GCC ivdep")
#if __GNUC__ >= 8
# define UNROLL(x)          PragmaQuote(GCC unroll x)
# define PRAGMA_UNROLL      PragmaQuote(GCC unroll 4)
#else
# define UNROLL(x)
# define PRAGMA_UNROLL
#endif
#else
/* A new system might begin by ignoring the optimization pragmas.  */
# warning "Please check if _Pragma macros can be defined for this platform"
# define VREG(...)
# define ALLOC_ON_VREG(...)
# define ALLOC_ON_ADB(...)
# define RETAIN(...)
# define ShortLoop()
# define ShortLoopTest()
# define IVDEP()
# define UNROLL(x)
# define PRAGMA_UNROLL
#endif

#if ENABLE_OMP
# define OMP(...) PragmaQuote(omp __VA_ARGS__)
# if defined(_SX)
   /* no support for "simd" pragmas */
# elif defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
# elif defined(__ve)
   /* ncc ignores the simd directive in PRAGMA_OMP_SIMD(...) */
#  define PRAGMASIMD(...)     PragmaQuote(simd __VA_ARGS__)
#  define OMPSIMD(...)        PragmaQuote(omp __VA_ARGS__)
#  define PRAGMA_OMP_SIMD(...) PragmaQuote(omp __VA_ARGS__)
# else /* defined(__GNUC__) or Intel or ... */
#  define PRAGMASIMD(...)     PragmaQuote(simd __VA_ARGS__)
#  define OMPSIMD(...)        PragmaQuote(omp simd __VA_ARGS__)
#  define PRAGMA_OMP_SIMD(...) PragmaQuote(omp simd __VA_ARGS__)
# endif
#endif

/* Fallbacks: anything not defined above expands to nothing.  */
#ifndef PRAGMASIMD
# define PRAGMASIMD(...)
#endif
#ifndef OMPSIMD
# define OMPSIMD(...)
#endif
#ifndef PRAGMA_OMP_SIMD
# define PRAGMA_OMP_SIMD(...)
#endif
#ifndef OMP
# define OMP(...)
#if defined(REF_LRN_HPP) /* mostly ignore: show for cpu_engine compile at least */
# warning "not enabling #pragma omp (mkldnn_os.h)"
#endif
#endif

#endif // _MKLDNN_OS_H_
Layer_LSTM.h
//
//  smarties
//  Copyright (c) 2018 CSE-Lab, ETH Zurich, Switzerland. All rights reserved.
//  Distributed under the terms of the MIT license.
//
//  Created by Guido Novati (novatig@ethz.ch).
//
#ifndef smarties_LSTMLayer_h
#define smarties_LSTMLayer_h

#include "Layers.h"

namespace smarties
{

// Standard LSTM layer. The four gemv results (cell input + input/forget/output
// gates) are computed with a single fused matrix-vector product per time step.
// NOTE(review): the cell-input pre-activation is used linearly (no tanh on the
// candidate); only the state passes through Tanh in forward/backward, and the
// two are consistent with each other.
class LSTMLayer: public Layer
{
  const Uint nInputs, nCells;          // fan-in and number of LSTM cells
  const std::unique_ptr<Function> cell; // activation object (name used in printSpecs)

 public:
  // Weight matrix is (nInputs + nCells) x 4*nCells: inputs and the recurrent
  // previous-step output both feed cell input and all three gates.
  void requiredParameters(std::vector<Uint>& nWeight,
                          std::vector<Uint>& nBiases ) const override {
    //cell, input, forget, output gates all linked to inp and prev LSTM output
    nWeight.push_back(4*nCells * (nInputs + nCells) );
    nBiases.push_back(4*nCells);
  }

  /*
    organization of Activation work memory:
    `suminps` field spans 4 blocks each of size nCells. Each contains the result
    from a matvec multiplication: for the cell's input neuron and for each gate.
    Gates during forward overwrite their suminps with the output of the sigmoid.
        nCells       nCells       nCells       nCells
    |==========| |==========| |==========| |==========|
       cell'       input        forget       output
       Input       Gate         Gate         Gate

    `outvals` field is more complex. First nCells fields will be read by upper
    layer and by recurrent connection at next time step, therefore contain the
    LSTM cell output. Then we store states, cell output b4 outpGate, and
    dErr/dState:
    |==========| |==========| |==========| |==========|
     LSTM layer   cell         pre-Ogate    state error
     output       states       output       signal

    `errvals`: simple again to do backprop with `gemv'
    |==========| |==========| |==========| |==========|
     dE/dInput    dE/dInput    dE/dForget   dE/dOutput
                  Gate         Gate         Gate
  */
  void requiredActivation(std::vector<Uint>& sizes,
                          std::vector<Uint>& bOutputs,
                          std::vector<Uint>& bInputs) const override {
    sizes.push_back(4*nCells);
    bOutputs.push_back(bOutput);
    bInputs.push_back(bInput);
  }

  // Biases are set in initialize() instead; this override is a deliberate no-op.
  void biasInitialValues(const std::vector<Real> init) override { }

  LSTMLayer(Uint _ID, Uint _nInputs, Uint _nCells, std::string funcType,
            bool bOut, Uint iLink) :
    Layer(_ID, _nCells, bOut, false, iLink), nInputs(_nInputs),
    nCells(_nCells), cell(makeFunction(funcType)) {
    spanCompInpGrads = _nInputs;
  }

  // One-line human-readable description of this layer's wiring.
  std::string printSpecs() const override {
    std::ostringstream o;
    o<<"("<<ID<<") "<<cell->name()
     <<std::string(bOutput? " output ":" ")
     <<"LSTM Layer of size:"<<nCells
     <<" linked to Layer:"<<ID-link
     <<" of size:"<<nInputs<<"\n";
    return o.str();
  }

  // Forward pass: one fused gemv over [inputs ; prev output], then the gate
  // sigmoids and the state update  s_t = c~ * i + s_{t-1} * f,  y_t = o * tanh(s_t).
  void forward( const Activation*const prev,
                const Activation*const curr,
                const Parameters*const para) const override
  {
    // suminp contains input to all cell inputs and gates
    // only one matrix-vector multiplication
    nnReal* const suminp = curr->X(ID);
    memcpy(suminp, para->B(ID), 4*nCells*sizeof(nnReal));
    {
      const nnReal* const inputs = curr->Y(ID-link);
      const nnReal* const weight = para->W(ID);
      for (Uint i = 0; i < nInputs; ++i) {
        const nnReal* const W = weight + (4*nCells)*i;
        #pragma omp simd aligned(suminp, inputs, W : VEC_WIDTH)
        for (Uint o = 0; o < 4*nCells; ++o) suminp[o] += inputs[i] * W[o];
      }
    }

    if(prev not_eq nullptr)
    {
      // recurrent contribution: rows below the input rows in the same matrix
      const nnReal* const inputs = prev->Y(ID);
      const nnReal* const weight = para->W(ID) +(4*nCells)*nInputs;
      //first input loop, here input only prev step LSTM's output
      for (Uint i = 0; i < nCells; ++i) {
        const nnReal* const W = weight + (4*nCells)*i;
        #pragma omp simd aligned(suminp, inputs, W : VEC_WIDTH)
        for (Uint o = 0; o < 4*nCells; ++o) suminp[o] += inputs[i] * W[o];
      }
    }

    {
      // Input, forget, output gates output overwrite their input
      Sigm::_eval(suminp +nCells, suminp +nCells, 3*nCells);
      // state is placed onto output work mem, shifted by nCells
      const nnReal*const prevSt = prev==nullptr? nullptr : prev->Y(ID)+nCells;
      nnReal*const output = curr->Y(ID)+ 0*nCells;
      nnReal*const currSt = curr->Y(ID)+ 1*nCells;
      nnReal*const cellOp = curr->Y(ID)+ 2*nCells;
      const nnReal*const inputG = curr->X(ID)+ 1*nCells;
      const nnReal*const forgtG = curr->X(ID)+ 2*nCells;
      const nnReal*const outptG = curr->X(ID)+ 3*nCells;
      for (Uint o=0; o<nCells; ++o) {
        const nnReal oldStatePass = prev==nullptr? 0 : prevSt[o] * forgtG[o];
        currSt[o] = suminp[o] * inputG[o] + oldStatePass;
        cellOp[o] = Tanh::_eval(currSt[o]);
        output[o] = outptG[o] * cellOp[o];
      }
    }
  }

  // Backward pass: turns dE/dy (in errvals) into per-gate pre-activation
  // deltas, accumulating the recurrent state-error from the next time step.
  void backward( const Activation*const prev,
                 const Activation*const curr,
                 const Activation*const next,
                 const Parameters*const grad,
                 const Parameters*const para) const override
  {
    nnReal*const deltas = curr->E(ID); //error signal from above/future
    // Also need pre-outGate cell output
    const nnReal*const cellOutput = curr->Y(ID) + 2*nCells;
    // Will also need to copy the state's error signal, use last available slot:
    nnReal*const stateDelta = curr->Y(ID) + 3*nCells;

    const nnReal*const cellInpt = curr->X(ID);
    const nnReal*const IGate = curr->X(ID)+ 1*nCells;
    const nnReal*const FGate = curr->X(ID)+ 2*nCells;
    const nnReal*const OGate = curr->X(ID)+ 3*nCells;
    // prevState, nextState's delta and next output of forget gate
    const nnReal*const prvState = prev==nullptr? nullptr :prev->Y(ID) +1*nCells;
    const nnReal*const nxtStErr = next==nullptr? nullptr :next->Y(ID) +3*nCells;
    const nnReal*const nxtFGate = next==nullptr? nullptr :next->X(ID) +2*nCells;

    for (Uint o=0; o<nCells; ++o) {
      const nnReal D = deltas[o]; //before overwriting it
      //                  | derivative of tanh     |
      const nnReal diff = (1-cellOutput[o]*cellOutput[o]) * deltas[o];
      // Compute state's error signal
      stateDelta[o] = diff*OGate[o] +(next==nullptr?0: nxtStErr[o]*nxtFGate[o]);
      // Compute deltas for cell input and gates
      deltas[o+0*nCells] = IGate[o] * stateDelta[o];
      //                    | sigmoid derivative |
      deltas[o+1*nCells] = IGate[o]*(1-IGate[o]) * cellInpt[o] * stateDelta[o];
      if(prev not_eq nullptr)
        deltas[o+2*nCells] = FGate[o]*(1-FGate[o]) * prvState[o] * stateDelta[o];
      else deltas[o+2*nCells] = 0;
      deltas[o+3*nCells] = OGate[o]*(1-OGate[o]) * D * cellOutput[o];
    }
    // base-class gemv accumulates weight/bias gradients and back-propagates
    Layer::backward(nInputs, 4*nCells, 4*nCells, nCells, prev,curr,next, grad,para);
  }

  // Weight init: uniform in +-fac*initFactor; gate biases primed so the forget
  // gate starts open and input/output gates start closed (LSTM_PRIME_FAC).
  void initialize(std::mt19937& G, const Parameters*const W,
                  Real initializationFac) const override
  {
    const nnReal fac = (initializationFac>0) ? initializationFac : 1;
    const nnReal init = fac * cell->initFactor(nInputs, nCells);
    std::uniform_real_distribution<nnReal> dis(-init, init);
    { // forget gate starts open, inp/out gates are closed
      nnReal* const BB = W->B(ID);
      for(Uint o=0*nCells; o<1*nCells; ++o) BB[o]=0;
      //for(Uint o=1*nCells; o<2*nCells; ++o) BB[o]=dis(*gen)+LSTM_PRIME_FAC;
      //for(Uint o=2*nCells; o<3*nCells; ++o) BB[o]=dis(*gen)-LSTM_PRIME_FAC;
      //for(Uint o=3*nCells; o<4*nCells; ++o) BB[o]=dis(*gen)+LSTM_PRIME_FAC;
      for(Uint o=1*nCells; o<2*nCells; ++o) BB[o]=0-LSTM_PRIME_FAC;
      for(Uint o=2*nCells; o<3*nCells; ++o) BB[o]=0+LSTM_PRIME_FAC;
      for(Uint o=3*nCells; o<4*nCells; ++o) BB[o]=0-LSTM_PRIME_FAC;
    }
    {
      nnReal* const weight = W->W(ID);
      for(Uint w=0; w<4*nCells*(nInputs+nCells); ++w) weight[w] = dis(G);
    }
  }

  // Serialize weights then biases as float; returns number of values written.
  size_t save(const Parameters * const para,
              float * tmp) const override
  {
    const nnReal* const bias = para->B(ID);
    const nnReal* const weight = para->W(ID);
    for (Uint n=0; n<4*nCells * (nInputs+nCells); ++n)
      *(tmp++) = (float) weight[n];
    for (Uint n=0; n<4*nCells; ++n)
      *(tmp++) = (float) bias[n];
    return 4*nCells * (nInputs+nCells + 1);
  }

  // Inverse of save(): reads weights then biases; returns values consumed.
  size_t restart(const Parameters * const para,
                 const float * tmp) const override
  {
    nnReal* const bias = para->B(ID);
    nnReal* const weight = para->W(ID);
    for (Uint n=0; n<4*nCells * (nInputs+nCells); ++n)
      weight[n] = (nnReal) *(tmp++);
    for (Uint n=0; n<4*nCells; ++n)
      bias[n] = (nnReal) *(tmp++);
    return 4*nCells * (nInputs+nCells + 1);
  }
};

} // end namespace smarties
#endif // smarties_LSTMLayer_h
questao02.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "omp.h"

/*
 * Reads an even, positive count N, fills an array with N random non-negative
 * ints, then sums abs(a[i]*a[i+1]) over consecutive pairs, first sequentially
 * and then in parallel with OpenMP, printing both sums and timings.
 * Returns 0 on success, 1 on invalid input or allocation failure.
 */
int main()
{
    int *numeros;
    int sum_s = 0, sum_p = 0;
    int entrada, i;
    double t1_s, t2_s, t1_p, t2_p;

    srand(time(0));

    printf("Informe um numero par positivo: ");
    /* BUGFIX: also reject a failed read and non-positive values; the prompt
     * asks for a *positive* even number, but only evenness was checked, so a
     * negative entry reached malloc() with a negative size. */
    if(scanf("%d", &entrada) != 1 || entrada <= 0 || entrada % 2 != 0)
    {
        printf("Entrada invalida");
        return 1;
    }

    numeros = (int *)malloc(entrada * sizeof(int));
    if(numeros == NULL)
    {
        printf("Entrada invalida");
        return 1;
    }

    for(i = 0; i < entrada; i++)
    {
        numeros[i] = abs(rand());
    }

    /* NOTE(review): numeros[i] * numeros[i+1] can overflow int for large
     * random values; kept as in the original (both versions overflow the
     * same way, so the comparison remains fair). */
    t1_s = omp_get_wtime();
    for(i = 0; i < entrada; i = i + 2)
    {
        sum_s += abs(numeros[i] * numeros[i+1]);
    }
    t2_s = omp_get_wtime();

    t1_p = omp_get_wtime();
    #pragma omp parallel
    {
        int mult_local = 0;
        #pragma omp for schedule(static, 1)
        for(i = 0; i < entrada; i = i + 2)
        {
            /* BUGFIX: the original assigned (=) instead of accumulating (+=),
             * so each thread contributed only its *last* product and sum_p
             * disagreed with sum_s. */
            mult_local += abs(numeros[i] * numeros[i+1]);
        }
        /* combine the per-thread partial sums */
        #pragma omp critical
        {
            sum_p += mult_local;
        }
    }
    t2_p = omp_get_wtime();

    printf("\nSequencial");
    printf("\nSoma : %d", sum_s);
    printf("\nTempo: %lf\n", t2_s - t1_s);

    printf("\nParalelo");
    printf("\nSoma : %d", sum_p);
    printf("\nTempo: %lf\n", t2_p - t1_p);

    free(numeros);
    return 0;
}
cachebw.c
/****************************************************************************** ** Copyright (c) 2013-2020, Alexander Heinecke ** ** All rights reserved. ** ** ** ** Redistribution and use in source and binary forms, with or without ** ** modification, are permitted provided that the following conditions ** ** are met: ** ** 1. Redistributions of source code must retain the above copyright ** ** notice, this list of conditions and the following disclaimer. ** ** 2. Redistributions in binary form must reproduce the above copyright ** ** notice, this list of conditions and the following disclaimer in the ** ** documentation and/or other materials provided with the distribution. ** ** 3. Neither the name of the copyright holder nor the names of its ** ** contributors may be used to endorse or promote products derived ** ** from this software without specific prior written permission. ** ** ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
** ******************************************************************************/ #if 0 #define USE_PERF_COUNTERS #endif #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef _OPENMP #include <omp.h> #endif #ifdef USE_PERF_COUNTERS #include "./../common/perf_counter_markers.h" #endif #ifdef NTIMES #if NTIMES<=1 # define NTIMES 10000000 #endif #endif #ifndef NTIMES # define NTIMES 10000000 #endif # ifndef MIN # define MIN(x,y) ((x)<(y)?(x):(y)) # endif # ifndef MAX # define MAX(x,y) ((x)>(y)?(x):(y)) # endif #ifdef __VSX__ void run_gpr_kernel(size_t* i_data, size_t i_chunkSize) { __asm__ __volatile__("li 12, 0x0\n\t" "1:\n\t" "ld 29, 0(%0)\n\t" "ld 28, 8(%0)\n\t" "ld 27, 16(%0)\n\t" "ld 26, 24(%0)\n\t" "ld 25, 32(%0)\n\t" "ld 24, 40(%0)\n\t" "ld 23, 48(%0)\n\t" "ld 22, 56(%0)\n\t" "ld 21, 64(%0)\n\t" "ld 20, 72(%0)\n\t" "ld 19, 80(%0)\n\t" "ld 18, 88(%0)\n\t" "ld 17, 96(%0)\n\t" "ld 16, 104(%0)\n\t" "ld 15, 112(%0)\n\t" "ld 14, 120(%0)\n\t" "add %0, %0, 12\n\t" "subic. %1, %1, 0x10\n\t" "bne 1b\n\t" : : "r"(i_data), "r"(i_chunkSize) : "r12","r14","r15","r16","r17","r18","r19","r20","r21","r22","r23","r24","r25","r26","r27","r28","r29"); } void run_vsx_kernel(double* i_data, size_t i_chunkSize) { __asm__ __volatile__("li 27, 0x10\n\t" "li 26, 0x20\n\t" "li 25, 0x30\n\t" "li 24, 0x40\n\t" "li 23, 0x50\n\t" "li 22, 0x60\n\t" "li 21, 0x70\n\t" "li 20, 0x80\n\t" "1:\n\t" "lxvd2x 63, 0, %0\n\t" "lxvd2x 62, 27, %0\n\t" "lxvd2x 61, 26, %0\n\t" "lxvd2x 60, 25, %0\n\t" "lxvd2x 59, 24, %0\n\t" "lxvd2x 58, 23, %0\n\t" "lxvd2x 57, 22, %0\n\t" "lxvd2x 56, 21, %0\n\t" "add %0, %0, 20\n\t" "lxvd2x 55, 0, %0\n\t" "lxvd2x 54, 27, %0\n\t" "lxvd2x 53, 26, %0\n\t" "lxvd2x 52, 25, %0\n\t" "lxvd2x 51, 24, %0\n\t" "lxvd2x 50, 23, %0\n\t" "lxvd2x 49, 22, %0\n\t" "lxvd2x 48, 21, %0\n\t" "add %0, %0, 20\n\t" "subic. 
%1, %1, 0x20\n\t" "bne 1b\n\t" : : "r"(i_data), "r"(i_chunkSize) : "r20", "r21","r22","r23","r24","r25","r26","r27","vs63","vs62","vs61","vs60","vs59","vs58","vs57","vs56","vs55","vs54","vs53","vs52","vs51","vs50","vs49","vs48"); } #endif inline double sec(struct timeval start, struct timeval end) { return ((double)(((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)))) / 1.0e6; } inline void run_benchmark( double* i_data, size_t i_arraySize, size_t i_copies, size_t i_iters ) { // we do manual reduction here as we don't rely on a smart OpenMP implementation #pragma omp parallel { #ifdef _OPENMP size_t l_tid = omp_get_thread_num(); size_t l_numThreads = omp_get_num_threads(); #else size_t l_tid = 0; size_t l_numThreads = 1; #endif size_t l_arraySize = i_arraySize/i_copies; size_t threads_per_copy = l_numThreads / i_copies; double* l_locAddr = i_data + (l_arraySize*(l_tid/threads_per_copy)); size_t* l_parraySize = &(l_arraySize); size_t l_i = 0; for( l_i = 0; l_i < i_iters; l_i++ ) { #ifdef __VSX__ //if (l_tid % 2 == 0) { run_vsx_kernel(l_locAddr, l_arraySize); //} else { // run_gpr_kernel((size_t*)l_locAddr, l_chunkSize); //} #endif #ifdef __ARM_NEON __asm__ __volatile__("mov x0, %0\n\t" "mov x1, %1\n\t"" "1:\n\t" "ld1 {v0.2d}, [x0],16\n\t" "ld1 {v1.2d}, [x0],16\n\t" "ld1 {v2.2d}, [x0],16\n\t" "ld1 {v3.2d}, [x0],16\n\t" "ld1 {v4.2d}, [x0],16\n\t" "ld1 {v5.2d}, [x0],16\n\t" "ld1 {v6.2d}, [x0],16\n\t" "ld1 {v7.2d}, [x0],16\n\t" "ld1 {v8.2d}, [x0],16\n\t" "ld1 {v9.2d}, [x0],16\n\t" "ld1 {v10.2d}, [x0],16\n\t" "ld1 {v11.2d}, [x0],16\n\t" "ld1 {v12.2d}, [x0],16\n\t" "ld1 {v13.2d}, [x0],16\n\t" "ld1 {v14.2d}, [x0],16\n\t" "ld1 {v15.2d}, [x0],16\n\t" "ld1 {v16.2d}, [x0],16\n\t" "ld1 {v17.2d}, [x0],16\n\t" "ld1 {v18.2d}, [x0],16\n\t" "ld1 {v19.2d}, [x0],16\n\t" "ld1 {v20.2d}, [x0],16\n\t" "ld1 {v21.2d}, [x0],16\n\t" "ld1 {v22.2d}, [x0],16\n\t" "ld1 {v23.2d}, [x0],16\n\t" "ld1 {v24.2d}, [x0],16\n\t" "ld1 {v25.2d}, [x0],16\n\t" "ld1 {v26.2d}, 
[x0],16\n\t" "ld1 {v27.2d}, [x0],16\n\t" "ld1 {v28.2d}, [x0],16\n\t" "ld1 {v29.2d}, [x0],16\n\t" "ld1 {v30.2d}, [x0],16\n\t" "ld1 {v31.2d}, [x0],16\n\t" "sub x1, x1, #64\n\t" "cbnz x1, 1b\n\t" : : "r" (l_locAddr), "r" (l_parraySize) : "x0","x1","v0","v1","v2","v4","v5","v6","v7","v8","v9","v10","v11","v12","v13","v14","v15","v16","v17","v18","v19","v20","v21","v22","v23","v24","v25","v26","v27","v28","v29","v30","v31"); #endif #ifdef __AVX512F__ __asm__ __volatile__("movq %0, %%r8\n\t" "movq %1, %%r10\n\t" "movq (%%r10), %%r9\n\t" "1:\n\t" "subq $256, %%r9\n\t" "vmovapd 0(%%r8), %%zmm0\n\t" "vmovapd 64(%%r8), %%zmm1\n\t" "vmovapd 128(%%r8), %%zmm2\n\t" "vmovapd 192(%%r8), %%zmm3\n\t" "vmovapd 256(%%r8), %%zmm4\n\t" "vmovapd 320(%%r8), %%zmm5\n\t" "vmovapd 384(%%r8), %%zmm6\n\t" "vmovapd 448(%%r8), %%zmm7\n\t" "vmovapd 512(%%r8), %%zmm8\n\t" "vmovapd 576(%%r8), %%zmm9\n\t" "vmovapd 640(%%r8), %%zmm10\n\t" "vmovapd 704(%%r8), %%zmm11\n\t" "vmovapd 768(%%r8), %%zmm12\n\t" "vmovapd 832(%%r8), %%zmm13\n\t" "vmovapd 896(%%r8), %%zmm14\n\t" "vmovapd 960(%%r8), %%zmm15\n\t" "vmovapd 1024(%%r8), %%zmm16\n\t" "vmovapd 1088(%%r8), %%zmm17\n\t" "vmovapd 1152(%%r8), %%zmm18\n\t" "vmovapd 1216(%%r8), %%zmm19\n\t" "vmovapd 1280(%%r8), %%zmm20\n\t" "vmovapd 1344(%%r8), %%zmm21\n\t" "vmovapd 1408(%%r8), %%zmm22\n\t" "vmovapd 1472(%%r8), %%zmm23\n\t" "vmovapd 1536(%%r8), %%zmm24\n\t" "vmovapd 1600(%%r8), %%zmm25\n\t" "vmovapd 1664(%%r8), %%zmm26\n\t" "vmovapd 1728(%%r8), %%zmm27\n\t" "vmovapd 1792(%%r8), %%zmm28\n\t" "vmovapd 1856(%%r8), %%zmm29\n\t" "vmovapd 1920(%%r8), %%zmm30\n\t" "vmovapd 1984(%%r8), %%zmm31\n\t" "addq $2048, %%r8\n\t" "cmpq $0, %%r9\n\t" "jg 1b\n\t" : : "m"(l_locAddr), "m"(l_parraySize) : "r8","r9","r10","xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7","xmm8","xmm9","xmm10","xmm11","xmm12","xmm13","xmm14","xmm15","xmm16","xmm17","xmm18","xmm19","xmm20","xmm21","xmm22","xmm23","xmm24","xmm25","xmm26","xmm27","xmm28","xmm29","xmm30","xmm31"); #elif __AVX__ 
__asm__ __volatile__("movq %0, %%r8\n\t" "movq %1, %%r10\n\t" "movq (%%r10), %%r9\n\t" "1:\n\t" "subq $64, %%r9\n\t" "vmovapd 0(%%r8), %%ymm0\n\t" "vmovapd 32(%%r8), %%ymm1\n\t" "vmovapd 64(%%r8), %%ymm2\n\t" "vmovapd 96(%%r8), %%ymm3\n\t" "vmovapd 128(%%r8), %%ymm4\n\t" "vmovapd 160(%%r8), %%ymm5\n\t" "vmovapd 192(%%r8), %%ymm6\n\t" "vmovapd 224(%%r8), %%ymm7\n\t" "vmovapd 256(%%r8), %%ymm8\n\t" "vmovapd 288(%%r8), %%ymm9\n\t" "vmovapd 320(%%r8), %%ymm10\n\t" "vmovapd 352(%%r8), %%ymm11\n\t" "vmovapd 384(%%r8), %%ymm12\n\t" "vmovapd 416(%%r8), %%ymm13\n\t" "vmovapd 448(%%r8), %%ymm14\n\t" "vmovapd 480(%%r8), %%ymm15\n\t" "addq $512, %%r8\n\t" "cmpq $0, %%r9\n\t" "jg 1b\n\t" : : "m"(l_locAddr), "m"(l_parraySize) : "r8","r9","r10","xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7","xmm8","xmm9","xmm10","xmm11","xmm12","xmm13","xmm14","xmm15"); #elif __SSE3__ __asm__ __volatile__("movq %0, %%r8\n\t" "movq %1, %%r10\n\t" "movq (%%r10), %%r9\n\t" "1:\n\t" "subq $32, %%r9\n\t" "movapd 0(%%r8), %%xmm0\n\t" "movapd 16(%%r8), %%xmm1\n\t" "movapd 32(%%r8), %%xmm2\n\t" "movapd 48(%%r8), %%xmm3\n\t" "movapd 64(%%r8), %%xmm4\n\t" "movapd 80(%%r8), %%xmm5\n\t" "movapd 96(%%r8), %%xmm6\n\t" "movapd 112(%%r8), %%xmm7\n\t" "movapd 128(%%r8), %%xmm8\n\t" "movapd 144(%%r8), %%xmm9\n\t" "movapd 160(%%r8), %%xmm10\n\t" "movapd 176(%%r8), %%xmm11\n\t" "movapd 192(%%r8), %%xmm12\n\t" "movapd 208(%%r8), %%xmm13\n\t" "movapd 224(%%r8), %%xmm14\n\t" "movapd 240(%%r8), %%xmm15\n\t" "addq $256, %%r8\n\t" "cmpq $0, %%r9\n\t" "jg 1b\n\t" : : "m"(l_locAddr), "m"(l_parraySize) : "r8","r9","r10","xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7","xmm8","xmm9","xmm10","xmm11","xmm12","xmm13","xmm14","xmm15"); #endif } } } int main(int argc, char* argv[]) { size_t l_numThreads = 1; size_t l_arraySize_0 = 256; size_t l_arrayFactor = 2; size_t l_arraySteps = 1; size_t l_iters_0 = 1; size_t l_copies = 1; size_t i = 0; #ifdef USE_PERF_COUNTERS ctrs_uncore a, b, s; bw_gibs bw_cnt; 
setup_uncore_ctrs( CTRS_EXP_DRAM_CAS ); zero_uncore_ctrs( &a ); zero_uncore_ctrs( &b ); zero_uncore_ctrs( &s ); #endif if (argc != 6) { printf("#doubles increase-factor increase-steps copies #reps\n"); return -1; } l_arraySize_0 = atoi(argv[1]); l_arrayFactor = atoi(argv[2]); l_arraySteps = atoi(argv[3]); l_copies = atoi(argv[4]); l_iters_0 = atoi(argv[5]); #ifdef _OPENMP #pragma omp parallel { #pragma omp master l_numThreads = omp_get_num_threads(); } #else l_numThreads = 1; #endif if (l_arraySize_0 % 256 != 0) { printf("ERROR % 256\n"); exit(-1); } printf("Number of threads: %lld\n", l_numThreads); printf("Using %i private Read Buffers\n", l_copies); l_arraySize_0 *= l_copies; printf("KiB-per-core-read,GiB/s,Time\n"); for ( i = 0 ; i < l_arraySteps; ++i ) { double* l_data; size_t l_n = 0; double l_avgTime; struct timeval l_startTime, l_endTime; size_t l_arraySize = (i == 0) ? l_arraySize_0 : l_arraySize_0 * i * l_arrayFactor; size_t l_iters = (i == 0) ? l_iters_0 : l_iters_0 / (i * l_arrayFactor); double l_size = (double)((size_t)l_arraySize*sizeof(double)); // init data posix_memalign((void**)&l_data, 2097152, ((size_t)l_arraySize)*sizeof(double));; #pragma omp parallel for private(l_n) for ( l_n = 0; l_n < l_arraySize; l_n++ ) { l_data[l_n] = (double)l_n; } // pre-heat caches run_benchmark( l_data, l_arraySize, l_copies, 5 ); // run benchmark #ifdef USE_PERF_COUNTERS read_uncore_ctrs( &a ); #endif gettimeofday(&l_startTime, NULL); run_benchmark( l_data, l_arraySize, l_copies, l_iters ); gettimeofday(&l_endTime, NULL); l_avgTime = sec(l_startTime, l_endTime); #ifdef USE_PERF_COUNTERS read_uncore_ctrs( &b ); difa_uncore_ctrs( &a, &b, &s ); divi_uncore_ctrs( &s, l_iters ); #endif // postprocess timing l_avgTime /= (double)l_iters; // output printf("%f,%f,%f\n", (l_size/l_copies)/1024.0, (((l_size/l_copies)*l_numThreads)/(1024.0*1024.0*1024.0))/l_avgTime, l_avgTime); #ifdef USE_PERF_COUNTERS get_cas_ddr_bw_uncore_ctrs( &s, l_avgTime, &bw_cnt ); printf("%f,%f,%f,%f 
(counters)\n", (l_size/l_copies)/1024.0, bw_cnt.rd, bw_cnt.wr, l_avgTime); #endif free(l_data); } return 0; }
cancel_worksharing.c
// RUN: %libomp-compile && env OMP_CANCELLATION=true %libomp-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// Current GOMP interface implementation does not support cancellation; icc 16 does not distinguish between sections and loops
// XFAIL: gcc, icc-16

#include "callback.h"
#include <unistd.h>

// OMPT regression test: verifies that cancel/cancellation-point events are
// reported for both worksharing constructs (for-loops and sections).
// Thread pairing: one thread activates the cancel, the other (held back by
// OMPT_WAIT + delay) detects it at its cancellation point.
// NOTE: the "// CHECK"/"// RUN" comment lines are FileCheck/lit directives
// and must not be edited or reordered.
int main()
{
  int condition=0;
  #pragma omp parallel num_threads(2)
  {
    int x = 0;
    int i;
    #pragma omp for
    for(i = 0; i < 2; i++)
    {
      if(i == 0)
      {
        x++;
        OMPT_SIGNAL(condition);
        // iteration 0 activates cancellation of the loop
        #pragma omp cancel for
      }
      else
      {
        x++;
        OMPT_WAIT(condition,1);
        delay(10000);
        // iteration 1 waits until cancellation is active, then detects it
        #pragma omp cancellation point for
      }
    }
  }

  #pragma omp parallel num_threads(2)
  {
    #pragma omp sections
    {
      #pragma omp section
      {
        OMPT_SIGNAL(condition);
        #pragma omp cancel sections
      }
      #pragma omp section
      {
        OMPT_WAIT(condition,2);
        delay(10000);
        #pragma omp cancellation point sections
      }
    }
  }

  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_cancel'

  // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]

  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_create: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=[[NULL]], new_task_id=[[TASK_ID:[0-9]+]], codeptr_ra=[[NULL]], task_type=ompt_task_initial=1, has_dependences=no

  // cancel for and sections
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_cancel: task_data=[[TASK_ID:[0-9]+]], flags=ompt_cancel_loop|ompt_cancel_activated=20, codeptr_ra={{0x[0-f]*}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_cancel: task_data=[[TASK_ID:[0-9]+]], flags=ompt_cancel_sections|ompt_cancel_activated=18, codeptr_ra={{0x[0-f]*}}
  // CHECK: {{^}}[[OTHER_THREAD_ID:[0-9]+]]: ompt_event_cancel: task_data=[[TASK_ID:[0-9]+]], flags=ompt_cancel_loop|ompt_cancel_detected=36, codeptr_ra={{0x[0-f]*}}
  // CHECK: {{^}}[[OTHER_THREAD_ID:[0-9]+]]: ompt_event_cancel: task_data=[[TASK_ID:[0-9]+]], flags=ompt_cancel_sections|ompt_cancel_detected=34, codeptr_ra={{0x[0-f]*}}

  return 0;
}
pr57824.c
/* PR preprocessor/57824 */
/* { dg-do compile } */
/* { dg-options "-std=gnu99 -fopenmp" { target c } } */
/* { dg-options "-std=c++11 -fopenmp" { target c++ } } */

void bar ();

/* GCC regression test: a C++11 raw string literal spanning multiple lines
   inside an OpenMP pragma must not confuse the preprocessor.  The raw
   string content (including its newlines) is the test fixture — do not
   reformat it. */
void
foo ()
{
#pragma omp parallel num_threads(sizeof R"(
abc
)")
  bar ();
}
GB_matvec_type_name.c
//------------------------------------------------------------------------------ // GB_matvec_type_name: return the name of the type of a matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB.h" GrB_Info GB_matvec_type_name // return the name of the type of a matrix ( char *type_name, // name of the type (char array of size at least // GxB_MAX_NAME_LEN, owned by the user application). const GrB_Matrix A, // matrix to query GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_RETURN_IF_NULL (type_name) ; ASSERT_MATRIX_OK (A, "A for type_name", GB0) ; //-------------------------------------------------------------------------- // return the type //-------------------------------------------------------------------------- memcpy (type_name, A->type->name, GxB_MAX_NAME_LEN) ; #pragma omp flush return (GrB_SUCCESS) ; }
helloomp.c
#include <stdio.h>
#include <omp.h>

/*
 * This source code can be downloaded from supercomputingblog.com
 * Basic OpenMP demo: fork a team of threads, have every thread report its
 * id, and let thread 0 additionally report the team size.
 */
int main(int argc, char* argv[])
{
    /* Outside any parallel region: printed exactly once. */
    printf("Starting Program!\n");

    #pragma omp parallel
    {
        /* Declared inside the region, so each thread gets its own copy
         * (no private() clause needed). */
        int tid = omp_get_thread_num();
        printf("Running on thread %d\n", tid);

        /* Only the master thread reports the team size. */
        if (tid == 0)
        {
            int nThreads = omp_get_num_threads();
            printf("Total number of threads: %d\n", nThreads);
        }
    }

    /* Back to a single thread: printed exactly once. */
    printf("Finished!\n");
    return 0;
}
symmetry.c
/* symmetry.c */
/* Copyright (C) 2008 Atsushi Togo */

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cell.h"
#include "lattice.h"
#include "mathfunc.h"
#include "pointgroup.h"
#include "primitive.h"
#include "symmetry.h"
#include "debug.h"

/* Cells with at least this many atoms use the OpenMP translation search. */
#define NUM_ATOMS_CRITERION_FOR_OPENMP 1000
/* Factor by which symprec is tightened per retry in get_operations(). */
#define REDUCE_RATE 0.95
#define PI 3.14159265358979323846

/* Tolerance of angle between lattice vectors in degrees */
/* Negative value invokes converter from symprec. */
static double angle_tolerance = -1.0;

/* Candidate rotation axes (in lattice coordinates) used when searching
   lattice point-group operations. */
static int relative_axes[][3] = {
  { 1, 0, 0}, { 0, 1, 0}, { 0, 0, 1}, {-1, 0, 0}, { 0,-1, 0}, /* 5 */
  { 0, 0,-1}, { 0, 1, 1}, { 1, 0, 1}, { 1, 1, 0}, { 0,-1,-1}, /* 10 */
  {-1, 0,-1}, {-1,-1, 0}, { 0, 1,-1}, {-1, 0, 1}, { 1,-1, 0}, /* 15 */
  { 0,-1, 1}, { 1, 0,-1}, {-1, 1, 0}, { 1, 1, 1}, {-1,-1,-1}, /* 20 */
  {-1, 1, 1}, { 1,-1, 1}, { 1, 1,-1}, { 1,-1,-1}, {-1, 1,-1}, /* 25 */
  {-1,-1, 1},
};

static int identity[3][3] = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}};

static int get_index_with_least_atoms(const Cell *cell);
static VecDBL * get_translation(SPGCONST int rot[3][3], SPGCONST Cell *cell, const double symprec, const int is_identity);
static Symmetry * get_operations(SPGCONST Cell * cell, const double symprec);
static Symmetry * reduce_operation(SPGCONST Cell * cell, SPGCONST Symmetry * symmetry, const double symprec);
static void search_translation_part(int lat_point_atoms[], SPGCONST Cell * cell, SPGCONST int rot[3][3], const int min_atom_index, const double origin[3], const double symprec, const int is_identity);
static int is_overlap_all_atoms(const double test_trans[3], SPGCONST int rot[3][3], SPGCONST Cell * cell, const double symprec, const int is_identity);
static PointSymmetry transform_pointsymmetry(SPGCONST PointSymmetry * point_sym_prim, SPGCONST double new_lattice[3][3], SPGCONST double original_lattice[3][3]);
static Symmetry * get_space_group_operations(SPGCONST PointSymmetry *lattice_sym, SPGCONST Cell *primitive, const double symprec);
static Symmetry * recover_operations_original(SPGCONST Symmetry *symmetry, const VecDBL * pure_trans, SPGCONST Cell *cell, SPGCONST Cell *primitive);
static void set_axes(int axes[3][3], const int a1, const int a2, const int a3);
static PointSymmetry get_lattice_symmetry(SPGCONST Cell *cell, const double symprec);
static int is_identity_metric(SPGCONST double metric_rotated[3][3], SPGCONST double metric_orig[3][3], const double symprec);
static double get_angle(SPGCONST double metric[3][3], const int i, const int j);

/* Allocate a Symmetry holding `size` rotation+translation pairs.
   Exits the process on allocation failure (spglib convention here).
   NOTE(review): the Symmetry struct itself is not NULL-checked before use. */
Symmetry * sym_alloc_symmetry(const int size)
{
  Symmetry *symmetry;

  symmetry = (Symmetry*) malloc(sizeof(Symmetry));
  symmetry->size = size;
  if (size > 0) {
    if ((symmetry->rot = (int (*)[3][3]) malloc(sizeof(int[3][3]) * size)) == NULL) {
      warning_print("spglib: Memory could not be allocated ");
      warning_print("(line %d, %s).\n", __LINE__, __FILE__);
      exit(1);
    }
    if ((symmetry->trans = (double (*)[3]) malloc(sizeof(double[3]) * size)) == NULL) {
      warning_print("spglib: Memory could not be allocated ");
      warning_print("(line %d, %s).\n", __LINE__, __FILE__);
      exit(1);
    }
  }
  return symmetry;
}

/* Free a Symmetry allocated by sym_alloc_symmetry(). */
void sym_free_symmetry(Symmetry *symmetry)
{
  if (symmetry->size > 0) {
    free(symmetry->rot);
    symmetry->rot = NULL;
    free(symmetry->trans);
    symmetry->trans = NULL;
  }
  free(symmetry);
  symmetry = NULL;
}

/* Public entry point: full space-group operation search for `cell`. */
Symmetry * sym_get_operation(SPGCONST Cell *cell, const double symprec)
{
  Symmetry *symmetry;

  symmetry = get_operations(cell, symprec);

  return symmetry;
}

/* Number of operations may be reduced with smaller symprec. */
Symmetry * sym_reduce_operation(SPGCONST Cell * cell, SPGCONST Symmetry * symmetry, const double symprec)
{
  return reduce_operation(cell, symmetry, symprec);
}

/* Count pure translations (lattice points) in the cell. */
int sym_get_multiplicity(SPGCONST Cell *cell, const double symprec)
{
  int multi;
  VecDBL * trans;

  trans = get_translation(identity, cell, symprec, 1);
  multi = trans->size;
  mat_free_VecDBL(trans);
  return multi;
}

/* All pure translations of the cell; warns if the count does not divide
   the number of atoms (which would indicate a failed search). */
VecDBL * sym_get_pure_translation(SPGCONST Cell *cell, const double symprec)
{
  int multi;
  VecDBL * pure_trans;

  pure_trans = get_translation(identity, cell, symprec, 1);
  multi = pure_trans->size;
  if ((cell->size / multi) * multi == cell->size) {
    debug_print("sym_get_pure_translation: pure_trans->size = %d\n", multi);
  } else {
    ;
    warning_print("spglib: Finding pure translation failed (line %d, %s).\n", __LINE__, __FILE__);
    warning_print(" cell->size %d, multi %d\n", cell->size, multi);
  }
  return pure_trans;
}

/* Re-validate pure translations against the cell at (possibly tighter)
   symprec by wrapping them as identity-rotation operations. */
VecDBL * sym_reduce_pure_translation(SPGCONST Cell * cell, const VecDBL * pure_trans, const double symprec)
{
  int i, multi;
  Symmetry *symmetry, *symmetry_reduced;
  VecDBL * pure_trans_reduced;

  multi = pure_trans->size;
  symmetry = sym_alloc_symmetry(multi);
  for (i = 0; i < multi; i++) {
    mat_copy_matrix_i3(symmetry->rot[i], identity);
    mat_copy_vector_d3(symmetry->trans[i], pure_trans->vec[i]);
  }

  symmetry_reduced = reduce_operation(cell, symmetry, symprec);
  sym_free_symmetry(symmetry);

  multi = symmetry_reduced->size;
  pure_trans_reduced = mat_alloc_VecDBL(multi);
  for (i = 0; i < multi; i++) {
    mat_copy_vector_d3(pure_trans_reduced->vec[i], symmetry_reduced->trans[i]);
  }
  sym_free_symmetry(symmetry_reduced);

  return pure_trans_reduced;
}

/* Set/get the module-wide angle tolerance (degrees; <0 = derive from symprec). */
void sym_set_angle_tolerance(double tolerance)
{
  angle_tolerance = tolerance;
}

double sym_get_angle_tolerance(void)
{
  return angle_tolerance;
}

/* 1) A primitive cell of the input cell is searched. */
/* 2) Pointgroup operations of the primitive cell are obtained. */
/*    These are constrained by the input cell lattice pointgroup, */
/*    i.e., even if the lattice of the primitive cell has higher */
/*    symmetry than that of the input cell, it is not considered. */
/* 3) Spacegroup operations are searched for the primitive cell */
/*    using the constrained point group operations. */
/* 4) The spacegroup operations for the primitive cell are */
/*    transformed to those of original input cells, if the input cell */
/*    was not a primitive cell. */
static Symmetry * get_operations(SPGCONST Cell *cell, const double symprec)
{
  int i, j, attempt;
  double tolerance;
  PointSymmetry lattice_sym;
  Symmetry *symmetry, *symmetry_orig, *symmetry_reduced;
  Primitive *primitive;

  debug_print("get_operations:\n");

  symmetry_orig = NULL;

  lattice_sym = get_lattice_symmetry(cell, symprec);
  if (lattice_sym.size == 0) {
    debug_print("get_lattice_symmetry failed.\n");
    goto end;
  }

  primitive = prm_get_primitive(cell, symprec);
  if (primitive->cell->size == 0) {
    goto deallocate_and_end;
  }

  lattice_sym = transform_pointsymmetry(&lattice_sym, primitive->cell->lattice, cell->lattice);
  if (lattice_sym.size == 0) {
    goto deallocate_and_end;
  }

  symmetry = get_space_group_operations(&lattice_sym, primitive->cell, symprec);
  /* A crystallographic space group has at most 48 point operations per
     lattice point; more indicates a too-loose tolerance, so retry tighter. */
  if (symmetry->size > 48) {
    tolerance = symprec;
    for (attempt = 0; attempt < 100; attempt++) {
      tolerance *= REDUCE_RATE;
      warning_print("spglib: number of symmetry operations for primitive cell > 48 was found. (line %d, %s).\n", __LINE__, __FILE__);
      warning_print("tolerance is reduced to %f\n", tolerance);
      symmetry_reduced = reduce_operation(primitive->cell, symmetry, tolerance);
      sym_free_symmetry(symmetry);
      symmetry = symmetry_reduced;
      if (symmetry_reduced->size > 48) {
        ;
      } else {
        break;
      }
    }
  }

  symmetry_orig = recover_operations_original(symmetry, primitive->pure_trans, cell, primitive->cell);
  sym_free_symmetry(symmetry);

  /* wrap translations into [-0.5, 0.5) */
  for (i = 0; i < symmetry_orig->size; i++) {
    for (j = 0; j < 3; j++) {
      symmetry_orig->trans[i][j] -= mat_Nint(symmetry_orig->trans[i][j]);
    }
  }

 deallocate_and_end:
  prm_free_primitive(primitive);

 end:
  if (! symmetry_orig) {
    symmetry_orig = sym_alloc_symmetry(0);
  }
  return symmetry_orig;
}

/* Keep only those operations of `symmetry` that are still valid lattice
   point-group operations and map all atoms onto equivalent atoms at the
   given symprec. */
static Symmetry * reduce_operation(SPGCONST Cell * cell, SPGCONST Symmetry * symmetry, const double symprec)
{
  int i, j, num_sym;
  Symmetry * sym_reduced;
  PointSymmetry point_symmetry;
  MatINT *rot;
  VecDBL *trans;

  debug_print("reduce_operation:\n");

  point_symmetry = get_lattice_symmetry(cell, symprec);
  rot = mat_alloc_MatINT(symmetry->size);
  trans = mat_alloc_VecDBL(symmetry->size);

  num_sym = 0;
  for (i = 0; i < point_symmetry.size; i++) {
    for (j = 0; j < symmetry->size; j++) {
      if (mat_check_identity_matrix_i3(point_symmetry.rot[i], symmetry->rot[j])) {
        if (is_overlap_all_atoms(symmetry->trans[j], symmetry->rot[j], cell, symprec, 0)) {
          mat_copy_matrix_i3(rot->mat[num_sym], symmetry->rot[j]);
          mat_copy_vector_d3(trans->vec[num_sym], symmetry->trans[j]);
          num_sym++;
        }
      }
    }
  }

  sym_reduced = sym_alloc_symmetry(num_sym);
  for (i = 0; i < num_sym; i++) {
    mat_copy_matrix_i3(sym_reduced->rot[i], rot->mat[i]);
    mat_copy_vector_d3(sym_reduced->trans[i], trans->vec[i]);
  }

  mat_free_MatINT(rot);
  mat_free_VecDBL(trans);

  debug_print(" num_sym %d -> %d\n", symmetry->size, num_sym);

  return sym_reduced;
}

/* Look for the translations which satisfy the input symmetry operation. */
/* This function is heaviest in this code. */
static VecDBL * get_translation(SPGCONST int rot[3][3], SPGCONST Cell *cell, const double symprec, const int is_identity)
{
  int i, j, min_atom_index, num_trans = 0;
  int *is_found;
  double origin[3];
  VecDBL *trans;

#ifdef _OPENMP
  int num_min_type_atoms;
  int *min_type_atoms;
  double vec[3];
#endif

  is_found = (int*) malloc(sizeof(int)*cell->size);
  for (i = 0; i < cell->size; i++) {
    is_found[i] = 0;
  }

  /* Look for the atom index with least number of atoms within same type */
  min_atom_index = get_index_with_least_atoms(cell);

  /* Set min_atom_index as the origin to measure the distance between atoms. */
  mat_multiply_matrix_vector_id3(origin, rot, cell->position[min_atom_index]);

#ifdef _OPENMP
  if (cell->size < NUM_ATOMS_CRITERION_FOR_OPENMP) {
    search_translation_part(is_found, cell, rot, min_atom_index, origin, symprec, is_identity);
  } else {
    /* Collect indices of atoms with the type where the minimum number */
    /* of atoms belong. */
    min_type_atoms = (int*) malloc(sizeof(int)*cell->size);
    num_min_type_atoms = 0;
    for (i = 0; i < cell->size; i++) {
      if (cell->types[i] == cell->types[min_atom_index]) {
        min_type_atoms[num_min_type_atoms] = i;
        num_min_type_atoms++;
      }
    }
    /* vec is private: each candidate translation is tested independently */
#pragma omp parallel for private(j, vec)
    for (i = 0; i < num_min_type_atoms; i++) {
      for (j = 0; j < 3; j++) {
        vec[j] = cell->position[min_type_atoms[i]][j] - origin[j];
      }
      if (is_overlap_all_atoms(vec, rot, cell, symprec, is_identity)) {
        is_found[min_type_atoms[i]] = 1;
      }
    }
    free(min_type_atoms);
  }
#else
  search_translation_part(is_found, cell, rot, min_atom_index, origin, symprec, is_identity);
#endif

  for (i = 0; i < cell->size; i++) {
    num_trans += is_found[i];
  }

  trans = mat_alloc_VecDBL(num_trans);
  num_trans = 0;
  for (i = 0; i < cell->size; i++) {
    if (is_found[i]) {
      for (j = 0; j < 3; j++) {
        trans->vec[num_trans][j] = cell->position[i][j] - origin[j];
      }
      num_trans++;
    }
  }

  free(is_found);
  is_found = NULL;

  return trans;
}

/* Serial scan: mark atoms of the origin's type whose offset from the rotated
   origin is a valid translation for `rot` (see is_overlap_all_atoms). */
static void search_translation_part(int lat_point_atoms[], SPGCONST Cell * cell, SPGCONST int rot[3][3], const int min_atom_index, const double origin[3], const double symprec, const int is_identity)
{
  int i, j;
  double vec[3];

  for (i = 0; i < cell->size; i++) {
    if (cell->types[i] != cell->types[min_atom_index]) {
      continue;
    }
    for (j = 0; j < 3; j++) {
      vec[j] = cell->position[i][j] - origin[j];
    }
    if (is_overlap_all_atoms(vec, rot, cell, symprec, is_identity)) {
      lat_point_atoms[i] = 1;
    }
  }
}

/* Check whether (rot|trans) maps every atom onto an atom of the same type
   within symprec (Cartesian distance via the lattice metric).
   NOTE: definition continues past the end of this chunk. */
static int is_overlap_all_atoms(const double trans[3], SPGCONST int rot[3][3], SPGCONST Cell * cell, const double symprec, const int is_identity)
{
  int i, j, k, is_found;
  double symprec2;
  double pos_rot[3], d[3];

  symprec2 = symprec*symprec;
  for (i = 0; i < cell->size; i++) {
    if (is_identity) {
      /* Identity matrix is treated as special for speed. */
      for (j = 0; j < 3; j++) {
        pos_rot[j] = cell->position[i][j] + trans[j];
      }
    } else {
      mat_multiply_matrix_vector_id3(pos_rot, rot, cell->position[i]);
      for (j = 0; j < 3; j++) {
        pos_rot[j] += trans[j];
      }
    }

    is_found = 0;
    for (j = 0; j < cell->size; j++) {
      if (cell->types[i] == cell->types[j]) {
        /* here cel_is_overlap can be used, but for the tuning */
        /* purpose, write it again */
        for (k = 0; k < 3; k++) {
          d[k] = pos_rot[k] - cell->position[j][k];
          d[k] -= mat_Nint(d[k]);
        }
        mat_multiply_matrix_vector_d3(d, cell->lattice, d);
        if (d[0]*d[0]+d[1]*d[1]+d[2]*d[2] < symprec2) {
          is_found = 1;
          break;
        }
      }
    }

    if (!
is_found) { goto not_found; } } return 1; /* found */ not_found: return 0; } static int get_index_with_least_atoms(const Cell *cell) { int i, j, min, min_index; int *mapping; mapping = (int *) malloc(sizeof(int) * cell->size); for (i = 0; i < cell->size; i++) { mapping[i] = 0; } for (i = 0; i < cell->size; i++) { for (j = 0; j < cell->size; j++) { if (cell->types[i] == cell->types[j]) { mapping[j]++; break; } } } min = mapping[0]; min_index = 0; for (i = 0; i < cell->size; i++) { if (min > mapping[i] && mapping[i] >0) { min = mapping[i]; min_index = i; } } free(mapping); mapping = NULL; return min_index; } static Symmetry * get_space_group_operations(SPGCONST PointSymmetry *lattice_sym, SPGCONST Cell *cell, const double symprec) { int i, j, num_sym, total_num_sym; VecDBL **trans; Symmetry *symmetry; debug_print("get_space_group_operations:\n"); trans = (VecDBL**) malloc(sizeof(VecDBL*) * lattice_sym->size); total_num_sym = 0; for (i = 0; i < lattice_sym->size; i++) { trans[i] = get_translation(lattice_sym->rot[i], cell, symprec, 0); total_num_sym += trans[i]->size; } symmetry = sym_alloc_symmetry(total_num_sym); num_sym = 0; for (i = 0; i < lattice_sym->size; i++) { for (j = 0; j < trans[i]->size; j++) { mat_copy_vector_d3(symmetry->trans[num_sym + j], trans[i]->vec[j]); mat_copy_matrix_i3(symmetry->rot[num_sym + j], lattice_sym->rot[i]); } num_sym += trans[i]->size; } for (i = 0; i < lattice_sym->size; i++) { mat_free_VecDBL(trans[i]); } free(trans); trans = NULL; return symmetry; } static Symmetry * recover_operations_original(SPGCONST Symmetry *symmetry, const VecDBL * pure_trans, SPGCONST Cell *cell, SPGCONST Cell *primitive) { int i, j, k, multi; double inv_prim_lat[3][3], drot[3][3], trans_mat[3][3], trans_mat_inv[3][3]; Symmetry *symmetry_orig, *sym_tmp; debug_print("recover_operations_original:\n"); multi = pure_trans->size; sym_tmp = sym_alloc_symmetry(symmetry->size); symmetry_orig = sym_alloc_symmetry(symmetry->size * multi); 
mat_inverse_matrix_d3(inv_prim_lat, primitive->lattice, 0); mat_multiply_matrix_d3(trans_mat, inv_prim_lat, cell->lattice); mat_inverse_matrix_d3(trans_mat_inv, trans_mat, 0); for(i = 0; i < symmetry->size; i++) { mat_copy_matrix_i3(sym_tmp->rot[i], symmetry->rot[i]); mat_copy_vector_d3(sym_tmp->trans[i], symmetry->trans[i]); } for(i = 0; i < symmetry->size; i++) { mat_cast_matrix_3i_to_3d(drot, sym_tmp->rot[i]); mat_get_similar_matrix_d3(drot, drot, trans_mat, 0); mat_cast_matrix_3d_to_3i(sym_tmp->rot[i], drot); mat_multiply_matrix_vector_d3(sym_tmp->trans[i], trans_mat_inv, sym_tmp->trans[i]); } for(i = 0; i < symmetry->size; i++) { for(j = 0; j < multi; j++) { mat_copy_matrix_i3(symmetry_orig->rot[i * multi + j], sym_tmp->rot[i]); for (k = 0; k < 3; k++) { symmetry_orig->trans[i * multi + j][k] = sym_tmp->trans[i][k] + pure_trans->vec[j][k]; } } } sym_free_symmetry(sym_tmp); return symmetry_orig; } static PointSymmetry get_lattice_symmetry(SPGCONST Cell *cell, const double symprec) { int i, j, k, num_sym; int axes[3][3]; double lattice[3][3], min_lattice[3][3]; double metric[3][3], metric_orig[3][3]; PointSymmetry lattice_sym; debug_print("get_lattice_symmetry:\n"); if (! lat_smallest_lattice_vector(min_lattice, cell->lattice, symprec)) { goto err; } mat_get_metric(metric_orig, min_lattice); num_sym = 0; for (i = 0; i < 26; i++) { for (j = 0; j < 26; j++) { for (k = 0; k < 26; k++) { set_axes(axes, i, j, k); if (! 
((mat_get_determinant_i3(axes) == 1) || (mat_get_determinant_i3(axes) == -1))) { continue; } mat_multiply_matrix_di3(lattice, min_lattice, axes); mat_get_metric(metric, lattice); if (is_identity_metric(metric, metric_orig, symprec)) { mat_copy_matrix_i3(lattice_sym.rot[num_sym], axes); num_sym++; } if (num_sym > 48) { warning_print("spglib: Too many lattice symmetries was found.\n"); warning_print(" Tolerance may be too large "); warning_print("(line %d, %s).\n", __LINE__, __FILE__); goto err; } } } } lattice_sym.size = num_sym; return transform_pointsymmetry(&lattice_sym, cell->lattice, min_lattice); err: lattice_sym.size = 0; return lattice_sym; } static int is_identity_metric(SPGCONST double metric_rotated[3][3], SPGCONST double metric_orig[3][3], const double symprec) { int i, j, k; int elem_sets[3][2] = {{0, 1}, {0, 2}, {1, 2}}; double cos1, cos2, x, length_ave2, sin_dtheta2; double length_orig[3], length_rot[3]; for (i = 0; i < 3; i++) { length_orig[i] = sqrt(metric_orig[i][i]); length_rot[i] = sqrt(metric_rotated[i][i]); if (mat_Dabs(length_orig[i] - length_rot[i]) > symprec) { goto fail; } } for (i = 0; i < 3; i++) { j = elem_sets[i][0]; k = elem_sets[i][1]; if (angle_tolerance > 0) { if (mat_Dabs(get_angle(metric_orig, j, k) - get_angle(metric_rotated, j, k)) > angle_tolerance) { goto fail; } } else { /* dtheta = arccos(cos(theta1) - arccos(cos(theta2))) */ /* = arccos(c1) - arccos(c2) */ /* = arccos(c1c2 + sqrt((1-c1^2)(1-c2^2))) */ /* sin(dtheta) = sin(arccos(x)) = sqrt(1 - x^2) */ cos1 = metric_orig[j][k] / length_orig[j] / length_orig[k]; cos2 = metric_rotated[j][k] / length_rot[j] / length_rot[k]; x = cos1 * cos2 + sqrt(1 - cos1 * cos1) * sqrt(1 - cos2 * cos2); sin_dtheta2 = 1 - x * x; length_ave2 = ((length_orig[j] + length_rot[j]) * (length_orig[k] + length_rot[k])) / 4; if (sin_dtheta2 > 1e-12) { if (sin_dtheta2 * length_ave2 > symprec * symprec) { goto fail; } } } } return 1; fail: return 0; } static double get_angle(SPGCONST double metric[3][3], 
const int i, const int j) { double length_i, length_j; length_i = sqrt(metric[i][i]); length_j = sqrt(metric[j][j]); return acos(metric[i][j] / length_i / length_j) / PI * 180; } static PointSymmetry transform_pointsymmetry(SPGCONST PointSymmetry * lat_sym_orig, SPGCONST double new_lattice[3][3], SPGCONST double original_lattice[3][3]) { int i, size; double trans_mat[3][3], inv_mat[3][3], drot[3][3]; PointSymmetry lat_sym_new; mat_inverse_matrix_d3(inv_mat, original_lattice, 0); mat_multiply_matrix_d3(trans_mat, inv_mat, new_lattice); size = 0; for (i = 0; i < lat_sym_orig->size; i++) { mat_cast_matrix_3i_to_3d(drot, lat_sym_orig->rot[i]); mat_get_similar_matrix_d3(drot, drot, trans_mat, 0); /* new_lattice may have lower point symmetry than original_lattice.*/ /* The operations that have non-integer elements are not counted. */ if (mat_is_int_matrix(drot, mat_Dabs(mat_get_determinant_d3(trans_mat)) / 10)) { mat_cast_matrix_3d_to_3i(lat_sym_new.rot[size], drot); if (! abs(mat_get_determinant_i3(lat_sym_new.rot[size])) == 1) { warning_print("spglib: A point symmetry operation is not unimodular."); warning_print("(line %d, %s).\n", __LINE__, __FILE__); goto err; } size++; } } #ifdef SPGWARNING if (! (lat_sym_orig->size == size)) { warning_print("spglib: Some of point symmetry operations were dropped."); warning_print("(line %d, %s).\n", __LINE__, __FILE__); } #endif lat_sym_new.size = size; return lat_sym_new; err: lat_sym_new.size = 0; return lat_sym_new; } static void set_axes(int axes[3][3], const int a1, const int a2, const int a3) { int i; for (i = 0; i < 3; i++) {axes[i][0] = relative_axes[a1][i]; } for (i = 0; i < 3; i++) {axes[i][1] = relative_axes[a2][i]; } for (i = 0; i < 3; i++) {axes[i][2] = relative_axes[a3][i]; } }
GB_binop__isle_uint64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isle_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_01__isle_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_02__isle_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_03__isle_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isle_uint64)
// A*D function (colscale):         GB (_AxD__isle_uint64)
// D*A function (rowscale):         GB (_DxB__isle_uint64)
// C+=B function (dense accum):     GB (_Cdense_accumB__isle_uint64)
// C+=b function (dense accum):     GB (_Cdense_accumb__isle_uint64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isle_uint64)
// C=scalar+B                       GB (_bind1st__isle_uint64)
// C=scalar+B'                      GB (_bind1st_tran__isle_uint64)
// C=A+scalar                       GB (_bind2nd__isle_uint64)
// C=A'+scalar                      GB (_bind2nd_tran__isle_uint64)

// C type:   uint64_t
// A type:   uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij <= bij)

// NOTE: ISLE is the "is less-than-or-equal" operator: the result of the
// comparison (0 or 1) is stored as a uint64_t, not as a bool.

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint64_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint64_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x <= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISLE || GxB_NO_UINT64 || GxB_NO_ISLE_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISLE is none of those, so this kernel is compiled out for this operator.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__isle_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isle_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isle_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isle_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isle_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isle_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__isle_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isle_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__isle_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isle_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isle_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap (GBB tests Bb [p])
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isle_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x <= aij) ;                      \
}

GrB_Info GB (_bind1st_tran__isle_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij <= y) ;                      \
}

GrB_Info GB (_bind2nd_tran__isle_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
morn_DES.c
/*
Copyright (C) 2019-2020 JingWeiZhangHuai <jingweizhanghuai@163.com>
Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/

/* DES block cipher (FIPS 46-3) plus whole-file encrypt/decrypt helpers
   for the Morn library.  All 64-bit blocks are processed MSB-first. */

#include "morn_util.h"

/* Wrap every fread in this file with an error check (Num items expected). */
#define fread(Data,Size,Num,Fl) mException(((int)fread(Data,Size,Num,Fl)!=Num),EXIT,"read file error")

/* PC-1: 64-bit key -> 56 bits (parity bits dropped). */
uint8_t key_map1[56] = {
    57,49,41,33,25,17, 9,
     1,58,50,42,34,26,18,
    10, 2,59,51,43,35,27,
    19,11, 3,60,52,44,36,
    63,55,47,39,31,23,15,
     7,62,54,46,38,30,22,
    14, 6,61,53,45,37,29,
    21,13, 5,28,20,12, 4};

/* PC-2: 56-bit shifted key -> 48-bit round subkey. */
uint8_t key_map2[48] = {
    14,17,11,24, 1, 5,
     3,28,15, 6,21,10,
    23,19,12, 4,26, 8,
    16, 7,27,20,13, 2,
    41,52,31,37,47,55,
    30,40,51,45,33,48,
    44,49,39,56,34,53,
    46,42,50,36,29,32};

/* IP: initial permutation of the 64-bit block. */
uint8_t in_map[64] = {
    58,50,42,34,26,18,10,2,
    60,52,44,36,28,20,12,4,
    62,54,46,38,30,22,14,6,
    64,56,48,40,32,24,16,8,
    57,49,41,33,25,17, 9,1,
    59,51,43,35,27,19,11,3,
    61,53,45,37,29,21,13,5,
    63,55,47,39,31,23,15,7};

/* E: expansion of the 32-bit half block to 48 bits. */
uint8_t des_e[48] = {
    32, 1, 2, 3, 4, 5,
     4, 5, 6, 7, 8, 9,
     8, 9,10,11,12,13,
    12,13,14,15,16,17,
    16,17,18,19,20,21,
    20,21,22,23,24,25,
    24,25,26,27,28,29,
    28,29,30,31,32, 1};

/* S-boxes S1..S8, stored so a 6-bit input indexes directly. */
uint8_t des_s1[64] = {14,0,4,15,13,7,1,4,2,14,15,2,11,13,8,1,3,10,10,6,6,12,12,11,5,9,9,5,0,3,7,8,4,15,1,12,14,8,8,2,13,4,6,9,2,1,11,7,15,5,12,11,9,3,7,14,3,10,10,0,5,6,0,13};
uint8_t des_s2[64] = {15,3,1,13,8,4,14,7,6,15,11,2,3,8,4,14,9,12,7,0,2,1,13,10,12,6,0,9,5,11,10,5,0,13,14,8,7,10,11,1,10,3,4,15,13,4,1,2,5,11,8,6,12,7,6,12,9,0,3,5,2,14,15,9};
uint8_t des_s3[64] = {10,13,0,7,9,0,14,9,6,3,3,4,15,6,5,10,1,2,13,8,12,5,7,14,11,12,4,11,2,15,8,1,13,1,6,10,4,13,9,0,8,6,15,9,3,8,0,7,11,4,1,15,2,14,12,3,5,11,10,5,14,2,7,12};
uint8_t des_s4[64] = {7,13,13,8,14,11,3,5,0,6,6,15,9,0,10,3,1,4,2,7,8,2,5,12,11,1,12,10,4,14,15,9,10,3,6,15,9,0,0,6,12,10,11,1,7,13,13,8,15,9,1,4,3,5,14,11,5,12,2,7,8,2,4,14};
uint8_t des_s5[64] = {2,14,12,11,4,2,1,12,7,4,10,7,11,13,6,1,8,5,5,0,3,15,15,10,13,3,0,9,14,8,9,6,4,11,2,8,1,12,11,7,10,1,13,14,7,2,8,13,15,6,9,15,12,0,5,9,6,10,3,4,0,5,14,3};
uint8_t des_s6[64] = {12,10,1,15,10,4,15,2,9,7,2,12,6,9,8,5,0,6,13,1,3,13,4,14,14,0,7,11,5,3,11,8,9,4,14,3,15,2,5,12,2,9,8,5,12,15,3,10,7,11,0,14,4,1,10,7,1,6,13,0,11,8,6,13};
uint8_t des_s7[64] = {4,13,11,0,2,11,14,7,15,4,0,9,8,1,13,10,3,14,12,3,9,5,7,12,5,2,10,15,6,8,1,6,1,6,4,11,11,13,13,8,12,1,3,4,7,10,14,7,10,9,15,5,6,0,8,15,0,14,5,2,9,3,2,12};
uint8_t des_s8[64] = {13,1,2,15,8,13,4,8,6,10,15,3,11,7,1,4,10,12,9,5,3,6,14,11,5,0,0,14,12,9,7,2,7,2,11,1,4,14,1,7,9,4,12,10,14,8,2,13,0,15,6,12,10,9,13,0,15,3,3,5,5,6,8,11};

/* P: permutation applied after the S-boxes. */
uint8_t des_p[32] = {16, 7,20,21,29,12,28,17, 1,15,23,26, 5,18,31,10, 2, 8,24,14,32,27, 3, 9,19,13,30, 6,22,11, 4,25};

/* IP^-1: final permutation of the 64-bit block. */
uint8_t des_out[64] = {
    40,8,48,16,56,24,64,32,
    39,7,47,15,55,23,63,31,
    38,6,46,14,54,22,62,30,
    37,5,45,13,53,21,61,29,
    36,4,44,12,52,20,60,28,
    35,3,43,11,51,19,59,27,
    34,2,42,10,50,18,58,26,
    33,1,41, 9,49,17,57,25};

/* Debug helper: print the top bit_num bits of data, grouped by n. */
void PrintBit(uint64_t data,int bit_num,int n)
{
    int i;
    for(i=0;i<bit_num;i++)
    {
        if(i%n==0) printf(" ");
        printf("%d",(int)((data>>(63-i))&0x01));
    }
    printf("\n");
}

/* Apply a 1-based bit-selection table `map` (bit_num entries) to `in`.
   Bit k of the result (MSB-first) is bit map[k] of the input. */
uint64_t DesTransform(uint64_t in,uint8_t *map,int bit_num)
{
    uint64_t out = 0;
    int i;
    for(i=0;i<bit_num;i++)
        out += (((in>>(64-map[i]))&0x01)<<(63-i));
    return out;
}

/* DES key schedule: derive the 16 round subkeys (48 bits each, stored in
   the high bits of each uint64_t) from the 64-bit key. */
void DesKey(uint64_t key,uint64_t *sub_key)
{
    uint64_t buff = DesTransform(key,key_map1,56);   /* PC-1 */

    /* c and d are the 28-bit halves, kept in the top 28 bits. */
    uint64_t c = buff&0xFFFFFFF000000000;
    uint64_t d = buff<<28;

    /* 28-bit left rotation of a half stored in the top bits. */
#define KEY_SHIFT(in,n) ((((in)<<(n))+((in)>>(28-(n))))&0xFFFFFFF000000000)

    /* Rotation schedule per FIPS 46-3: 1,1,2,2,2,2,2,2,1,2,2,2,2,2,2,1. */
    c=KEY_SHIFT(c,1);d=KEY_SHIFT(d,1);buff=c+(d>>28);sub_key[ 0]=DesTransform(buff,key_map2,48);
    c=KEY_SHIFT(c,1);d=KEY_SHIFT(d,1);buff=c+(d>>28);sub_key[ 1]=DesTransform(buff,key_map2,48);
    c=KEY_SHIFT(c,2);d=KEY_SHIFT(d,2);buff=c+(d>>28);sub_key[ 2]=DesTransform(buff,key_map2,48);
    c=KEY_SHIFT(c,2);d=KEY_SHIFT(d,2);buff=c+(d>>28);sub_key[ 3]=DesTransform(buff,key_map2,48);
    c=KEY_SHIFT(c,2);d=KEY_SHIFT(d,2);buff=c+(d>>28);sub_key[ 4]=DesTransform(buff,key_map2,48);
    c=KEY_SHIFT(c,2);d=KEY_SHIFT(d,2);buff=c+(d>>28);sub_key[ 5]=DesTransform(buff,key_map2,48);
    c=KEY_SHIFT(c,2);d=KEY_SHIFT(d,2);buff=c+(d>>28);sub_key[ 6]=DesTransform(buff,key_map2,48);
    c=KEY_SHIFT(c,2);d=KEY_SHIFT(d,2);buff=c+(d>>28);sub_key[ 7]=DesTransform(buff,key_map2,48);
    c=KEY_SHIFT(c,1);d=KEY_SHIFT(d,1);buff=c+(d>>28);sub_key[ 8]=DesTransform(buff,key_map2,48);
    c=KEY_SHIFT(c,2);d=KEY_SHIFT(d,2);buff=c+(d>>28);sub_key[ 9]=DesTransform(buff,key_map2,48);
    c=KEY_SHIFT(c,2);d=KEY_SHIFT(d,2);buff=c+(d>>28);sub_key[10]=DesTransform(buff,key_map2,48);
    c=KEY_SHIFT(c,2);d=KEY_SHIFT(d,2);buff=c+(d>>28);sub_key[11]=DesTransform(buff,key_map2,48);
    c=KEY_SHIFT(c,2);d=KEY_SHIFT(d,2);buff=c+(d>>28);sub_key[12]=DesTransform(buff,key_map2,48);
    c=KEY_SHIFT(c,2);d=KEY_SHIFT(d,2);buff=c+(d>>28);sub_key[13]=DesTransform(buff,key_map2,48);
    c=KEY_SHIFT(c,2);d=KEY_SHIFT(d,2);buff=c+(d>>28);sub_key[14]=DesTransform(buff,key_map2,48);
    c=KEY_SHIFT(c,1);d=KEY_SHIFT(d,1);buff=c+(d>>28);sub_key[15]=DesTransform(buff,key_map2,48);
}

/* Run the eight S-boxes over the 48-bit input (top bits); the 32-bit
   result is returned in bits 63..32. */
uint64_t DesSBox(uint64_t in)
{
    uint64_t out=0;
    uint64_t data;
    data=des_s1[(in>>58)&0x3F];out+=data<<60;
    data=des_s2[(in>>52)&0x3F];out+=data<<56;
    data=des_s3[(in>>46)&0x3F];out+=data<<52;
    data=des_s4[(in>>40)&0x3F];out+=data<<48;
    data=des_s5[(in>>34)&0x3F];out+=data<<44;
    data=des_s6[(in>>28)&0x3F];out+=data<<40;
    data=des_s7[(in>>22)&0x3F];out+=data<<36;
    data=des_s8[(in>>16)&0x3F];out+=data<<32;
    return out;
}

/* Reverse the byte order of `num` bytes from in[] into out[]. */
void ToBigEndian(uint8_t *in,uint8_t *out,int num)
{
    int i;
    for(i=0;i<num;i++) out[i] = in[num-1-i];
}

/* Encrypt one 64-bit block with the 64-bit key (single DES, ECB). */
uint64_t DesEncrypt(uint64_t in,uint64_t key)
{
    uint64_t sub_key[16];
    DesKey(key,sub_key);

    uint64_t data_t = DesTransform(in,in_map,64);   /* IP */

    /* l and r are 32-bit halves kept in the top 32 bits. */
    uint64_t l = data_t&0xFFFFFFFF00000000;
    uint64_t r = data_t<<32;
    uint64_t s;
    int i;
    for(i=0;i<16;i++)
    {
        s=r;
        r=DesTransform(r,des_e,48);     /* expand to 48 bits */
        r=r^sub_key[i];                 /* mix with round subkey */
        r=DesSBox(r);                   /* substitute */
        r=DesTransform(r,des_p,32);     /* permute */
        r=r^l;
        l=s;
    }

    /* Final swap: (R16, L16), then IP^-1. */
    data_t = r+(l>>32);
    uint64_t out = DesTransform(data_t,des_out,64);
    return out;
}

/* Decrypt one 64-bit block: identical to encryption but with the round
   subkeys applied in reverse order. */
uint64_t DesDecrypt(uint64_t in,uint64_t key)
{
    uint64_t sub_key[16];
    DesKey(key,sub_key);

    uint64_t data_t=0;
    data_t = DesTransform(in,in_map,64);

    uint64_t l = data_t&0xFFFFFFFF00000000;
    uint64_t r = data_t<<32;
    uint64_t s;
    int i;
    for(i=0;i<16;i++)
    {
        s=r;
        r=DesTransform(r,des_e,48);
        r=r^sub_key[15-i];              /* reversed key order */
        r=DesSBox(r);
        r=DesTransform(r,des_p,32);
        r=r^l;
        l=s;
    }

    data_t = r+(l>>32);
    uint64_t out = DesTransform(data_t,des_out,64);
    return out;
}

#ifndef DESKEY
#define DESKEY MORN_DESKEY
#endif

/* Encrypt file in_name into out_name, 8 bytes at a time (ECB mode).
   Any trailing partial block (< 8 bytes) is copied through unencrypted.
   key == DFLT or 0 selects the library default key. */
void mEncrypt(const char *in_name,const char *out_name,uint64_t key)
{
    long i;
    if((key==(uint64_t)DFLT)||(key==0)) key = MORN_DESKEY;

    FILE *fr = fopen( in_name,"rb");mException((fr==NULL),EXIT,"cannot open file %s\n", in_name);
    fseek(fr,0,SEEK_END);
    /* BUG FIX: ftell returns long; storing it in int truncated sizes >= 2 GiB. */
    long file_size = ftell(fr);
    fseek(fr,0,SEEK_SET);

    long size = (file_size>>3);
    mException((size==0),EXIT,"invalid file size");

    /* +1 word holds the (possibly partial) trailing block.                   */
    /* NOTE(review): these mMalloc buffers are never explicitly released here */
    /* — presumably Morn's allocator tracks them; confirm against mMalloc.    */
    uint64_t *data_in = (uint64_t *)mMalloc((size+1)*sizeof(uint64_t));
    uint64_t *data_out= (uint64_t *)mMalloc((size+1)*sizeof(uint64_t));
    /* BUG FIX: zero the tail word first; fread fills only file_size bytes,  */
    /* so copying data_in[size] below used to read uninitialized memory.     */
    data_in[size]=0;
    fread(data_in,file_size,1,fr);fclose(fr);

    #pragma omp parallel for
    for(i=0;i<size;i++) data_out[i] = DesEncrypt(data_in[i],key);
    data_out[size]=data_in[size];   /* pass the partial tail through as-is */

    FILE *fw = fopen(out_name,"wb");mException((fw==NULL),EXIT,"cannot open file %s\n",out_name);
    fwrite(data_out,file_size,1,fw);fclose(fw);
}

/* Decrypt file in_name into out_name; exact inverse of mEncrypt. */
void mDecrypt(const char *in_name,const char *out_name,uint64_t key)
{
    long i;
    if((key==(uint64_t)DFLT)||(key==0)) key = MORN_DESKEY;

    FILE *fr = fopen( in_name,"rb");mException((fr==NULL),EXIT,"cannot open file %s\n", in_name);
    fseek(fr,0,SEEK_END);
    /* BUG FIX: long, not int (see mEncrypt). */
    long file_size = ftell(fr);
    fseek(fr,0,SEEK_SET);

    long size = (file_size>>3);
    mException((size==0),EXIT,"invalid file size");

    uint64_t *data_in = (uint64_t *)mMalloc((size+1)*sizeof(uint64_t));
    uint64_t *data_out= (uint64_t *)mMalloc((size+1)*sizeof(uint64_t));
    /* BUG FIX: avoid uninitialized read of the tail word (see mEncrypt). */
    data_in[size]=0;
    fread(data_in,file_size,1,fr);fclose(fr);

    #pragma omp parallel for
    for(i=0;i<size;i++) data_out[i] = DesDecrypt(data_in[i],key);
    data_out[size]=data_in[size];

    FILE *fw = fopen(out_name,"wb");mException((fw==NULL),EXIT,"cannot open file %s\n",out_name);
    fwrite(data_out,file_size,1,fw);fclose(fw);
}

/* Handle state for mFileDecrypt: name of the temporary plaintext file,
   removed again when the handle is torn down. */
struct HandleFileDecrypt
{
    char name_out[256];
};
void endFileDecrypt(void *info)
{
    struct HandleFileDecrypt *handle = (struct HandleFileDecrypt *)info;
    remove(handle->name_out);
}
#define HASH_FileDecrypt 0x528629ba
/* Transparently decrypt `file` into a temporary file and repoint the
   MFile object at it; idempotent per handle. */
void mFileDecrypt(MFile *file,uint64_t key)
{
    MHandle *hdl=mHandle(file,FileDecrypt);
    struct HandleFileDecrypt *handle = (struct HandleFileDecrypt *)(hdl->handle);
    if(hdl->valid == 1) return;

    char name_in[256];strcpy(name_in,file->filename);
    /* BUG FIX: tmpnam requires a buffer of at least L_tmpnam chars; the  */
    /* previous fixed 32 bytes could overflow on some platforms.          */
    /* NOTE(review): tmpnam is inherently racy (TOCTOU); a mkstemp-style  */
    /* API would be safer where available.                                */
    char name[L_tmpnam];sprintf(handle->name_out,"./%stmp",tmpnam(name));
    mObjectRedefine(file,handle->name_out);hdl->valid = 1;

    mDecrypt(name_in,handle->name_out,key);
}

/* Handle state for mFileEncrypt; mirror of HandleFileDecrypt. */
struct HandleFileEncrypt
{
    char name_out[256];
};
void endFileEncrypt(void *info)
{
    struct HandleFileEncrypt *handle = (struct HandleFileEncrypt *)info;
    remove(handle->name_out);
}
#define HASH_FileEncrypt 0x64d9b684
/* Transparently encrypt `file` into a temporary file and repoint the
   MFile object at it; idempotent per handle. */
void mFileEncrypt(MFile *file,uint64_t key)
{
    MHandle *hdl=mHandle(file,FileEncrypt);
    struct HandleFileEncrypt *handle = (struct HandleFileEncrypt *)(hdl->handle);
    if(hdl->valid == 1) return;

    char name_in[256];strcpy(name_in,file->filename);
    /* BUG FIX + NOTE(review): same L_tmpnam sizing and tmpnam caveat as  */
    /* in mFileDecrypt.                                                   */
    char name[L_tmpnam];sprintf(handle->name_out,"./%stmp",tmpnam(name));
    mObjectRedefine(file,handle->name_out);hdl->valid = 1;

    mEncrypt(name_in,handle->name_out,key);
}
GB_binop__first_fc32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):        GB (_AaddB__first_fc32)
// A.*B function (eWiseMult):      GB (_AemultB_08__first_fc32)
// A.*B function (eWiseMult):      GB (_AemultB_02__first_fc32)
// A.*B function (eWiseMult):      GB (_AemultB_04__first_fc32)
// A.*B function (eWiseMult):      GB (_AemultB_bitmap__first_fc32)
// A*D function (colscale):        GB (_AxD__first_fc32)
// D*A function (rowscale):        GB (_DxB__first_fc32)
// C+=B function (dense accum):    GB (_Cdense_accumB__first_fc32)
// C+=b function (dense accum):    GB (_Cdense_accumb__first_fc32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3):  GB (_Cdense_ewise3_noaccum__first_fc32)
// C=scalar+B                      GB ((none))
// C=scalar+B'                     GB ((none))
// C=A+scalar                      GB ((none))
// C=A'+scalar                     GB ((none))

// C type:     GxB_FC32_t
// A type:     GxB_FC32_t
// A pattern?  0
// B type:     GxB_FC32_t
// B pattern?  1

// BinaryOp: cij = aij   (FIRST: the result is the A entry; B is pattern-only)

#define GB_ATYPE GxB_FC32_t
#define GB_BTYPE GxB_FC32_t
#define GB_CTYPE GxB_FC32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE 1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE 1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE 1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) GxB_FC32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN 0

// bij = Bx [pB]  (expands to nothing: FIRST never reads B's values)
#define GB_GETB(bij,Bx,pB,B_iso) ;

// true if values of B are not used
#define GB_B_IS_PATTERN 1

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) GxB_FC32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: z = first(x,y) = x
#define GB_BINOP(z,x,y,i,j) z = x ;

// true if the binop must be flipped
#define GB_BINOP_FLIP 0

// op is second
#define GB_OP_IS_SECOND 0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE (GxB_NO_FIRST || GxB_NO_FC32 || GxB_NO_FIRST_FC32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// kernel bodies of the auto-generated GB_binop__first_fc32.c; each function
// only sets up typed locals and pulls in a shared template via #include.
// (the enclosing "#if 0" was opened just above: ewise3_accum is not
// generated for FIRST, hence the (none) placeholder name)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__first_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__first_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // template disabled for FIRST (#if 0): nothing to accumulate
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__first_fc32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // template disabled for FIRST (#if 0)
    #if 0
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__first_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__first_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__first_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // eWiseUnion substitutes typed scalars for missing entries of A and B
    GxB_FC32_t alpha_scalar ;
    GxB_FC32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((GxB_FC32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__first_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__first_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__first_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__first_fc32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// not generated for FIRST: bind1st(x,B) would ignore B entirely

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        ;
        ;
        Cx [p] = x ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        GxB_FC32_t aij = GBX (Ax, p, false) ;
        Cx [p] = aij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) { ; ; Cx [pC] = x ; }

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE GxB_FC32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE GxB_FC32_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) { GxB_FC32_t aij = GBX (Ax, pA, false) ; Cx [pC] = aij ; }

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
multi_node_benchmark.h
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#pragma once

#include <LightGBM/boosting.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>

#include <string>
#include <algorithm>
#include <cstdio>
#include <fstream>
#include <map>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <utility>
#include <vector>

#include <LightGBM/json11.hpp>

#include "score_updater.hpp"

using namespace json11;

namespace LightGBM {
/*!
* \brief GBDT algorithm implementation, including training, prediction and
*        bagging.  NOTE(review): the class name suggests this variant is used
*        for multi-node benchmarking; most model-serialization overrides below
*        are stubbed out to no-ops -- confirm against the caller.
*/
class MultiNodeBenchmark : public GBDTBase {
 public:
  /*!
  * \brief Constructor
  */
  MultiNodeBenchmark();
  /*!
  * \brief Destructor
  */
  ~MultiNodeBenchmark();
  /*!
  * \brief Initialization logic
  * \param gbdt_config Config for boosting
  * \param train_data Training data
  * \param objective_function Training objective function
  * \param training_metrics Training metrics
  */
  void Init(const Config *gbdt_config, const Dataset *train_data,
            const ObjectiveFunction *objective_function,
            const std::vector<const Metric *> &training_metrics) override;

  /*!
  * \brief Merge model from other boosting object.
           Will insert to the front of current boosting object
  * \param other
  */
  void MergeFrom(const Boosting *other) override {
    auto other_gbdt = reinterpret_cast<const MultiNodeBenchmark *>(other);
    // move our own trees aside, then rebuild models_ as: other's trees first,
    // followed by deep copies of the original trees
    auto original_models = std::move(models_);
    models_ = std::vector<std::unique_ptr<Tree>>();
    // push model from other first
    for (const auto &tree : other_gbdt->models_) {
      auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
      models_.push_back(std::move(new_tree));
    }
    num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    // push model in current object
    for (const auto &tree : original_models) {
      auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
      models_.push_back(std::move(new_tree));
    }
    num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
  }

  // Randomly permute whole iterations (groups of num_tree_per_iteration_
  // trees) in [start_iter, end_iter) using a fixed-seed Fisher-Yates shuffle,
  // so the result is deterministic.
  void ShuffleModels(int start_iter, int end_iter) override {
    int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    start_iter = std::max(0, start_iter);
    if (end_iter <= 0) {
      end_iter = total_iter;
    }
    end_iter = std::min(total_iter, end_iter);
    auto original_models = std::move(models_);
    std::vector<int> indices(total_iter);
    for (int i = 0; i < total_iter; ++i) {
      indices[i] = i;
    }
    Random tmp_rand(17);
    for (int i = start_iter; i < end_iter - 1; ++i) {
      int j = tmp_rand.NextShort(i + 1, end_iter);
      std::swap(indices[i], indices[j]);
    }
    models_ = std::vector<std::unique_ptr<Tree>>();
    for (int i = 0; i < total_iter; ++i) {
      for (int j = 0; j < num_tree_per_iteration_; ++j) {
        int tree_idx = indices[i] * num_tree_per_iteration_ + j;
        auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get())));
        models_.push_back(std::move(new_tree));
      }
    }
  }

  /*!
  * \brief Reset the training data
  * \param train_data New Training data
  * \param objective_function Training objective function
  * \param training_metrics Training metrics
  */
  void ResetTrainingData(const Dataset *train_data, const ObjectiveFunction *objective_function,
                         const std::vector<const Metric *> &training_metrics) override;

  /*!
  * \brief Reset Boosting Config
  * \param gbdt_config Config for boosting
  */
  void ResetConfig(const Config *gbdt_config) override;

  /*!
  * \brief Adding a validation dataset
  * \param valid_data Validation dataset
  * \param valid_metrics Metrics for validation dataset
  */
  void AddValidDataset(const Dataset *valid_data,
                       const std::vector<const Metric *> &valid_metrics) override;

  /*!
  * \brief Perform a full training procedure
  * \param snapshot_freq frequency of snapshot
  * \param model_output_path path of model file
  */
  void Train(int snapshot_freq, const std::string &model_output_path) override;

  void RefitTree(const std::vector<std::vector<int>> &tree_leaf_prediction) override;

  /*!
  * \brief Training logic
  * \param gradients nullptr for using default objective, otherwise use self-defined boosting
  * \param hessians nullptr for using default objective, otherwise use self-defined boosting
  * \return True if cannot train any more
  */
  bool TrainOneIter(const score_t *gradients, const score_t *hessians) override;

  /*!
  * \brief Rollback one iteration
  */
  void RollbackOneIter() override;

  /*!
  * \brief Get current iteration
  */
  int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; }

  /*!
  * \brief Can use early stopping for prediction or not
  * \return True if cannot use early stopping for prediction
  */
  bool NeedAccuratePrediction() const override {
    if (objective_function_ == nullptr) {
      return true;
    } else {
      return objective_function_->NeedAccuratePrediction();
    }
  }

  /*!
  * \brief Get evaluation result at data_idx data
  * \param data_idx 0: training data, 1: 1st validation data
  * \return evaluation result
  */
  std::vector<double> GetEvalAt(int data_idx) const override;

  /*!
  * \brief Get current training score
  * \param out_len length of returned score
  * \return training score
  */
  const double *GetTrainingScore(int64_t *out_len) override;

  /*!
  * \brief Get size of prediction at data_idx data
  * \param data_idx 0: training data, 1: 1st validation data
  * \return The size of prediction
  */
  int64_t GetNumPredictAt(int data_idx) const override {
    CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size()));
    data_size_t num_data = train_data_->num_data();
    if (data_idx > 0) {
      num_data = valid_score_updater_[data_idx - 1]->num_data();
    }
    return num_data * num_class_;
  }

  /*!
  * \brief Get prediction result at data_idx data
  * \param data_idx 0: training data, 1: 1st validation data
  * \param result used to store prediction result, should allocate memory before call this function
  * \param out_len length of returned score
  */
  void GetPredictAt(int data_idx, double *out_result, int64_t *out_len) override;

  /*!
  * \brief Get number of prediction for one data
  * \param num_iteration number of used iterations
  * \param is_pred_leaf True if predicting leaf index
  * \param is_pred_contrib True if predicting feature contribution
  * \return number of prediction
  */
  inline int NumPredictOneRow(int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override {
    int num_preb_in_one_row = num_class_;
    if (is_pred_leaf) {
      int max_iteration = GetCurrentIteration();
      if (num_iteration > 0) {
        num_preb_in_one_row *= static_cast<int>(std::min(max_iteration, num_iteration));
      } else {
        num_preb_in_one_row *= max_iteration;
      }
    } else if (is_pred_contrib) {
      num_preb_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2);  // +1 for 0-based indexing, +1 for baseline
    }
    return num_preb_in_one_row;
  }

  // Prediction entry points are intentionally stubbed out in this class.
  void PredictRaw(const double *, double *, const PredictionEarlyStopInstance *) const override {}
  void PredictRawByMap(const std::unordered_map<int, double> &, double *, const PredictionEarlyStopInstance *) const override {}
  void Predict(const double *, double *, const PredictionEarlyStopInstance *) const override {}
  void PredictByMap(const std::unordered_map<int, double> &, double *, const PredictionEarlyStopInstance *) const override {}
  void PredictLeafIndex(const double *, double *) const override {}
  void PredictLeafIndexByMap(const std::unordered_map<int, double> &, double *) const override {}
  void PredictContrib(const double *, double *, const PredictionEarlyStopInstance *) const override;

  /*!
  * \brief Dump model to json format string
  * \param start_iteration The model will be saved start from
  * \param num_iteration Number of iterations that want to dump, -1 means dump all
  * \return Json format string of model
  */
  std::string DumpModel(int, int) const override { return ""; }

  /*!
  * \brief Translate model to if-else statement
  * \param num_iteration Number of iterations that want to translate, -1 means translate all
  * \return if-else format codes of model
  */
  std::string ModelToIfElse(int) const override { return ""; };

  /*!
  * \brief Translate model to if-else statement
  * \param num_iteration Number of iterations that want to translate, -1 means translate all
  * \param filename Filename that want to save to
  * \return is_finish Is training finished or not
  */
  bool SaveModelToIfElse(int, const char *) const override { return true; }

  /*!
  * \brief Save model to file
  * \param start_iteration The model will be saved start from
  * \param num_iterations Number of model that want to save, -1 means save all
  * \param filename Filename that want to save to
  * \return is_finish Is training finished or not
  */
  bool SaveModelToFile(int , int , const char *) const override { return true; }

  /*!
  * \brief Save model to string
  * \param start_iteration The model will be saved start from
  * \param num_iterations Number of model that want to save, -1 means save all
  * \return Non-empty string if succeeded
  */
  std::string SaveModelToString(int , int ) const override { return ""; }

  /*!
  * \brief Restore from a serialized buffer
  */
  bool LoadModelFromString(const char *, size_t ) override { return true; }

  /*!
  * \brief Calculate feature importances
  * \param num_iteration Number of model that want to use for feature importance, -1 means use all
  * \param importance_type: 0 for split, 1 for gain
  * \return vector of feature_importance
  */
  std::vector<double> FeatureImportance(int , int) const override {
    std::vector<double> ret;
    return ret;
  }

  /*!
  * \brief Get max feature index of this model
  * \return Max feature index of this model
  */
  inline int MaxFeatureIdx() const override { return max_feature_idx_; }

  /*!
  * \brief Get feature names of this model
  * \return Feature names of this model
  */
  inline std::vector<std::string> FeatureNames() const override { return feature_names_; }

  /*!
  * \brief Get index of label column
  * \return index of label column
  */
  inline int LabelIdx() const override { return label_idx_; }

  /*!
  * \brief Get number of weak sub-models
  * \return Number of weak sub-models
  */
  inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); }

  /*!
  * \brief Get number of tree per iteration
  * \return number of tree per iteration
  */
  inline int NumModelPerIteration() const override { return num_tree_per_iteration_; }

  /*!
  * \brief Get number of classes
  * \return Number of classes
  */
  inline int NumberOfClasses() const override { return num_class_; }

  inline void InitPredict(int num_iteration, bool is_pred_contrib) override {
    num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    if (num_iteration > 0) {
      num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_);
    }
    if (is_pred_contrib) {
      #pragma omp parallel for schedule(static)
      for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
        models_[i]->RecomputeMaxDepth();
      }
    }
  }

  inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
    CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
    CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
    return models_[tree_idx]->LeafOutput(leaf_idx);
  }

  inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
    CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
    CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
    models_[tree_idx]->SetLeafOutput(leaf_idx, val);
  }

  /*!
  * \brief Get Type name of this boosting object
  */
  const char *SubModelName() const override { return "tree"; }

 protected:
  /*!
  * \brief Print eval result and check early stopping
  */
  virtual bool EvalAndCheckEarlyStopping();

  /*!
  * \brief reset config for bagging
  */
  void ResetBaggingConfig(const Config *config, bool is_change_dataset);

  /*!
  * \brief Implement bagging logic
  * \param iter Current iteration
  */
  virtual void Bagging(int iter);

  /*!
  * \brief Helper function for bagging, used for multi-threading optimization
  * \param start start index of bagging
  * \param cnt count
  * \param buffer output buffer
  * \return count of left size
  */
  data_size_t BaggingHelper(Random *cur_rand, data_size_t start,
                            data_size_t cnt, data_size_t *buffer);

  /*!
  * \brief Helper function for bagging, used for multi-threading optimization, balanced sampling
  * \param start start index of bagging
  * \param cnt count
  * \param buffer output buffer
  * \return count of left size
  */
  data_size_t BalancedBaggingHelper(Random *cur_rand, data_size_t start,
                                    data_size_t cnt, data_size_t *buffer);

  /*!
  * \brief calculate the object function
  */
  virtual void Boosting();

  /*!
  * \brief updating score after tree was trained
  * \param tree Trained tree of this iteration
  * \param cur_tree_id Current tree for multiclass training
  */
  virtual void UpdateScore(const Tree *tree, const int cur_tree_id);

  /*!
  * \brief eval results for one metric
  */
  virtual std::vector<double> EvalOneMetric(const Metric *metric, const double *score) const;

  /*!
  * \brief Print metric result of current iteration
  * \param iter Current iteration
  * \return best_msg if met early_stopping
  */
  std::string OutputMetric(int iter);

  double BoostFromAverage(int class_id, bool update_scorer);

  /*! \brief current iteration */
  int iter_;
  /*! \brief Pointer to training data */
  const Dataset *train_data_;
  /*! \brief Config of gbdt */
  std::unique_ptr<Config> config_;
  /*! \brief Tree learner, will use this class to learn trees */
  std::unique_ptr<TreeLearner> tree_learner_;
  /*! \brief Objective function */
  const ObjectiveFunction *objective_function_;
  /*! \brief Store and update training data's score */
  std::unique_ptr<ScoreUpdater> train_score_updater_;
  /*! \brief Metrics for training data */
  std::vector<const Metric *> training_metrics_;
  /*! \brief Store and update validation data's scores */
  std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_;
  /*! \brief Metric for validation data */
  std::vector<std::vector<const Metric *>> valid_metrics_;
  /*! \brief Number of rounds for early stopping */
  int early_stopping_round_;
  /*! \brief Only use first metric for early stopping */
  bool es_first_metric_only_;
  /*! \brief Best iteration(s) for early stopping */
  std::vector<std::vector<int>> best_iter_;
  /*! \brief Best score(s) for early stopping */
  std::vector<std::vector<double>> best_score_;
  /*! \brief output message of best iteration */
  std::vector<std::vector<std::string>> best_msg_;
  /*! \brief Trained models(trees) */
  std::vector<std::unique_ptr<Tree>> models_;
  /*! \brief Max feature index of training data*/
  int max_feature_idx_;
  /*! \brief First order derivative of training data */
  std::vector<score_t> gradients_;
  /*! \brief Second order derivative of training data */
  std::vector<score_t> hessians_;
  /*! \brief Store the indices of in-bag data */
  std::vector<data_size_t> bag_data_indices_;
  /*! \brief Number of in-bag data */
  data_size_t bag_data_cnt_;
  /*! \brief Store the indices of in-bag data */
  std::vector<data_size_t> tmp_indices_;
  /*! \brief Number of training data */
  data_size_t num_data_;
  /*! \brief Number of trees per iterations */
  int num_tree_per_iteration_;
  /*! \brief Number of class */
  int num_class_;
  /*! \brief Index of label column */
  data_size_t label_idx_;
  /*! \brief number of used model */
  int num_iteration_for_pred_;
  /*! \brief Shrinkage rate for one iteration */
  double shrinkage_rate_;
  /*! \brief Number of loaded initial models */
  int num_init_iteration_;
  /*! \brief Feature names */
  std::vector<std::string> feature_names_;
  std::vector<std::string> feature_infos_;
  /*! \brief number of threads */
  int num_threads_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> offsets_buf_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> left_cnts_buf_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> right_cnts_buf_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> left_write_pos_buf_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> right_write_pos_buf_;
  std::unique_ptr<Dataset> tmp_subset_;
  bool is_use_subset_;
  std::vector<bool> class_need_train_;
  bool is_constant_hessian_;
  std::unique_ptr<ObjectiveFunction> loaded_objective_;
  bool average_output_;
  bool need_re_bagging_;
  bool balanced_bagging_;
  std::string loaded_parameter_;
  std::vector<int8_t> monotone_constraints_;
  Json forced_splits_json_;
};

}  // namespace LightGBM
LM.h
/** * Copyright (c) 2021 Darius Rückert * Licensed under the MIT License. * See LICENSE file for more information. */ #pragma once #include "../recursive/Recursive.h" namespace Saiga { template <typename T> void applyLMDiagonalInner(T& diag, double lambda = 1.00e-04, double min_lm_diagonal = 1e-6, double max_lm_diagonal = 1e32) { for (int k = 0; k < diag.rows(); ++k) { auto& value = diag.diagonal()(k); value = value + lambda * value; value = clamp(value, min_lm_diagonal, max_lm_diagonal); } } /** * Applies the Levenberg Marquarad Diagonal update to a recursive diagonal matrix. * * U = U + clamp(diag(U) * lambda,min,max) */ template <typename T> void applyLMDiagonal(Eigen::DiagonalMatrix<T, -1>& U, double lambda = 1.00e-04, double min_lm_diagonal = 1e-6, double max_lm_diagonal = 1e32) { for (int i = 0; i < U.rows(); ++i) { auto& diag = U.diagonal()(i).get(); applyLMDiagonalInner(diag, lambda, min_lm_diagonal, max_lm_diagonal); } } template <typename T> void applyLMDiagonal_omp(Eigen::DiagonalMatrix<T, -1>& U, double lambda = 1.00e-04, double min_lm_diagonal = 1e-6, double max_lm_diagonal = 1e32) { #pragma omp for for (int i = 0; i < U.rows(); ++i) { auto& diag = U.diagonal()(i).get(); applyLMDiagonalInner(diag, lambda, min_lm_diagonal, max_lm_diagonal); } } /** * Simplified LM diagonal update, used by the g2o framwork * * U = U + ID * lambda */ template <typename T> void applyLMDiagonalG2O(Eigen::DiagonalMatrix<T, -1>& U, double lambda = 1.00e-04) { for (int i = 0; i < U.rows(); ++i) { auto& diag = U.diagonal()(i).get(); for (int k = 0; k < diag.RowsAtCompileTime; ++k) { auto& value = diag.diagonal()(k); value = value + lambda; } } } inline void updateLambda(double& lambda, bool success) { if (success) { lambda /= 2.0; } else { lambda *= 2.0; } } } // namespace Saiga
GB_unaryop__ainv_uint8_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__ainv_uint8_uint32
// op(A') function: GB_tran__ainv_uint8_uint32

// C type: uint8_t
// A type: uint32_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = -aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
// NOTE: unary minus on uint32_t wraps modulo 2^32 (well-defined for
// unsigned types); the subsequent cast truncates to the low 8 bits.
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise: Cx [p] = -(uint8_t) Ax [p] for p in [0, anz); embarrassingly
// parallel, one static chunk per thread.
GrB_Info GB_unop__ainv_uint8_uint32
(
    uint8_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop body lives in the shared template GB_unaryop_transpose.c,
// instantiated here via the GB_* macros defined above.
GrB_Info GB_tran__ainv_uint8_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__atan_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__atan_fc32_fc32
// op(A') function: GB_unop_tran__atan_fc32_fc32

// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = catanf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: complex single-precision arc tangent (C99 catanf)
#define GB_OP(z, x) \
    z = catanf (x) ;

// casting (identity here: A and C share the same complex type)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ; \
    Cx [pC] = catanf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ATAN || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise Cx [p] = catanf (Ax [p]); in-place operation is safe since
// each element is read once before its (possibly aliased) slot is written.
GrB_Info GB_unop_apply__atan_fc32_fc32
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t aij = Ax [p] ;
        GxB_FC32_t z = aij ;
        Cx [p] = catanf (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Loop body supplied by the shared template GB_unop_transpose.c, driven by
// the GB_* macros above.
GrB_Info GB_unop_tran__atan_fc32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
fill_ints.c
/*
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

#include <stdlib.h>
#include <complex.h>
#include "config.h"
#include "cint.h"

// Upper bound on the dimension of one contracted-shell block; the per-thread
// scratch buffer below is sized NCTRMAX^3 * comp doubles.
#define NCTRMAX 72

// Accumulate one real integral block `in` (di x dj x dk, comp components)
// into every k-point output array out[ik], scaled by the complex phase
// expLk[ik]. `off` is the linear offset of the block inside each output,
// ni/nij/nijk are the strides of the i, (i,j), and (i,j,k) axes.
static void axpy_s1(double complex **out, double *in, double complex *expLk,
                    int nkpts, int comp, size_t off,
                    size_t ni, size_t nij, size_t nijk,
                    size_t di, size_t dj, size_t dk)
{
        const size_t dij = di * dj;
        int i, j, k, ic, ik;
        double complex *out_ik, *pout;
        double *pin;
        for (ic = 0; ic < comp; ic++) {
                // The same real block is re-used for every k-point; only the
                // phase factor differs.
                for (ik = 0; ik < nkpts; ik++) {
                        out_ik = out[ik] + off;
                        for (k = 0; k < dk; k++) {
                                pout = out_ik + k * nij;
                                pin = in + k * dij;
                                for (j = 0; j < dj; j++) {
                                        for (i = 0; i < di; i++) {
                                                pout[j*ni+i] += pin[j*di+i] * expLk[ik];
                                        }
                                }
                        }
                }
                // Advance to the next component in both input and output.
                off += nijk;
                in += dij * dk;
        }
}

/*
 * out[naoi,naoj,naok,comp] in F-order
 */
// Fill one (jsh,ksh) column of 3-center integrals: loop over all i-shells,
// evaluate the integral block, and scatter it into the k-point outputs.
void PBCnr3c_fill_s1(int (*intor)(), double complex **out,
                     double complex *expLk, int nkpts, int comp,
                     int jsh, int ksh,
                     double *buf, int *shls_slice, int *ao_loc,
                     CINTOpt *cintopt,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        const size_t nij = naoi * naoj;
        const size_t nijk = nij * naok;
        // jsh/ksh arrive 0-based relative to the slice; convert to absolute
        // shell indices.
        jsh += jsh0;
        ksh += ksh0;
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int dk = ao_loc[ksh+1] - ao_loc[ksh];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        const int kp = ao_loc[ksh] - ao_loc[ksh0];
        const size_t off = kp * nij + jp * naoi;
        int ish, di, i0;
        int shls[3];
        shls[1] = jsh;
        shls[2] = ksh;
        for (ish = ish0; ish < ish1; ish++) {
                shls[0] = ish;
                i0 = ao_loc[ish ] - ao_loc[ish0];
                di = ao_loc[ish+1] - ao_loc[ish];
                // intor returns non-zero when the block is not screened out.
                if ((*intor)(buf, shls, atm, natm, bas, nbas, env, cintopt)) {
                        axpy_s1(out, buf, expLk, nkpts, comp, off+i0,
                                naoi, nij, nijk, di, dj, dk);
                }
        }
}

// OpenMP driver: distribute the (jsh,ksh) pairs over threads, each with its
// own scratch buffer.
void PBCnr3c_loop(int (*intor)(), void (*fill)(), double complex **eri,
                  double complex *expLk, int nkpts, int comp,
                  int *shls_slice, int *ao_loc,
                  CINTOpt *cintopt,
                  int *atm, int natm, int *bas, int nbas, double *env)
{
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const int njsh = jsh1 - jsh0;
        const int nksh = ksh1 - ksh0;
// NOTE(review): njsh/nksh are referenced inside default(none) without being
// listed; this relies on const-qualified locals being predetermined shared —
// confirm against the OpenMP version targeted by the build.
#pragma omp parallel default(none) \
        shared(intor, fill, eri, expLk, nkpts, comp, \
               shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env)
{
        int jsh, ksh, jk;
        // Per-thread scratch, sized for the largest possible shell block.
        // NOTE(review): malloc result is not checked before use.
        double *buf = (double *)malloc(sizeof(double)*NCTRMAX*NCTRMAX*NCTRMAX*comp);
#pragma omp for schedule(dynamic)
        for (jk = 0; jk < njsh*nksh; jk++) {
                ksh = jk / njsh;
                jsh = jk % njsh;
                (*fill)(intor, eri, expLk, nkpts, comp, jsh, ksh, buf,
                        shls_slice, ao_loc, cintopt,
                        atm, natm, bas, nbas, env);
        }
        free(buf);
}
}

// Translate every atom by lattice vector L, writing the shifted coordinates
// back into env (in place).
static void shift_bas(double *xyz, int *ptr_coords, double *L, int nxyz,
                      double *env)
{
        int i, p;
        for (i = 0; i < nxyz; i++) {
                p = ptr_coords[i];
                env[p+0] = xyz[i*3+0] + L[0];
                env[p+1] = xyz[i*3+1] + L[1];
                env[p+2] = xyz[i*3+2] + L[2];
        }
}

// Top-level 3-center driver: for each lattice image, shift the basis and
// accumulate its contribution with the image's k-point phases.
void PBCnr3c_drv(int (*intor)(), void (*fill)(), double complex **eri,
                 double *xyz, int *ptr_coords, int nxyz,
                 double *Ls, int nimgs,
                 double complex *expLk, int nkpts, int comp,
                 int *shls_slice, int *ao_loc,
                 CINTOpt *cintopt,
                 int *atm, int natm, int *bas, int nbas, double *env)
{
        int m;
        for (m = 0; m < nimgs; m++) {
                shift_bas(xyz, ptr_coords, Ls+m*3, nxyz, env);
                PBCnr3c_loop(intor, fill, eri, expLk+m*nkpts, nkpts, comp,
                             shls_slice, ao_loc, cintopt,
                             atm, natm, bas, nbas, env);
        }
}

// 2-center 2-electron fill, full (s1) symmetry. ksh is unused here (the
// driver reuses the 3-center loop with a dummy k-shell range of [0,1)).
void PBCnr2c2e_fill_s1(int (*intor)(), double complex **out,
                       double complex *expLk, int nkpts, int comp,
                       int jsh, int ksh,
                       double *buf, int *shls_slice, int *ao_loc,
                       CINTOpt *cintopt,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
        const size_t nij = naoi * naoj;
        jsh += jsh0;
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        const size_t off = jp * naoi;
        int ish, di, i0;
        int shls[2];
        shls[1] = jsh;
        for (ish = ish0; ish < ish1; ish++) {
                shls[0] = ish;
                i0 = ao_loc[ish ] - ao_loc[ish0];
                di = ao_loc[ish+1] - ao_loc[ish];
                if ((*intor)(buf, shls, atm, natm, bas, nbas, env, cintopt)) {
                        // dk = 1: treat the 2-center block as a 3-center
                        // block with a unit k-dimension.
                        axpy_s1(out, buf, expLk, nkpts, comp, off+i0,
                                naoi, nij, nij, di, dj, 1);
                }
        }
}

// 2-center 2-electron fill, lower-triangular (s2): only i-shells with
// ish-ish0 <= jsh are computed.
void PBCnr2c2e_fill_s2(int (*intor)(), double complex **out,
                       double complex *expLk, int nkpts, int comp,
                       int jsh, int ksh,
                       double *buf, int *shls_slice, int *ao_loc,
                       CINTOpt *cintopt,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
        const size_t nij = naoi * naoj;
        const int dj = ao_loc[jsh+jsh0+1] - ao_loc[jsh+jsh0];
        const int jp = ao_loc[jsh+jsh0] - ao_loc[jsh0];
        const size_t off = jp * naoi;
        int i, ish, di, i0;
        int shls[2];
        shls[1] = jsh + jsh0;
        for (i = 0; i <= jsh; i++) {
                ish = i + ish0;
                shls[0] = ish;
                i0 = ao_loc[ish ] - ao_loc[ish0];
                di = ao_loc[ish+1] - ao_loc[ish];
                if ((*intor)(buf, shls, atm, natm, bas, nbas, env, cintopt)) {
                        axpy_s1(out, buf, expLk, nkpts, comp, off+i0,
                                naoi, nij, nij, di, dj, 1);
                }
        }
}

// 2-center 1-electron fill (s1). NOTE(review): intor is called WITHOUT the
// cintopt argument here, unlike the 2e variants above — presumably the
// 1-electron integral signature; confirm against the libcint API.
void PBCnr2c_fill_s1(int (*intor)(), double complex **out,
                     double complex *expLk, int nkpts, int comp,
                     int jsh, int ksh,
                     double *buf, int *shls_slice, int *ao_loc,
                     CINTOpt *cintopt,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
        const size_t nij = naoi * naoj;
        jsh += jsh0;
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        const size_t off = jp * naoi;
        int ish, di, i0;
        int shls[2];
        shls[1] = jsh;
        for (ish = ish0; ish < ish1; ish++) {
                shls[0] = ish;
                i0 = ao_loc[ish ] - ao_loc[ish0];
                di = ao_loc[ish+1] - ao_loc[ish];
                if ((*intor)(buf, shls, atm, natm, bas, nbas, env)) {
                        axpy_s1(out, buf, expLk, nkpts, comp, off+i0,
                                naoi, nij, nij, di, dj, 1);
                }
        }
}

// 2-center 1-electron fill, lower-triangular (s2). Same no-cintopt intor
// convention as PBCnr2c_fill_s1.
void PBCnr2c_fill_s2(int (*intor)(), double complex **out,
                     double complex *expLk, int nkpts, int comp,
                     int jsh, int ksh,
                     double *buf, int *shls_slice, int *ao_loc,
                     CINTOpt *cintopt,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
        const size_t nij = naoi * naoj;
        const int dj = ao_loc[jsh+jsh0+1] - ao_loc[jsh+jsh0];
        const int jp = ao_loc[jsh+jsh0] - ao_loc[jsh0];
        const size_t off = jp * naoi;
        int i, ish, di, i0;
        int shls[2];
        shls[1] = jsh + jsh0;
        for (i = 0; i <= jsh; i++) {
                ish = i + ish0;
                shls[0] = ish;
                i0 = ao_loc[ish ] - ao_loc[ish0];
                di = ao_loc[ish+1] - ao_loc[ish];
                if ((*intor)(buf, shls, atm, natm, bas, nbas, env)) {
                        axpy_s1(out, buf, expLk, nkpts, comp, off+i0,
                                naoi, nij, nij, di, dj, 1);
                }
        }
}

// Top-level 2-center driver: reuse the 3-center loop by appending a dummy
// k-shell slice [0, 1).
void PBCnr2c_drv(int (*intor)(), void (*fill)(), double complex **out,
                 double *xyz, int *ptr_coords, int nxyz,
                 double *Ls, int nimgs,
                 double complex *expLk, int nkpts, int comp,
                 int *shls_slice, int *ao_loc,
                 CINTOpt *cintopt,
                 int *atm, int natm, int *bas, int nbas, double *env)
{
        int shls_slice_3c[6];
        shls_slice_3c[0] = shls_slice[0];
        shls_slice_3c[1] = shls_slice[1];
        shls_slice_3c[2] = shls_slice[2];
        shls_slice_3c[3] = shls_slice[3];
        shls_slice_3c[4] = 0;
        shls_slice_3c[5] = 1;
        int m;
        for (m = 0; m < nimgs; m++) {
                shift_bas(xyz, ptr_coords, Ls+m*3, nxyz, env);
                PBCnr3c_loop(intor, fill, out, expLk+m*nkpts, nkpts, comp,
                             shls_slice_3c, ao_loc,
                             cintopt, atm, natm, bas, nbas, env);
        }
}
3_parallel_queue_first_n_push.c
/* Program : 3
   Author  : Anish
   Topic   : Write a C program using OpenMP features to create two parallel
             threads. The first thread should insert the first ‘N’ natural
             numbers into a queue in sequence, and the second thread should
             remove the numbers from the queue. */
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>

int main()
{
    int n;
    printf("\n ENTER THE VALUE OF N \n");
    /* Validate input: reading into a VLA bound must not proceed on garbage. */
    if (scanf("%d", &n) != 1 || n <= 0) {
        fprintf(stderr, "invalid value of N\n");
        return 1;
    }

    int d, Q[n], rear = -1, front = 0, i = 1;
    omp_set_dynamic(0);          /* force exactly the requested thread count */

#pragma omp parallel num_threads(2)
    {
        /* BUG FIX: 'id' was previously a single variable declared before the
         * parallel region, so both threads raced on it and could both read
         * the same value (e.g. both acting as consumer). Declaring it inside
         * the region makes it private to each thread. */
        int id = omp_get_thread_num();

        if (id == 0) {           /* producer: inserts 1..N */
            while (1) {
#pragma omp critical
                {
                    if (rear < n - 1) {
                        Q[++rear] = i;
                        printf("\n INSERTED ITEM IS %d", i);
                        i++;
                    } else
                        printf("\n NO SPACE");
                    fgetc(stdin);   /* pace the loop: wait for a keypress */
                }
            }
        } else {                 /* consumer: removes from the front */
            while (1) {
#pragma omp critical
                {
                    if (front <= rear) {
                        d = Q[front];
                        front++;
                        printf("\n DELETED ITEM IS %d", d);
                    } else
                        printf("\n NO ITEMS TO DELETE");
                    fgetc(stdin);   /* pace the loop: wait for a keypress */
                }
            }
        }
    }
    return 0;                    /* unreachable: both loops run forever */
}
SPGrid_Page_Map.h
//!#####################################################################
//! \file SPGrid_Page_Map.h
//!#####################################################################
// Class SPGrid_Page_Map
//######################################################################
#ifndef __SPGrid_Page_Map__
#define __SPGrid_Page_Map__

#include <nova/SPGrid/Core/SPGrid_Geometry.h>
#include <stdint.h>
#include <vector>

namespace SPGrid{

// Tracks which 4KB pages of a sparse grid are populated, as a bitmap plus an
// on-demand list of linearized page offsets. Offsets are byte offsets: bits
// 12..17 select the bit within one 64-bit map entry, bits 18+ select the entry.
class SPGrid_Page_Map
{
  public:
    const uint64_t map_size;               // Size of the page map, in uint64_t units. Each entry corresponds to 64 4KB pages.
    uint64_t* page_map;                    // The actual page map - a bitmap structured as an array of 64-bit entries.
    bool dirty;                            // Indicates that block offsets are inconsistent with the page map (perhaps for a good reason, if only one of them is used).
    std::vector<uint64_t> block_offsets;   // Alternative representation as a list of linearized offsets. Created on demand.

    // Make the pagemap class noncopyable
    SPGrid_Page_Map(const SPGrid_Page_Map&) = delete;
    SPGrid_Page_Map& operator=(const SPGrid_Page_Map&) = delete;

    // One bit per block; map_size rounds (pages/64) up to whole entries.
    // Raw_Allocate presumably returns zeroed memory — confirm against the
    // SPGrid allocator's contract.
    template<int d>
    SPGrid_Page_Map(const SPGrid_Geometry<d>& geometry)
        :map_size((geometry.Padded_Volume()/geometry.Elements_Per_Block()+0x3fUL)>>6)
    {
        page_map = static_cast<uint64_t*>(Raw_Allocate(map_size*sizeof(uint64_t)));
        dirty = false;
    }

    ~SPGrid_Page_Map()
    {
        Raw_Deallocate(page_map,map_size*sizeof(uint64_t));
    }

    // Reset the bitmap by releasing and reallocating it (drops page residency
    // rather than touching every entry with memset).
    void Clear_Page_Map()
    {
        Raw_Deallocate(page_map,map_size*sizeof(uint64_t));
        page_map = static_cast<uint64_t*>(Raw_Allocate(map_size*sizeof(uint64_t)));
        dirty = true;
    }

    // Release the offsets list, including its capacity (swap idiom).
    void Clear_Blocks()
    {
        std::vector<uint64_t>().swap(block_offsets);
        dirty = true;
    }

    void Clear()
    {
        Clear_Page_Map();
        Clear_Blocks();
        dirty = false;
    }

    // Bulk-mark pages and rebuild the offsets list once at the end.
    void Append_Pages(const std::vector<uint64_t>& page_offsets)
    {
        for(auto it=page_offsets.cbegin();it!=page_offsets.cend();++it)
            Set_Page(*it);
        Update_Block_Offsets();
    }

    // Mark the page containing byte offset `offset` as populated. Safe to
    // call concurrently: the bit is only set under an atomic OR, and the
    // test-before-set avoids writing (and invalidating) cache lines whose
    // bit is already set.
    void Set_Page(const uint64_t offset)
    {
        uint64_t mask = 1UL << (offset>>12 & 0x3f);
        uint64_t& entry = page_map[offset>>18];
        if(mask & ~entry){
#pragma omp atomic
            entry |= mask;
        }
        if(!dirty) dirty = true; // Important to avoid unnecessary write sharing
    }

    // Non-atomic read; concurrent use with Set_Page may observe stale bits.
    bool Test_Page(const uint64_t offset) const
    {
        uint64_t mask = 1UL << (offset>>12 & 0x3f);
        const uint64_t& entry = page_map[offset>>18];
        return entry&mask;
    }

    // Returns (pointer, count) over the cached offsets list; empty list
    // yields a null pointer. Does NOT refresh a dirty list — call
    // Update_Block_Offsets() first.
    std::pair<const uint64_t*,unsigned> Get_Blocks() const
    {
        if(block_offsets.size())
            return std::pair<const uint64_t*,unsigned>(&block_offsets[0],block_offsets.size());
        else
            return std::pair<const uint64_t*,unsigned>((const uint64_t*)0,0);
    }

    // Rebuild block_offsets from the bitmap if it is out of date.
    void Update_Block_Offsets()
    {
        if(dirty)
        {
            std::vector<uint64_t> new_block_offsets(Generate_Block_Offsets());
            block_offsets.swap(new_block_offsets);
        }
        dirty = false;
    }

    // This implementation is currently suboptimal in that it will touch the entirety of the page map.
    // It should perferably be implemented using mincore() instead, to selectively query only resident pages.
    std::vector<uint64_t> Generate_Block_Offsets()
    {
        std::vector<uint64_t> block_offsets;
        for(uint64_t entry=0;entry<map_size;entry++)
            if(page_map[entry])
                for(uint64_t pos=0;pos<64;pos++)
                    if(page_map[entry]&(1UL<<pos))
                        // Reconstruct the byte offset from (entry, bit) position.
                        block_offsets.push_back((entry<<18)|(pos<<12));
        return block_offsets;
    }

//######################################################################
};
}
#endif
block-7.c
// { dg-do compile } void foo() { int i, j; for (i = 0; i < 10; ++i) { #pragma omp for for (j = ({ continue; 0; }); // { dg-error "invalid branch to/from OpenMP structured block" } j < ({ continue; 10; }); // { dg-error "invalid branch to/from OpenMP structured block" } j += ({ continue; 1; })) // { dg-error "invalid branch to/from OpenMP structured block" } continue; #pragma omp for for (j = ({ break; 0; }); // { dg-error "invalid branch to/from OpenMP structured block" } j < ({ break; 10; }); // { dg-error "invalid branch to/from OpenMP structured block" } j += ({ break; 1; })) // { dg-error "invalid branch to/from OpenMP structured block" } break; // { dg-error "break" } } }
main.c
/* All changes to code are copyright, 2017, Jenniffer Estrada, jmestrada@unm.edu
   Research projects -- UNM and Jenniffer Estrada */
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>

#define PI 3.14159265

typedef enum { FILTER_AVG, FILTER_GAUSS } filter_type;

/* Square convolution kernel of side 2*radius+1. */
typedef struct {
    int radius;
    double **matrix;
    int type;
} FILTER;

typedef struct {
    unsigned char R;
    unsigned char G;
    unsigned char B;
} pixel;

/* .ppm image (P6: text header followed by binary RGB raster) */
typedef struct {
    char header[3];
    int width, height;
    int color_depth;
    pixel **pixels;
} IMAGE;

/* Load a binary PPM (P6). Returns NULL if the file cannot be opened.
 * Caller owns the result and must release it with image_free(). */
IMAGE *image_load(const char *image_name)
{
    IMAGE *image = (IMAGE*) malloc(sizeof(IMAGE));

    /* BUG FIX: open in binary mode — the raster is raw bytes, and text mode
     * would corrupt it on Windows. */
    FILE *file = fopen(image_name, "rb");
    if (!file) {
        free(image);  /* BUG FIX: don't leak the struct on open failure */
        return NULL;
    }

    /* BUG FIX: bound the magic-number read to the 3-byte buffer ("%s" could
     * overflow header[3] on malformed input). */
    fscanf(file, "%2s", image->header);
    fscanf(file, "%d %d %d", &(image->width), &(image->height),
           &(image->color_depth));
    /* BUG FIX: a valid P6 file has exactly one whitespace byte between the
     * maxval and the binary raster; consume it so the first pixel read below
     * gets the first raster byte instead of the separator. */
    fgetc(file);

    /* Allocate the pixel rows. */
    image->pixels = (pixel**) malloc(image->height * sizeof(pixel*));
    int i, j;
    for (i = 0; i < image->height; i++)
        image->pixels[i] = (pixel*) malloc(image->width * sizeof(pixel));

    /* Read the raster, one RGB triple per pixel. */
    for (i = 0; i < image->height; i++)
        for (j = 0; j < image->width; j++)
            fscanf(file, "%c%c%c", &(image->pixels[i][j].R),
                   &(image->pixels[i][j].G),
                   &(image->pixels[i][j].B));

    printf("image height width= %d x %d \n", image->height, image->width);

    fclose(file);
    return image;
}

/* Write a binary PPM (P6). Returns 1 on success, 0 if the file can't be opened. */
int image_write(IMAGE *image, const char *file_name)
{
    FILE *file = fopen(file_name, "wb");  /* BUG FIX: binary mode (see image_load) */
    if (!file) return 0;

    /* BUG FIX: terminate the header with a newline after the maxval. The old
     * format string fused the first raster byte onto the maxval digits,
     * producing an invalid PPM. */
    fprintf(file, "%s\n%d %d\n%d\n", image->header, image->width,
            image->height, image->color_depth);

    int i, j;
    for (i = 0; i < image->height; i++)
        for (j = 0; j < image->width; j++)
            fprintf(file, "%c%c%c", image->pixels[i][j].R,
                    image->pixels[i][j].G,
                    image->pixels[i][j].B);

    /* BUG FIX: removed `fprintf(file, "%d", EOF);` — it appended the literal
     * text "-1" to the image. EOF is an in-band status code, not file data. */

    fclose(file);
    return 1;
}

/* Allocate an image with the same header/dimensions as `source` but
 * uninitialized pixels. */
IMAGE *image_create_blank(IMAGE *source)
{
    IMAGE *image = (IMAGE*) malloc(sizeof(IMAGE));

    strcpy(image->header, source->header);
    image->height = source->height;
    image->width = source->width;
    image->color_depth = source->color_depth;

    image->pixels = (pixel**) malloc(image->height * sizeof(pixel*));
    int i;
    for (i = 0; i < image->height; i++)
        image->pixels[i] = (pixel*) malloc(image->width * sizeof(pixel));

    return image;
}

/* Release an image and all of its pixel rows. */
void image_free(IMAGE *image)
{
    int i;
    for (i = 0; i < image->height; i++)
        free(image->pixels[i]);
    free(image->pixels);
    free(image);
}

/* Box (mean) filter: every entry is 1/(dim*dim), so the kernel sums to 1. */
FILTER *filter_create_avg(int radius)
{
    FILTER *filter = (FILTER*) malloc(sizeof(FILTER));
    filter->radius = radius;
    filter->type = FILTER_AVG;

    int i, j;
    int dim = 2*radius+1;

    filter->matrix = (double**) malloc(dim * sizeof(double*));
    for (i = 0; i < dim; i++)
        filter->matrix[i] = (double*) malloc(dim * sizeof(double));

    double avg = 1.0 / (dim * dim);
    for (i = 0; i < dim; i++)
        for (j = 0; j < dim; j++)
            filter->matrix[i][j] = avg;

    return filter;
}

/* 2-D Gaussian density at (x, y) with standard deviation sigma. */
double gauss_2d(int x, int y, double sigma)
{
    double result = 1.0 / (2 * PI * sigma * sigma);
    result *= exp(-(x*x + y*y) / (2 * sigma * sigma));
    return result;
}

/* Gaussian filter of the given radius/sigma, normalized so entries sum to 1. */
FILTER *filter_create_gauss(int radius, double sigma)
{
    FILTER *filter = (FILTER*) malloc(sizeof(FILTER));
    filter->radius = radius;
    filter->type = FILTER_GAUSS;

    int i, j;
    int dim = 2*radius+1;

    filter->matrix = (double**) malloc(dim * sizeof(double*));
    for (i = 0; i < dim; i++)
        filter->matrix[i] = (double*) malloc(dim * sizeof(double));

    /* Sample the Gaussian over the kernel window and track the total mass. */
    double sum = 0.0;
    for (i = -radius; i <= radius; i++)
        for (j = -radius; j <= radius; j++) {
            filter->matrix[i+radius][j+radius] = gauss_2d(j, i, sigma);
            sum += filter->matrix[i+radius][j+radius];
        }

    /* Correct so that the sum of all elements ~= 1 */
    for (i = 0; i < 2*radius+1; i++)
        for (j = 0; j < 2*radius+1; j++)
            filter->matrix[i][j] /= sum;

    return filter;
}

void filter_print(FILTER *filter)
{
    int dim = 2*filter->radius+1, i, j;
    for (i = 0; i < dim; i++) {
        for (j = 0; j < dim; j++)
            printf("%lf ", filter->matrix[i][j]);
        printf("\n");
    }
}

void filter_free(FILTER *filter)
{
    int dim = 2*filter->radius+1, i;
    for (i = 0; i < dim; i++)
        free(filter->matrix[i]);
    free(filter->matrix);
    free(filter);
}

/* Convolve one pixel; border pixels (within `radius` of an edge) are copied
 * through unfiltered. */
void apply_to_pixel(int x, int y, IMAGE *original, IMAGE *result, FILTER *filter)
{
    if (x < filter->radius || y < filter->radius ||
        x >= original->width - filter->radius ||
        y >= original->height - filter->radius) {
        result->pixels[y][x] = original->pixels[y][x];
        return;
    }

    int i, j;
    double res_R = 0;
    double res_G = 0;
    double res_B = 0;
    double fil;

    for (i = -filter->radius; i <= filter->radius; i++)
        for (j = -filter->radius; j <= filter->radius; j++) {
            fil = filter->matrix[i+filter->radius][j+filter->radius];
            res_R += fil * original->pixels[y+i][x+j].R;
            res_G += fil * original->pixels[y+i][x+j].G;
            res_B += fil * original->pixels[y+i][x+j].B;
        }

    result->pixels[y][x].R = res_R;
    result->pixels[y][x].G = res_G;
    result->pixels[y][x].B = res_B;
}

/* Convolve the whole image, parallelized over rows. */
IMAGE *apply_filter(IMAGE *original, FILTER *filter)
{
    IMAGE *result = image_create_blank(original);
    int x, y;
    /* BUG FIX: 'x' was declared outside the parallel region and therefore
     * shared between threads — a data race (only the loop variable 'y' is
     * privatized automatically). Make 'x' private. */
#pragma omp parallel for private(x)
    for (y = 0; y < original->height; y++)
        for (x = 0; x < original->width; x++)
            apply_to_pixel(x, y, original, result, filter);
    return result;
}

/* MAIN PROGRAM: load a PPM, Gaussian-blur it, time the convolution, save. */
int main(int argc, char *argv[])
{
    IMAGE *image = NULL;    /* the image that is going to be blurred */
    IMAGE *result = NULL;   /* the resulting image */
    FILTER *filter;

    char image_file_name[50];
    char result_file_name[50];
    int radius;
    double sigma;

    /* Arguments: argv[0]="path", argv[1]="image_name.ppm",
     * argv[2]="result_image_name.ppm" argv[3]="radius" argv[4]="sigma" */
    if (argc == 5) {
        strcpy(image_file_name, argv[1]);
        strcpy(result_file_name, argv[2]);
        radius = atoi(argv[3]);
        sigma = atof(argv[4]);
    } else {
        /* Read info from keyboard */
        printf("Original image name: ");
        scanf("%s", image_file_name);
        printf("Result image name: ");
        scanf("%s", result_file_name);
        printf("Radius: ");
        scanf("%d", &radius);
        printf("Sigma: ");
        scanf("%lf", &sigma);
    }

    printf("Loading image...\n");
    image = image_load(image_file_name);

    printf("Creating filter...\n");
    filter = filter_create_gauss(radius, sigma);

    double start, stop;
    start = omp_get_wtime();

    printf("Appling filter...\n");
    result = apply_filter(image, filter);

    stop = omp_get_wtime();
    printf("Wall Time: ");
    printf("%f \n", stop - start);

    printf("Writing image to disk...\n");
    image_write(result, result_file_name);

    image_free(image);
    image_free(result);
    filter_free(filter);

    printf("DONE!\n");
    return 0;
}
6623.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "atax.h" /* Array initialization. */ static void init_array (int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny)) { int i, j; for (i = 0; i < ny; i++) x[i] = i * M_PI; for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) A[i][j] = ((DATA_TYPE) i*(j+1)) / nx; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int nx, DATA_TYPE POLYBENCH_1D(y,NX,nx)) { int i; for (i = 0; i < nx; i++) { fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]); if (i % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_atax(int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny), DATA_TYPE POLYBENCH_1D(y,NY,ny), DATA_TYPE POLYBENCH_1D(tmp,NX,nx)) { int i, j; #pragma scop { #pragma omp for (i = 0; i < _PB_NY; i++) { y[i] = 0; } #pragma omp for (i = 0; i < _PB_NX; i++) { tmp[i] = 0; for (j = 0; j < _PB_NY; j++) tmp[i] = tmp[i] + A[i][j] * x[j]; for (j = 0; j < _PB_NY; j++) y[j] = y[j] + A[i][j] * tmp[i]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int nx = NX; int ny = NY; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny); POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx); /* Initialize array(s). 
*/ init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_atax (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(tmp)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(x); POLYBENCH_FREE_ARRAY(y); POLYBENCH_FREE_ARRAY(tmp); return 0; }
atomic_messages.c
// RUN: %clang_cc1 -verify -fopenmp=libiomp5 -ferror-limit 100 %s
// NOTE(review): this is a clang -verify diagnostics test.  It had been
// collapsed onto a couple of physical lines, which breaks every
// expected-error@+N / expected-note@+N relative-line-offset directive.
// The fix restores the line structure; all diagnostic text is unchanged.

int foo() {
L1:
  foo();
#pragma omp atomic
// expected-error@+1 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
  {
    foo();
    goto L1; // expected-error {{use of undeclared label 'L1'}}
  }
  goto L2; // expected-error {{use of undeclared label 'L2'}}
#pragma omp atomic
// expected-error@+1 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
  {
    foo();
  L2:
    foo();
  }

  return 0;
}

struct S {
  int a;
};

int readint() {
  int a = 0, b = 0;
  // Test for atomic read
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected an expression statement}}
  ;
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
  foo();
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
  a += b;
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected lvalue expression}}
  a = 0;
#pragma omp atomic read
  a = b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}}
#pragma omp atomic read read
  a = b;

  return 0;
}

int readS() {
  struct S a, b;
  // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}}
#pragma omp atomic read read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected expression of scalar type}}
  a = b;

  return a.a;
}

int writeint() {
  int a = 0, b = 0;
  // Test for atomic write
#pragma omp atomic write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
  ;
#pragma omp atomic write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
  foo();
#pragma omp atomic write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
  a += b;
#pragma omp atomic write
  a = 0;
#pragma omp atomic write
  a = b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}}
#pragma omp atomic write write
  a = b;

  return 0;
}

int writeS() {
  struct S a, b;
  // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}}
#pragma omp atomic write write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected expression of scalar type}}
  a = b;

  return a.a;
}
8240.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop #pragma omp parallel for private(i, j) collapse(#P12) schedule(#P9, #P11) num_threads(#P11) #pragma omp target teams distribute #p #p for (i = 1; i < _PB_NI - 1; ++i) { #pragma omp for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). */ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
tree-vect-loop.c
/* Loop Vectorization Copyright (C) 2003-2017 Free Software Foundation, Inc. Contributed by Dorit Naishlos <dorit@il.ibm.com> and Ira Rosen <irar@il.ibm.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "backend.h" #include "target.h" #include "rtl.h" #include "tree.h" #include "gimple.h" #include "cfghooks.h" #include "tree-pass.h" #include "ssa.h" #include "optabs-tree.h" #include "diagnostic-core.h" #include "fold-const.h" #include "stor-layout.h" #include "cfganal.h" #include "gimplify.h" #include "gimple-iterator.h" #include "gimplify-me.h" #include "tree-ssa-loop-ivopts.h" #include "tree-ssa-loop-manip.h" #include "tree-ssa-loop-niter.h" #include "tree-ssa-loop.h" #include "cfgloop.h" #include "params.h" #include "tree-scalar-evolution.h" #include "tree-vectorizer.h" #include "gimple-fold.h" #include "cgraph.h" #include "tree-cfg.h" #include "tree-if-conv.h" /* Loop Vectorization Pass. This pass tries to vectorize loops. 
For example, the vectorizer transforms the following simple loop: short a[N]; short b[N]; short c[N]; int i; for (i=0; i<N; i++){ a[i] = b[i] + c[i]; } as if it was manually vectorized by rewriting the source code into: typedef int __attribute__((mode(V8HI))) v8hi; short a[N]; short b[N]; short c[N]; int i; v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c; v8hi va, vb, vc; for (i=0; i<N/8; i++){ vb = pb[i]; vc = pc[i]; va = vb + vc; pa[i] = va; } The main entry to this pass is vectorize_loops(), in which the vectorizer applies a set of analyses on a given set of loops, followed by the actual vectorization transformation for the loops that had successfully passed the analysis phase. Throughout this pass we make a distinction between two types of data: scalars (which are represented by SSA_NAMES), and memory references ("data-refs"). These two types of data require different handling both during analysis and transformation. The types of data-refs that the vectorizer currently supports are ARRAY_REFS which base is an array DECL (not a pointer), and INDIRECT_REFS through pointers; both array and pointer accesses are required to have a simple (consecutive) access pattern. Analysis phase: =============== The driver for the analysis phase is vect_analyze_loop(). It applies a set of analyses, some of which rely on the scalar evolution analyzer (scev) developed by Sebastian Pop. During the analysis phase the vectorizer records some information per stmt in a "stmt_vec_info" struct which is attached to each stmt in the loop, as well as general information about the loop as a whole, which is recorded in a "loop_vec_info" struct attached to each loop. Transformation phase: ===================== The loop transformation phase scans all the stmts in the loop, and creates a vector stmt (or a sequence of stmts) for each scalar stmt S in the loop that needs to be vectorized. 
It inserts the vector code sequence just before the scalar stmt S, and records a pointer to the vector code in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct attached to S). This pointer will be used for the vectorization of following stmts which use the def of stmt S. Stmt S is removed if it writes to memory; otherwise, we rely on dead code elimination for removing it. For example, say stmt S1 was vectorized into stmt VS1: VS1: vb = px[i]; S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1 S2: a = b; To vectorize stmt S2, the vectorizer first finds the stmt that defines the operand 'b' (S1), and gets the relevant vector def 'vb' from the vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The resulting sequence would be: VS1: vb = px[i]; S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1 VS2: va = vb; S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2 Operands that are not SSA_NAMEs, are data-refs that appear in load/store operations (like 'x[i]' in S1), and are handled differently. Target modeling: ================= Currently the only target specific information that is used is the size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". Targets that can support different sizes of vectors, for now will need to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More flexibility will be added in the future. Since we only vectorize operations which vector form can be expressed using existing tree codes, to verify that an operation is supported, the vectorizer checks the relevant optab at the relevant machine_mode (e.g, optab_handler (add_optab, V8HImode)). If the value found is CODE_FOR_nothing, then there's no target support, and we can't vectorize the stmt. 
For additional information on this project see: http://gcc.gnu.org/projects/tree-ssa/vectorization.html */ static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *); /* Function vect_determine_vectorization_factor Determine the vectorization factor (VF). VF is the number of data elements that are operated upon in parallel in a single iteration of the vectorized loop. For example, when vectorizing a loop that operates on 4byte elements, on a target with vector size (VS) 16byte, the VF is set to 4, since 4 elements can fit in a single vector register. We currently support vectorization of loops in which all types operated upon are of the same size. Therefore this function currently sets VF according to the size of the types operated upon, and fails if there are multiple sizes in the loop. VF is also the factor by which the loop iterations are strip-mined, e.g.: original loop: for (i=0; i<N; i++){ a[i] = b[i] + c[i]; } vectorized loop: for (i=0; i<N; i+=VF){ a[i:VF] = b[i:VF] + c[i:VF]; } */ static bool vect_determine_vectorization_factor (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); unsigned nbbs = loop->num_nodes; unsigned int vectorization_factor = 0; tree scalar_type = NULL_TREE; gphi *phi; tree vectype; unsigned int nunits; stmt_vec_info stmt_info; unsigned i; HOST_WIDE_INT dummy; gimple *stmt, *pattern_stmt = NULL; gimple_seq pattern_def_seq = NULL; gimple_stmt_iterator pattern_def_si = gsi_none (); bool analyze_pattern_stmt = false; bool bool_result; auto_vec<stmt_vec_info> mask_producers; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vect_determine_vectorization_factor ===\n"); for (i = 0; i < nbbs; i++) { basic_block bb = bbs[i]; for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) { phi = si.phi (); stmt_info = vinfo_for_stmt (phi); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "==> 
examining phi: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); } gcc_assert (stmt_info); if (STMT_VINFO_RELEVANT_P (stmt_info) || STMT_VINFO_LIVE_P (stmt_info)) { gcc_assert (!STMT_VINFO_VECTYPE (stmt_info)); scalar_type = TREE_TYPE (PHI_RESULT (phi)); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "get vectype for scalar type: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type); dump_printf (MSG_NOTE, "\n"); } vectype = get_vectype_for_scalar_type (scalar_type); if (!vectype) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: unsupported " "data-type "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return false; } STMT_VINFO_VECTYPE (stmt_info) = vectype; if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "vectype: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype); dump_printf (MSG_NOTE, "\n"); } nunits = TYPE_VECTOR_SUBPARTS (vectype); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n", nunits); if (!vectorization_factor || (nunits > vectorization_factor)) vectorization_factor = nunits; } } for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si) || analyze_pattern_stmt;) { tree vf_vectype; if (analyze_pattern_stmt) stmt = pattern_stmt; else stmt = gsi_stmt (si); stmt_info = vinfo_for_stmt (stmt); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); } gcc_assert (stmt_info); /* Skip stmts which do not need to be vectorized. 
*/ if ((!STMT_VINFO_RELEVANT_P (stmt_info) && !STMT_VINFO_LIVE_P (stmt_info)) || gimple_clobber_p (stmt)) { if (STMT_VINFO_IN_PATTERN_P (stmt_info) && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info)) && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt)) || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt)))) { stmt = pattern_stmt; stmt_info = vinfo_for_stmt (pattern_stmt); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "==> examining pattern statement: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); } } else { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "skip.\n"); gsi_next (&si); continue; } } else if (STMT_VINFO_IN_PATTERN_P (stmt_info) && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info)) && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt)) || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt)))) analyze_pattern_stmt = true; /* If a pattern statement has def stmts, analyze them too. */ if (is_pattern_stmt_p (stmt_info)) { if (pattern_def_seq == NULL) { pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info); pattern_def_si = gsi_start (pattern_def_seq); } else if (!gsi_end_p (pattern_def_si)) gsi_next (&pattern_def_si); if (pattern_def_seq != NULL) { gimple *pattern_def_stmt = NULL; stmt_vec_info pattern_def_stmt_info = NULL; while (!gsi_end_p (pattern_def_si)) { pattern_def_stmt = gsi_stmt (pattern_def_si); pattern_def_stmt_info = vinfo_for_stmt (pattern_def_stmt); if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info) || STMT_VINFO_LIVE_P (pattern_def_stmt_info)) break; gsi_next (&pattern_def_si); } if (!gsi_end_p (pattern_def_si)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "==> examining pattern def stmt: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0); } stmt = pattern_def_stmt; stmt_info = pattern_def_stmt_info; } else { pattern_def_si = gsi_none (); analyze_pattern_stmt = false; } } else analyze_pattern_stmt = false; } if (gimple_get_lhs (stmt) == NULL_TREE /* 
MASK_STORE has no lhs, but is ok. */ && (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt) || gimple_call_internal_fn (stmt) != IFN_MASK_STORE)) { if (is_gimple_call (stmt)) { /* Ignore calls with no lhs. These must be calls to #pragma omp simd functions, and what vectorization factor it really needs can't be determined until vectorizable_simd_clone_call. */ if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si)) { pattern_def_seq = NULL; gsi_next (&si); } continue; } if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: irregular stmt."); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); } return false; } if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt)))) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: vector stmt in loop:"); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); } return false; } bool_result = false; if (STMT_VINFO_VECTYPE (stmt_info)) { /* The only case when a vectype had been already set is for stmts that contain a dataref, or for "pattern-stmts" (stmts generated by the vectorizer to represent/replace a certain idiom). */ gcc_assert (STMT_VINFO_DATA_REF (stmt_info) || is_pattern_stmt_p (stmt_info) || !gsi_end_p (pattern_def_si)); vectype = STMT_VINFO_VECTYPE (stmt_info); } else { gcc_assert (!STMT_VINFO_DATA_REF (stmt_info)); if (gimple_call_internal_p (stmt, IFN_MASK_STORE)) scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3)); else scalar_type = TREE_TYPE (gimple_get_lhs (stmt)); /* Bool ops don't participate in vectorization factor computation. For comparison use compared types to compute a factor. 
*/ if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type) && is_gimple_assign (stmt) && gimple_assign_rhs_code (stmt) != COND_EXPR) { if (STMT_VINFO_RELEVANT_P (stmt_info) || STMT_VINFO_LIVE_P (stmt_info)) mask_producers.safe_push (stmt_info); bool_result = true; if (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt)))) scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt)); else { if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si)) { pattern_def_seq = NULL; gsi_next (&si); } continue; } } if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "get vectype for scalar type: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type); dump_printf (MSG_NOTE, "\n"); } vectype = get_vectype_for_scalar_type (scalar_type); if (!vectype) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: unsupported " "data-type "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return false; } if (!bool_result) STMT_VINFO_VECTYPE (stmt_info) = vectype; if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "vectype: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype); dump_printf (MSG_NOTE, "\n"); } } /* Don't try to compute VF out scalar types if we stmt produces boolean vector. Use result vectype instead. */ if (VECTOR_BOOLEAN_TYPE_P (vectype)) vf_vectype = vectype; else { /* The vectorization factor is according to the smallest scalar type (or the largest vector size, but we only support one vector size per loop). 
*/ if (!bool_result) scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "get vectype for scalar type: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type); dump_printf (MSG_NOTE, "\n"); } vf_vectype = get_vectype_for_scalar_type (scalar_type); } if (!vf_vectype) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: unsupported data-type "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return false; } if ((GET_MODE_SIZE (TYPE_MODE (vectype)) != GET_MODE_SIZE (TYPE_MODE (vf_vectype)))) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: different sized vector " "types in statement, "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype); dump_printf (MSG_MISSED_OPTIMIZATION, " and "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vf_vectype); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return false; } if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "vectype: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, vf_vectype); dump_printf (MSG_NOTE, "\n"); } nunits = TYPE_VECTOR_SUBPARTS (vf_vectype); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n", nunits); if (!vectorization_factor || (nunits > vectorization_factor)) vectorization_factor = nunits; if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si)) { pattern_def_seq = NULL; gsi_next (&si); } } } /* TODO: Analyze cost. Decide if worth while to vectorize. 
*/ if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = %d\n", vectorization_factor); if (vectorization_factor <= 1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: unsupported data-type\n"); return false; } LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor; for (i = 0; i < mask_producers.length (); i++) { tree mask_type = NULL; stmt = STMT_VINFO_STMT (mask_producers[i]); if (is_gimple_assign (stmt) && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt)))) { scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt)); mask_type = get_mask_type_for_scalar_type (scalar_type); if (!mask_type) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: unsupported mask\n"); return false; } } else { tree rhs; ssa_op_iter iter; gimple *def_stmt; enum vect_def_type dt; FOR_EACH_SSA_TREE_OPERAND (rhs, stmt, iter, SSA_OP_USE) { if (!vect_is_simple_use (rhs, mask_producers[i]->vinfo, &def_stmt, &dt, &vectype)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: can't compute mask type " "for statement, "); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); } return false; } /* No vectype probably means external definition. Allow it in case there is another operand which allows to determine mask type. 
*/ if (!vectype) continue; if (!mask_type) mask_type = vectype; else if (TYPE_VECTOR_SUBPARTS (mask_type) != TYPE_VECTOR_SUBPARTS (vectype)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: different sized masks " "types in statement, "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, mask_type); dump_printf (MSG_MISSED_OPTIMIZATION, " and "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return false; } else if (VECTOR_BOOLEAN_TYPE_P (mask_type) != VECTOR_BOOLEAN_TYPE_P (vectype)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: mixed mask and " "nonmask vector types in statement, "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, mask_type); dump_printf (MSG_MISSED_OPTIMIZATION, " and "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return false; } } /* We may compare boolean value loaded as vector of integers. Fix mask_type in such case. */ if (mask_type && !VECTOR_BOOLEAN_TYPE_P (mask_type) && gimple_code (stmt) == GIMPLE_ASSIGN && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison) mask_type = build_same_sized_truth_vector_type (mask_type); } /* No mask_type should mean loop invariant predicate. This is probably a subject for optimization in if-conversion. */ if (!mask_type) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: can't compute mask type " "for statement, "); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); } return false; } STMT_VINFO_VECTYPE (mask_producers[i]) = mask_type; } return true; } /* Function vect_is_simple_iv_evolution. FORNOW: A simple evolution of an induction variables in the loop is considered a polynomial evolution. 
*/

/* Decide whether ACCESS_FN describes a "simple" induction in loop
   LOOP_NB: an affine evolution whose step we can reason about.  On
   success, store the initial value in *INIT and the per-iteration step
   in *STEP and return true; otherwise return false.  */

static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
                             tree * step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
  basic_block bb;

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn,
                                                           loop_nb));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "step: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
      dump_printf (MSG_NOTE, ", init: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
      dump_printf (MSG_NOTE, "\n");
    }

  /* The outputs are set even when we go on to reject the step below;
     callers must not rely on them unless this function returns true.  */
  *init = init_expr;
  *step = step_expr;

  /* Accept the step only if it is an integer constant, a loop-invariant
     SSA name (not defined inside LOOP_NB) of integral type — or of
     floating type when -fassociative-math permits reassociation — or a
     REAL_CST under -fassociative-math.  Everything else is "unknown".  */
  if (TREE_CODE (step_expr) != INTEGER_CST
      && (TREE_CODE (step_expr) != SSA_NAME
          || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
              && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
          || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
              && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
                  || !flag_associative_math)))
      && (TREE_CODE (step_expr) != REAL_CST
          || !flag_associative_math))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "step unknown.\n");
      return false;
    }

  return true;
}

/* Function vect_analyze_scalar_cycles_1.

   Examine the cross iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).
*/ static void vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop) { basic_block bb = loop->header; tree init, step; auto_vec<gimple *, 64> worklist; gphi_iterator gsi; bool double_reduc; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_scalar_cycles ===\n"); /* First - identify all inductions. Reduction detection assumes that all the inductions have been identified, therefore, this order must not be changed. */ for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gphi *phi = gsi.phi (); tree access_fn = NULL; tree def = PHI_RESULT (phi); stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); } /* Skip virtual phi's. The data dependences that are associated with virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */ if (virtual_operand_p (def)) continue; STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type; /* Analyze the evolution function. 
*/ access_fn = analyze_scalar_evolution (loop, def); if (access_fn) { STRIP_NOPS (access_fn); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Access function of PHI: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn); dump_printf (MSG_NOTE, "\n"); } STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo) = initial_condition_in_loop_num (access_fn, loop->num); STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) = evolution_part_in_loop_num (access_fn, loop->num); } if (!access_fn || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step) || (LOOP_VINFO_LOOP (loop_vinfo) != loop && TREE_CODE (step) != INTEGER_CST)) { worklist.safe_push (phi); continue; } gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo) != NULL_TREE); gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n"); STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def; } /* Second - identify all reductions and nested cycles. 
*/ while (worklist.length () > 0) { gimple *phi = worklist.pop (); tree def = PHI_RESULT (phi); stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi); gimple *reduc_stmt; bool nested_cycle; if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); } gcc_assert (!virtual_operand_p (def) && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type); nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo)); reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle, &double_reduc, false); if (reduc_stmt) { if (double_reduc) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Detected double reduction.\n"); STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def; STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) = vect_double_reduction_def; } else { if (nested_cycle) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Detected vectorizable nested cycle.\n"); STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle; STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) = vect_nested_cycle; } else { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Detected reduction.\n"); STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def; STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) = vect_reduction_def; /* Store the reduction cycles for possible vectorization in loop-aware SLP. */ LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt); } } } else if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "Unknown def-use cycle pattern.\n"); } } /* Function vect_analyze_scalar_cycles. Examine the cross iteration def-use cycles of scalar variables, by analyzing the loop-header PHIs of scalar variables. Classify each cycle as one of the following: invariant, induction, reduction, unknown. We do that for the loop represented by LOOP_VINFO, and also to its inner-loop, if exists. 
Examples for scalar cycles: Example1: reduction: loop1: for (i=0; i<N; i++) sum += a[i]; Example2: induction: loop2: for (i=0; i<N; i++) a[i] = i; */ static void vect_analyze_scalar_cycles (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); vect_analyze_scalar_cycles_1 (loop_vinfo, loop); /* When vectorizing an outer-loop, the inner-loop is executed sequentially. Reductions in such inner-loop therefore have different properties than the reductions in the nest that gets vectorized: 1. When vectorized, they are executed in the same order as in the original scalar loop, so we can't change the order of computation when vectorizing them. 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the current checks are too strict. */ if (loop->inner) vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner); } /* Transfer group and reduction information from STMT to its pattern stmt. */ static void vect_fixup_reduc_chain (gimple *stmt) { gimple *firstp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)); gimple *stmtp; gcc_assert (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (firstp)) && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))); GROUP_SIZE (vinfo_for_stmt (firstp)) = GROUP_SIZE (vinfo_for_stmt (stmt)); do { stmtp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)); GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmtp)) = firstp; stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)); if (stmt) GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmtp)) = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)); } while (stmt); STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmtp)) = vect_reduction_def; } /* Fixup scalar cycles that now have their stmts detected as patterns. */ static void vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo) { gimple *first; unsigned i; FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first) if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (first))) { gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first)); while (next) { if (! 
STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next)))
	      break;
	    next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
	  }
	/* If not all stmt in the chain are patterns try to handle
	   the chain without patterns.  */
	if (! next)
	  {
	    vect_fixup_reduc_chain (first);
	    LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
	      = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first));
	  }
      }
}

/* Function vect_get_loop_niters.

   Determine how many iterations the loop is executed and place it
   in NUMBER_OF_ITERATIONS.  Place the number of latch iterations
   in NUMBER_OF_ITERATIONSM1.  Place the condition under which the
   niter information holds in ASSUMPTIONS.

   Return the loop exit condition.  */

static gcond *
vect_get_loop_niters (struct loop *loop, tree *assumptions,
		      tree *number_of_iterations,
		      tree *number_of_iterationsm1)
{
  edge exit = single_exit (loop);
  struct tree_niter_desc niter_desc;
  tree niter_assumptions, niter, may_be_zero;
  gcond *cond = get_loop_exit_condition (loop);

  /* Conservative defaults in case the analysis below bails out.  */
  *assumptions = boolean_true_node;
  *number_of_iterationsm1 = chrec_dont_know;
  *number_of_iterations = chrec_dont_know;
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== get_loop_niters ===\n");

  if (!exit)
    return cond;

  niter = chrec_dont_know;
  may_be_zero = NULL_TREE;
  niter_assumptions = boolean_true_node;
  if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
      || chrec_contains_undetermined (niter_desc.niter))
    return cond;

  niter_assumptions = niter_desc.assumptions;
  may_be_zero = niter_desc.may_be_zero;
  niter = niter_desc.niter;

  /* A trivially-false MAY_BE_ZERO condition carries no information.  */
  if (may_be_zero && integer_zerop (may_be_zero))
    may_be_zero = NULL_TREE;

  if (may_be_zero)
    {
      if (COMPARISON_CLASS_P (may_be_zero))
	{
	  /* Try to combine may_be_zero with assumptions, this can simplify
	     computation of niter expression.
*/ if (niter_assumptions && !integer_nonzerop (niter_assumptions)) niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, niter_assumptions, fold_build1 (TRUTH_NOT_EXPR, boolean_type_node, may_be_zero)); else niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero, build_int_cst (TREE_TYPE (niter), 0), niter); may_be_zero = NULL_TREE; } else if (integer_nonzerop (may_be_zero)) { *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0); *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1); return cond; } else return cond; } *assumptions = niter_assumptions; *number_of_iterationsm1 = niter; /* We want the number of loop header executions which is the number of latch executions plus one. ??? For UINT_MAX latch executions this number overflows to zero for loops like do { n++; } while (n != 0); */ if (niter && !chrec_contains_undetermined (niter)) niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter), build_int_cst (TREE_TYPE (niter), 1)); *number_of_iterations = niter; return cond; } /* Function bb_in_loop_p Used as predicate for dfs order traversal of the loop bbs. */ static bool bb_in_loop_p (const_basic_block bb, const void *data) { const struct loop *const loop = (const struct loop *)data; if (flow_bb_inside_loop_p (loop, bb)) return true; return false; } /* Function new_loop_vec_info. Create and initialize a new loop_vec_info struct for LOOP, as well as stmt_vec_info structs for all the stmts in LOOP. */ static loop_vec_info new_loop_vec_info (struct loop *loop) { loop_vec_info res; basic_block *bbs; gimple_stmt_iterator si; unsigned int i, nbbs; res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info)); res->kind = vec_info::loop; LOOP_VINFO_LOOP (res) = loop; bbs = get_loop_body (loop); /* Create/Update stmt_info for all stmts in the loop. 
*/
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];
      /* Attach a fresh stmt_vec_info to every phi and every stmt.  */
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *phi = gsi_stmt (si);
	  gimple_set_uid (phi, 0);
	  set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res));
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  gimple_set_uid (stmt, 0);
	  set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res));
	}
    }

  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the
     same as reversed postorder traversal, so we are safe.  */
  free (bbs);
  bbs = XCNEWVEC (basic_block, loop->num_nodes);
  nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
			     bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);

  /* Initialize all fields to their "nothing computed yet" values.  */
  LOOP_VINFO_BBS (res) = bbs;
  LOOP_VINFO_NITERSM1 (res) = NULL;
  LOOP_VINFO_NITERS (res) = NULL;
  LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
  LOOP_VINFO_NITERS_ASSUMPTIONS (res) = NULL;
  LOOP_VINFO_COST_MODEL_THRESHOLD (res) = 0;
  LOOP_VINFO_VECTORIZABLE_P (res) = 0;
  LOOP_VINFO_PEELING_FOR_ALIGNMENT (res) = 0;
  LOOP_VINFO_VECT_FACTOR (res) = 0;
  LOOP_VINFO_LOOP_NEST (res) = vNULL;
  LOOP_VINFO_DATAREFS (res) = vNULL;
  LOOP_VINFO_DDRS (res) = vNULL;
  LOOP_VINFO_UNALIGNED_DR (res) = NULL;
  LOOP_VINFO_MAY_MISALIGN_STMTS (res) = vNULL;
  LOOP_VINFO_MAY_ALIAS_DDRS (res) = vNULL;
  LOOP_VINFO_GROUPED_STORES (res) = vNULL;
  LOOP_VINFO_REDUCTIONS (res) = vNULL;
  LOOP_VINFO_REDUCTION_CHAINS (res) = vNULL;
  LOOP_VINFO_SLP_INSTANCES (res) = vNULL;
  LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;
  LOOP_VINFO_TARGET_COST_DATA (res) = init_cost (loop);
  LOOP_VINFO_PEELING_FOR_GAPS (res) = false;
  LOOP_VINFO_PEELING_FOR_NITER (res) = false;
  LOOP_VINFO_OPERANDS_SWAPPED (res) = false;
  LOOP_VINFO_ORIG_LOOP_INFO (res) = NULL;

  return res;
}

/* Function destroy_loop_vec_info.
Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the stmts in the loop. */ void destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts) { struct loop *loop; basic_block *bbs; int nbbs; gimple_stmt_iterator si; int j; vec<slp_instance> slp_instances; slp_instance instance; bool swapped; if (!loop_vinfo) return; loop = LOOP_VINFO_LOOP (loop_vinfo); bbs = LOOP_VINFO_BBS (loop_vinfo); nbbs = clean_stmts ? loop->num_nodes : 0; swapped = LOOP_VINFO_OPERANDS_SWAPPED (loop_vinfo); for (j = 0; j < nbbs; j++) { basic_block bb = bbs[j]; for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) free_stmt_vec_info (gsi_stmt (si)); for (si = gsi_start_bb (bb); !gsi_end_p (si); ) { gimple *stmt = gsi_stmt (si); /* We may have broken canonical form by moving a constant into RHS1 of a commutative op. Fix such occurrences. */ if (swapped && is_gimple_assign (stmt)) { enum tree_code code = gimple_assign_rhs_code (stmt); if ((code == PLUS_EXPR || code == POINTER_PLUS_EXPR || code == MULT_EXPR) && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt))) swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt), gimple_assign_rhs2_ptr (stmt)); else if (code == COND_EXPR && CONSTANT_CLASS_P (gimple_assign_rhs2 (stmt))) { tree cond_expr = gimple_assign_rhs1 (stmt); enum tree_code cond_code = TREE_CODE (cond_expr); if (TREE_CODE_CLASS (cond_code) == tcc_comparison) { bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr, 0)); cond_code = invert_tree_comparison (cond_code, honor_nans); if (cond_code != ERROR_MARK) { TREE_SET_CODE (cond_expr, cond_code); swap_ssa_operands (stmt, gimple_assign_rhs2_ptr (stmt), gimple_assign_rhs3_ptr (stmt)); } } } } /* Free stmt_vec_info. 
*/ free_stmt_vec_info (stmt); gsi_next (&si); } } free (LOOP_VINFO_BBS (loop_vinfo)); vect_destroy_datarefs (loop_vinfo); free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo)); LOOP_VINFO_LOOP_NEST (loop_vinfo).release (); LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).release (); LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release (); LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).release (); slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo); FOR_EACH_VEC_ELT (slp_instances, j, instance) vect_free_slp_instance (instance); LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release (); LOOP_VINFO_GROUPED_STORES (loop_vinfo).release (); LOOP_VINFO_REDUCTIONS (loop_vinfo).release (); LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).release (); destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)); loop_vinfo->scalar_cost_vec.release (); free (loop_vinfo); loop->aux = NULL; } /* Calculate the cost of one scalar iteration of the loop. */ static void vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); int nbbs = loop->num_nodes, factor, scalar_single_iter_cost = 0; int innerloop_iters, i; /* Count statements in scalar loop. Using this as scalar cost for a single iteration for now. TODO: Add outer loop support. TODO: Consider assigning different costs to different scalar statements. */ /* FORNOW. */ innerloop_iters = 1; if (loop->inner) innerloop_iters = 50; /* FIXME */ for (i = 0; i < nbbs; i++) { gimple_stmt_iterator si; basic_block bb = bbs[i]; if (bb->loop_father == loop->inner) factor = innerloop_iters; else factor = 1; for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) { gimple *stmt = gsi_stmt (si); stmt_vec_info stmt_info = vinfo_for_stmt (stmt); if (!is_gimple_assign (stmt) && !is_gimple_call (stmt)) continue; /* Skip stmts that are not vectorized inside the loop. 
*/ if (stmt_info && !STMT_VINFO_RELEVANT_P (stmt_info) && (!STMT_VINFO_LIVE_P (stmt_info) || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info))) && !STMT_VINFO_IN_PATTERN_P (stmt_info)) continue; vect_cost_for_stmt kind; if (STMT_VINFO_DATA_REF (stmt_info)) { if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info))) kind = scalar_load; else kind = scalar_store; } else kind = scalar_stmt; scalar_single_iter_cost += record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), factor, kind, stmt_info, 0, vect_prologue); } } LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo) = scalar_single_iter_cost; } /* Function vect_analyze_loop_form_1. Verify that certain CFG restrictions hold, including: - the loop has a pre-header - the loop has a single entry and exit - the loop exit condition is simple enough - the number of iterations can be analyzed, i.e, a countable loop. The niter could be analyzed under some assumptions. */ bool vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond, tree *assumptions, tree *number_of_iterationsm1, tree *number_of_iterations, gcond **inner_loop_cond) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_loop_form ===\n"); /* Different restrictions apply when we are considering an inner-most loop, vs. an outer (nested) loop. (FORNOW. May want to relax some of these restrictions in the future). */ if (!loop->inner) { /* Inner-most loop. We currently require that the number of BBs is exactly 2 (the header and latch). 
Vectorizable inner-most loops look like this: (pre-header) | header <--------+ | | | | +--> latch --+ | (exit-bb) */ if (loop->num_nodes != 2) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: control flow in loop.\n"); return false; } if (empty_block_p (loop->header)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: empty loop.\n"); return false; } } else { struct loop *innerloop = loop->inner; edge entryedge; /* Nested loop. We currently require that the loop is doubly-nested, contains a single inner loop, and the number of BBs is exactly 5. Vectorizable outer-loops look like this: (pre-header) | header <---+ | | inner-loop | | | tail ------+ | (exit-bb) The inner-loop has the properties expected of inner-most loops as described above. */ if ((loop->inner)->inner || (loop->inner)->next) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: multiple nested loops.\n"); return false; } if (loop->num_nodes != 5) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: control flow in loop.\n"); return false; } entryedge = loop_preheader_edge (innerloop); if (entryedge->src != loop->header || !single_exit (innerloop) || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: unsupported outerloop form.\n"); return false; } /* Analyze the inner-loop. */ tree inner_niterm1, inner_niter, inner_assumptions; if (! vect_analyze_loop_form_1 (loop->inner, inner_loop_cond, &inner_assumptions, &inner_niterm1, &inner_niter, NULL) /* Don't support analyzing niter under assumptions for inner loop. 
*/ || !integer_onep (inner_assumptions)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: Bad inner loop.\n"); return false; } if (!expr_invariant_in_loop_p (loop, inner_niter)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: inner-loop count not" " invariant.\n"); return false; } if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Considering outer-loop vectorization.\n"); } if (!single_exit (loop) || EDGE_COUNT (loop->header->preds) != 2) { if (dump_enabled_p ()) { if (!single_exit (loop)) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: multiple exits.\n"); else if (EDGE_COUNT (loop->header->preds) != 2) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: too many incoming edges.\n"); } return false; } /* We assume that the loop exit condition is at the end of the loop. i.e, that the loop is represented as a do-while (with a proper if-guard before the loop if needed), where the loop header contains all the executable statements, and the latch is empty. */ if (!empty_block_p (loop->latch) || !gimple_seq_empty_p (phi_nodes (loop->latch))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: latch block not empty.\n"); return false; } /* Make sure the exit is not abnormal. 
*/ edge e = single_exit (loop); if (e->flags & EDGE_ABNORMAL) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: abnormal loop exit edge.\n"); return false; } *loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations, number_of_iterationsm1); if (!*loop_cond) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: complicated exit condition.\n"); return false; } if (integer_zerop (*assumptions) || !*number_of_iterations || chrec_contains_undetermined (*number_of_iterations)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: number of iterations cannot be " "computed.\n"); return false; } if (integer_zerop (*number_of_iterations)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: number of iterations = 0.\n"); return false; } return true; } /* Analyze LOOP form and return a loop_vec_info if it is of suitable form. */ loop_vec_info vect_analyze_loop_form (struct loop *loop) { tree assumptions, number_of_iterations, number_of_iterationsm1; gcond *loop_cond, *inner_loop_cond = NULL; if (! vect_analyze_loop_form_1 (loop, &loop_cond, &assumptions, &number_of_iterationsm1, &number_of_iterations, &inner_loop_cond)) return NULL; loop_vec_info loop_vinfo = new_loop_vec_info (loop); LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1; LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations; LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations; if (!integer_onep (assumptions)) { /* We consider to vectorize this loop by versioning it under some assumptions. In order to do this, we need to clear existing information computed by scev and niter analyzer. */ scev_reset_htab (); free_numbers_of_iterations_estimates_loop (loop); /* Also set flag for this loop so that following scev and niter analysis are done under the assumptions. 
*/ loop_constraint_set (loop, LOOP_C_FINITE); /* Also record the assumptions for versioning. */ LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions; } if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Symbolic number of iterations is "); dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations); dump_printf (MSG_NOTE, "\n"); } } STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type; if (inner_loop_cond) STMT_VINFO_TYPE (vinfo_for_stmt (inner_loop_cond)) = loop_exit_ctrl_vec_info_type; gcc_assert (!loop->aux); loop->aux = loop_vinfo; return loop_vinfo; } /* Scan the loop stmts and dependent on whether there are any (non-)SLP statements update the vectorization factor. */ static void vect_update_vf_for_slp (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); int nbbs = loop->num_nodes; unsigned int vectorization_factor; int i; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vect_update_vf_for_slp ===\n"); vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); gcc_assert (vectorization_factor != 0); /* If all the stmts in the loop can be SLPed, we perform only SLP, and vectorization factor of the loop is the unrolling factor required by the SLP instances. If that unrolling factor is 1, we say, that we perform pure SLP on loop - cross iteration parallelism is not exploited. 
*/
  bool only_slp_in_loop = true;
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
	  if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	      && STMT_VINFO_RELATED_STMT (stmt_info))
	    {
	      /* Look at the pattern stmt in place of the original one.  */
	      stmt = STMT_VINFO_RELATED_STMT (stmt_info);
	      stmt_info = vinfo_for_stmt (stmt);
	    }
	  if ((STMT_VINFO_RELEVANT_P (stmt_info)
	       || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
	      && !PURE_SLP_STMT (stmt_info))
	    /* STMT needs both SLP and loop-based vectorization.  */
	    only_slp_in_loop = false;
	}
    }

  /* Pure SLP: the VF is just the SLP unrolling factor; otherwise both
     factors must be honored, so take their least common multiple.  */
  if (only_slp_in_loop)
    vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
  else
    vectorization_factor
      = least_common_multiple (vectorization_factor,
			       LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));

  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "Updating vectorization factor to %d\n",
		     vectorization_factor);
}

/* Function vect_analyze_loop_operations.

   Scan the loop stmts and make sure they are all vectorizable.
*/ static bool vect_analyze_loop_operations (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); int nbbs = loop->num_nodes; int i; stmt_vec_info stmt_info; bool need_to_vectorize = false; bool ok; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_loop_operations ===\n"); for (i = 0; i < nbbs; i++) { basic_block bb = bbs[i]; for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) { gphi *phi = si.phi (); ok = true; stmt_info = vinfo_for_stmt (phi); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "examining phi: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); } if (virtual_operand_p (gimple_phi_result (phi))) continue; /* Inner-loop loop-closed exit phi in outer-loop vectorization (i.e., a phi in the tail of the outer-loop). */ if (! is_loop_header_bb_p (bb)) { /* FORNOW: we currently don't support the case that these phis are not used in the outerloop (unless it is double reduction, i.e., this phi is vect_reduction_def), cause this case requires to actually do something here. */ if ((!STMT_VINFO_RELEVANT_P (stmt_info) || STMT_VINFO_LIVE_P (stmt_info)) && STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "Unsupported loop-closed phi in " "outer-loop.\n"); return false; } /* If PHI is used in the outer loop, we check that its operand is defined in the inner loop. 
*/ if (STMT_VINFO_RELEVANT_P (stmt_info)) { tree phi_op; gimple *op_def_stmt; if (gimple_phi_num_args (phi) != 1) return false; phi_op = PHI_ARG_DEF (phi, 0); if (TREE_CODE (phi_op) != SSA_NAME) return false; op_def_stmt = SSA_NAME_DEF_STMT (phi_op); if (gimple_nop_p (op_def_stmt) || !flow_bb_inside_loop_p (loop, gimple_bb (op_def_stmt)) || !vinfo_for_stmt (op_def_stmt)) return false; if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt)) != vect_used_in_outer && STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt)) != vect_used_in_outer_by_reduction) return false; } continue; } gcc_assert (stmt_info); if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope || STMT_VINFO_LIVE_P (stmt_info)) && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def) { /* A scalar-dependence cycle that we don't support. */ if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: scalar dependence cycle.\n"); return false; } if (STMT_VINFO_RELEVANT_P (stmt_info)) { need_to_vectorize = true; if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def) ok = vectorizable_induction (phi, NULL, NULL); } if (ok && STMT_VINFO_LIVE_P (stmt_info)) ok = vectorizable_live_operation (phi, NULL, NULL, -1, NULL); if (!ok) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: relevant phi not " "supported: "); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0); } return false; } } for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) { gimple *stmt = gsi_stmt (si); if (!gimple_clobber_p (stmt) && !vect_analyze_stmt (stmt, &need_to_vectorize, NULL)) return false; } } /* bbs */ /* All operations in the loop are either irrelevant (deal with loop control, or dead), or only used outside the loop and can be moved out of the loop (e.g. invariants, inductions). The loop can be optimized away by scalar optimizations. We're better off not touching this loop. 
*/ if (!need_to_vectorize) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "All the computation can be taken out of the loop.\n"); if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: redundant loop. no profit to " "vectorize.\n"); return false; } return true; } /* Function vect_analyze_loop_2. Apply a set of analyses on LOOP, and create a loop_vec_info struct for it. The different analyses will record information in the loop_vec_info struct. */ static bool vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal) { bool ok; int max_vf = MAX_VECTORIZATION_FACTOR; int min_vf = 2; unsigned int n_stmts = 0; /* The first group of checks is independent of the vector size. */ fatal = true; /* Find all data references in the loop (which correspond to vdefs/vuses) and analyze their evolution in the loop. */ basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); loop_p loop = LOOP_VINFO_LOOP (loop_vinfo); if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: loop nest containing two " "or more consecutive inner loops cannot be " "vectorized\n"); return false; } for (unsigned i = 0; i < loop->num_nodes; i++) for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *stmt = gsi_stmt (gsi); if (is_gimple_debug (stmt)) continue; ++n_stmts; if (!find_data_references_in_stmt (loop, stmt, &LOOP_VINFO_DATAREFS (loop_vinfo))) { if (is_gimple_call (stmt) && loop->safelen) { tree fndecl = gimple_call_fndecl (stmt), op; if (fndecl != NULL_TREE) { cgraph_node *node = cgraph_node::get (fndecl); if (node != NULL && node->simd_clones != NULL) { unsigned int j, n = gimple_call_num_args (stmt); for (j = 0; j < n; j++) { op = gimple_call_arg (stmt, j); if (DECL_P (op) || (REFERENCE_CLASS_P (op) && get_base_address (op))) break; } op = gimple_call_lhs (stmt); /* Ignore #pragma omp declare 
simd functions if they don't have data references in the call stmt itself. */ if (j == n && !(op && (DECL_P (op) || (REFERENCE_CLASS_P (op) && get_base_address (op))))) continue; } } } if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: loop contains function " "calls or data references that cannot " "be analyzed\n"); return false; } } /* Analyze the data references and also adjust the minimal vectorization factor according to the loads and stores. */ ok = vect_analyze_data_refs (loop_vinfo, &min_vf); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data references.\n"); return false; } /* Classify all cross-iteration scalar data-flow cycles. Cross-iteration cycles caused by virtual phis are analyzed separately. */ vect_analyze_scalar_cycles (loop_vinfo); vect_pattern_recog (loop_vinfo); vect_fixup_scalar_cycles_with_patterns (loop_vinfo); /* Analyze the access patterns of the data-refs in the loop (consecutive, complex, etc.). FORNOW: Only handle consecutive access pattern. */ ok = vect_analyze_data_ref_accesses (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data access.\n"); return false; } /* Data-flow analysis to detect stmts that do not need to be vectorized. */ ok = vect_mark_stmts_to_be_vectorized (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unexpected pattern.\n"); return false; } /* While the rest of the analysis below depends on it in some way. */ fatal = false; /* Analyze data dependences between the data-refs in the loop and adjust the maximum vectorization factor according to the dependences. FORNOW: fail at the first data dependence that we encounter. 
*/ ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf); if (!ok || max_vf < min_vf) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data dependence.\n"); return false; } ok = vect_determine_vectorization_factor (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "can't determine vectorization factor.\n"); return false; } if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data dependence.\n"); return false; } /* Compute the scalar iteration cost. */ vect_compute_single_scalar_iteration_cost (loop_vinfo); int saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); HOST_WIDE_INT estimated_niter; unsigned th; int min_scalar_loop_bound; /* Check the SLP opportunities in the loop, analyze and build SLP trees. */ ok = vect_analyze_slp (loop_vinfo, n_stmts); if (!ok) return false; /* If there are any SLP instances mark them as pure_slp. */ bool slp = vect_make_slp_decision (loop_vinfo); if (slp) { /* Find stmts that need to be both vectorized and SLPed. */ vect_detect_hybrid_slp (loop_vinfo); /* Update the vectorization factor based on the SLP decision. */ vect_update_vf_for_slp (loop_vinfo); } /* This is the point where we can re-start analysis with SLP forced off. */ start_over: /* Now the vectorization factor is final. 
*/ unsigned vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); gcc_assert (vectorization_factor != 0); if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC "\n", vectorization_factor, LOOP_VINFO_INT_NITERS (loop_vinfo)); HOST_WIDE_INT max_niter = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo)); if ((LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor)) || (max_niter != -1 && (unsigned HOST_WIDE_INT) max_niter < vectorization_factor)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: iteration count smaller than " "vectorization factor.\n"); return false; } /* Analyze the alignment of the data-refs in the loop. Fail if a data reference is found that cannot be vectorized. */ ok = vect_analyze_data_refs_alignment (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data alignment.\n"); return false; } /* Prune the list of ddrs to be tested at run-time by versioning for alias. It is important to call pruning after vect_analyze_data_ref_accesses, since we use grouping information gathered by interleaving analysis. */ ok = vect_prune_runtime_alias_test_list (loop_vinfo); if (!ok) return false; /* Do not invoke vect_enhance_data_refs_alignment for eplilogue vectorization. */ if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo)) { /* This pass will decide on using loop versioning and/or loop peeling in order to enhance the alignment of data references in the loop. */ ok = vect_enhance_data_refs_alignment (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data alignment.\n"); return false; } } if (slp) { /* Analyze operations in the SLP instances. 
Note this may remove unsupported SLP instances which makes the above SLP kind detection invalid. */ unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length (); vect_slp_analyze_operations (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)); if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size) goto again; } /* Scan all the remaining operations in the loop that are not subject to SLP and make sure they are vectorizable. */ ok = vect_analyze_loop_operations (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad operation or unsupported loop bound.\n"); return false; } /* If epilog loop is required because of data accesses with gaps, one additional iteration needs to be peeled. Check if there is enough iterations for vectorization. */ if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) { int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo); if (wi::to_widest (scalar_niters) < vf) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "loop has no enough iterations to support" " peeling for gaps.\n"); return false; } } /* Analyze cost. Decide if worth while to vectorize. */ int min_profitable_estimate, min_profitable_iters; vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters, &min_profitable_estimate); if (min_profitable_iters < 0) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: vectorization not profitable.\n"); if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: vector version will never be " "profitable.\n"); goto again; } min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND) * vectorization_factor) - 1); /* Use the cost model only if it is more conservative than user specified threshold. 
*/ th = (unsigned) min_scalar_loop_bound; if (min_profitable_iters && (!min_scalar_loop_bound || min_profitable_iters > min_scalar_loop_bound)) th = (unsigned) min_profitable_iters; LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th; if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: vectorization not profitable.\n"); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "not vectorized: iteration count smaller than user " "specified loop bound parameter or minimum profitable " "iterations (whichever is more conservative).\n"); goto again; } estimated_niter = estimated_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo)); if (estimated_niter == -1) estimated_niter = max_niter; if (estimated_niter != -1 && ((unsigned HOST_WIDE_INT) estimated_niter <= MAX (th, (unsigned)min_profitable_estimate))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: estimated iteration count too " "small.\n"); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "not vectorized: estimated iteration count smaller " "than specified loop bound parameter or minimum " "profitable iterations (whichever is more " "conservative).\n"); goto again; } /* Decide whether we need to create an epilogue loop to handle remaining scalar iterations. 
*/ th = ((LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) + 1) / LOOP_VINFO_VECT_FACTOR (loop_vinfo)) * LOOP_VINFO_VECT_FACTOR (loop_vinfo); if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0) { if (ctz_hwi (LOOP_VINFO_INT_NITERS (loop_vinfo) - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)) < exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))) LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true; } else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) || (tree_ctz (LOOP_VINFO_NITERS (loop_vinfo)) < (unsigned)exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)) /* In case of versioning, check if the maximum number of iterations is greater than th. If they are identical, the epilogue is unnecessary. */ && (!LOOP_REQUIRES_VERSIONING (loop_vinfo) || (unsigned HOST_WIDE_INT) max_niter > th))) LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true; /* If an epilogue loop is required make sure we can create one. */ if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n"); if (!vect_can_advance_ivs_p (loop_vinfo) || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo), single_exit (LOOP_VINFO_LOOP (loop_vinfo)))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: can't create required " "epilog loop\n"); goto again; } } gcc_assert (vectorization_factor == (unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)); /* Ok to vectorize! */ return true; again: /* Try again with SLP forced off but if we didn't do any SLP there is no point in re-trying. */ if (!slp) return false; /* If there are reduction chains re-trying will fail anyway. */ if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ()) return false; /* Likewise if the grouped loads or stores in the SLP cannot be handled via interleaving or lane instructions. 
*/ slp_instance instance; slp_tree node; unsigned i, j; FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance) { stmt_vec_info vinfo; vinfo = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0]); if (! STMT_VINFO_GROUPED_ACCESS (vinfo)) continue; vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo)); unsigned int size = STMT_VINFO_GROUP_SIZE (vinfo); tree vectype = STMT_VINFO_VECTYPE (vinfo); if (! vect_store_lanes_supported (vectype, size) && ! vect_grouped_store_supported (vectype, size)) return false; FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node) { vinfo = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]); vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo)); bool single_element_p = !STMT_VINFO_GROUP_NEXT_ELEMENT (vinfo); size = STMT_VINFO_GROUP_SIZE (vinfo); vectype = STMT_VINFO_VECTYPE (vinfo); if (! vect_load_lanes_supported (vectype, size) && ! vect_grouped_load_supported (vectype, single_element_p, size)) return false; } } if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "re-trying with SLP disabled\n"); /* Roll back state appropriately. No SLP this time. */ slp = false; /* Restore vectorization factor as it were without SLP. */ LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor; /* Free the SLP instances. */ FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance) vect_free_slp_instance (instance); LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release (); /* Reset SLP type to loop_vect on all stmts. 
*/
  /* Walk every stmt in every BB of the loop and reset its SLP type (and
     the SLP type of any pattern stmt and pattern definition sequence
     attached to it) back to pure loop vectorization.  */
  for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
    {
      basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
      for (gimple_stmt_iterator si = gsi_start_bb (bb);
	   !gsi_end_p (si); gsi_next (&si))
	{
	  stmt_vec_info stmt_info = vinfo_for_stmt (gsi_stmt (si));
	  STMT_SLP_TYPE (stmt_info) = loop_vect;
	  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
	    {
	      /* Also reset the pattern stmt replacing this one and all
		 stmts of its pattern definition sequence.  */
	      stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
	      STMT_SLP_TYPE (stmt_info) = loop_vect;
	      for (gimple_stmt_iterator pi
		     = gsi_start (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info));
		   !gsi_end_p (pi); gsi_next (&pi))
		{
		  gimple *pstmt = gsi_stmt (pi);
		  STMT_SLP_TYPE (vinfo_for_stmt (pstmt)) = loop_vect;
		}
	    }
	}
    }
  /* Free optimized alias test DDRS.  */
  LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
  /* Reset target cost data.  */
  destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
  LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
    = init_cost (LOOP_VINFO_LOOP (loop_vinfo));
  /* Reset assorted flags.  */
  LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
  LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false;
  LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0;

  /* Re-run the whole analysis from the top, this time without SLP.  */
  goto start_over;
}

/* Function vect_analyze_loop.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  If ORIG_LOOP_VINFO is not NULL epilogue must
   be vectorized.  */

loop_vec_info
vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo)
{
  loop_vec_info loop_vinfo;
  unsigned int vector_sizes;

  /* Autodetect first vector size we try.  */
  current_vector_size = 0;
  vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "===== analyze_loop_nest =====\n");

  /* Do not analyze a loop nested inside an outer loop that was itself
     already vectorized.  */
  if (loop_outer (loop)
      && loop_vec_info_for_loop (loop_outer (loop))
      && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop already vectorized.\n");
      return NULL;
    }

  /* Try the candidate vector sizes from largest to smallest until the
     full analysis succeeds or we run out of candidates.  */
  while (1)
    {
      /* Check the CFG characteristics of the loop (nesting, entry/exit).  */
      loop_vinfo = vect_analyze_loop_form (loop);
      if (!loop_vinfo)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "bad loop form.\n");
	  return NULL;
	}

      bool fatal = false;

      if (orig_loop_vinfo)
	LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = orig_loop_vinfo;

      if (vect_analyze_loop_2 (loop_vinfo, fatal))
	{
	  LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;

	  return loop_vinfo;
	}

      destroy_loop_vec_info (loop_vinfo, true);

      /* Remove the size just tried from the candidate set; stop if the
	 failure was fatal or no candidate sizes remain.  */
      vector_sizes &= ~current_vector_size;
      if (fatal
	  || vector_sizes == 0
	  || current_vector_size == 0)
	return NULL;

      /* Try the next biggest vector size.  */
      current_vector_size = 1 << floor_log2 (vector_sizes);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "***** Re-trying analysis with "
			 "vector size %d\n", current_vector_size);
    }
}


/* Function reduction_code_for_scalar_code

   Input:
   CODE - tree_code of a reduction operations.

   Output:
   REDUC_CODE - the corresponding tree-code to be used to reduce the
      vector of partial results into a single scalar result, or ERROR_MARK
      if the operation is a supported reduction operation, but does not have
      such a tree-code.

   Return FALSE if CODE currently cannot be vectorized as reduction.
*/

static bool
reduction_code_for_scalar_code (enum tree_code code,
				enum tree_code *reduc_code)
{
  switch (code)
    {
      case MAX_EXPR:
        *reduc_code = REDUC_MAX_EXPR;
        return true;

      case MIN_EXPR:
        *reduc_code = REDUC_MIN_EXPR;
        return true;

      case PLUS_EXPR:
        *reduc_code = REDUC_PLUS_EXPR;
        return true;

      /* Supported as reductions but with no direct vector reduction
	 tree-code; the epilogue has to reduce the vector another way.  */
      case MULT_EXPR:
      case MINUS_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
      case BIT_AND_EXPR:
        *reduc_code = ERROR_MARK;
        return true;

      default:
        return false;
    }
}


/* Error reporting helper for vect_is_simple_reduction below.  GIMPLE
   statement STMT is printed with a message MSG.  */

static void
report_vect_op (int msg_type, gimple *stmt, const char *msg)
{
  dump_printf_loc (msg_type, vect_location, "%s", msg);
  dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
}


/* Detect SLP reduction of the form:

   #a1 = phi <a5, a0>
   a2 = operation (a1)
   a3 = operation (a2)
   a4 = operation (a3)
   a5 = operation (a4)

   #a = phi <a5>

   PHI is the reduction phi node (#a1 = phi <a5, a0> above)
   FIRST_STMT is the first reduction stmt in the chain
   (a2 = operation (a1)).

   Return TRUE if a reduction chain was detected.  */

static bool
vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
		       gimple *first_stmt)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  enum tree_code code;
  gimple *current_stmt = NULL, *loop_use_stmt = NULL, *first, *next_stmt;
  stmt_vec_info use_stmt_info, current_stmt_info;
  tree lhs;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  int nloop_uses, size = 0, n_out_of_loop_uses;
  bool found = false;

  if (loop != vect_loop)
    return false;

  lhs = PHI_RESULT (phi);
  code = gimple_assign_rhs_code (first_stmt);
  /* Follow the single in-loop use of each stmt's result, requiring every
     link of the chain to perform the same operation CODE, until we arrive
     back at the reduction PHI.  */
  while (1)
    {
      nloop_uses = 0;
      n_out_of_loop_uses = 0;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
        {
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;

          /* Check if we got back to the reduction phi.  */
	  if (use_stmt == phi)
            {
	      loop_use_stmt = use_stmt;
              found = true;
	      break;
            }

          if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
            {
	      loop_use_stmt = use_stmt;
	      nloop_uses++;
            }
          else
            n_out_of_loop_uses++;

          /* There can be either a single use in the loop or two uses in
	     phi nodes.  */
          if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
            return false;
        }

      if (found)
        break;

      /* We reached a statement with no loop uses.  */
      if (nloop_uses == 0)
	return false;

      /* This is a loop exit phi, and we haven't reached the reduction phi.  */
      if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
        return false;

      if (!is_gimple_assign (loop_use_stmt)
	  || code != gimple_assign_rhs_code (loop_use_stmt)
	  || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
        return false;

      /* Insert USE_STMT into reduction chain.  */
      use_stmt_info = vinfo_for_stmt (loop_use_stmt);
      if (current_stmt)
        {
          current_stmt_info = vinfo_for_stmt (current_stmt);
	  GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
          GROUP_FIRST_ELEMENT (use_stmt_info)
            = GROUP_FIRST_ELEMENT (current_stmt_info);
        }
      else
	GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;

      lhs = gimple_assign_lhs (loop_use_stmt);
      current_stmt = loop_use_stmt;
      size++;
    }

  if (!found || loop_use_stmt != phi || size < 2)
    return false;

  /* Swap the operands, if needed, to make the reduction operand be the second
     operand.  */
  lhs = PHI_RESULT (phi);
  next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
  while (next_stmt)
    {
      if (gimple_assign_rhs2 (next_stmt) == lhs)
	{
	  tree op = gimple_assign_rhs1 (next_stmt);
	  gimple *def_stmt = NULL;

          if (TREE_CODE (op) == SSA_NAME)
            def_stmt = SSA_NAME_DEF_STMT (op);

	  /* Check that the other def is either defined in the loop
	     ("vect_internal_def"), or it's an induction (defined by a
	     loop-header phi-node).  */
          if (def_stmt
              && gimple_bb (def_stmt)
	      && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
              && (is_gimple_assign (def_stmt)
                  || is_gimple_call (def_stmt)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                           == vect_induction_def
                  || (gimple_code (def_stmt) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                                  == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
	    {
	      /* Reduction operand already second; nothing to swap.  */
	      lhs = gimple_assign_lhs (next_stmt);
	      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	      continue;
	    }

	  return false;
	}
      else
	{
          tree op = gimple_assign_rhs2 (next_stmt);
	  gimple *def_stmt = NULL;

          if (TREE_CODE (op) == SSA_NAME)
            def_stmt = SSA_NAME_DEF_STMT (op);

          /* Check that the other def is either defined in the loop
	     ("vect_internal_def"), or it's an induction (defined by a
	     loop-header phi-node).  */
          if (def_stmt
              && gimple_bb (def_stmt)
	      && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
	      && (is_gimple_assign (def_stmt)
		  || is_gimple_call (def_stmt)
		  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
			      == vect_induction_def
		  || (gimple_code (def_stmt) == GIMPLE_PHI
		      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
				  == vect_internal_def
		      && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
		  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
		}

	      swap_ssa_operands (next_stmt,
				 gimple_assign_rhs1_ptr (next_stmt),
                                 gimple_assign_rhs2_ptr (next_stmt));
	      update_stmt (next_stmt);

	      /* Remember the swap so it can be undone if vectorization
		 ultimately fails.  */
	      if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
		LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
	    }
	  else
	    return false;
        }

      lhs = gimple_assign_lhs (next_stmt);
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }

  /* Save the chain for further analysis in SLP detection.  */
  first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
  LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first);
  GROUP_SIZE (vinfo_for_stmt (first)) = size;

  return true;
}


/* Function vect_is_simple_reduction_1

   (1) Detect a cross-iteration def-use cycle that represents a simple
   reduction computation.  We look for the following pattern:

   loop_header:
     a1 = phi < a0, a2 >
     a3 = ...
     a2 = operation (a3, a1)

   or

   a3 = ...
   loop_header:
     a1 = phi < a0, a2 >
     a2 = operation (a3, a1)

   such that:
   1. operation is commutative and associative and it is safe to
      change the order of the computation (if CHECK_REDUCTION is true)
   2. no uses for a2 in the loop (a2 is used out of the loop)
   3. no uses of a1 in the loop besides the reduction operation
   4. no uses of a1 outside the loop.

   Conditions 1,4 are tested here.
   Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.

   (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
   nested cycles, if CHECK_REDUCTION is false.

   (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
   reductions:

     a1 = phi < a0, a2 >
     inner loop (def of a3)
     a2 = phi < a3 >

   (4) Detect condition expressions, ie:
     for (int i = 0; i < N; i++)
       if (a[i] < val)
	ret_val = a[i];

*/

static gimple *
vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
			  bool check_reduction, bool *double_reduc,
			  bool need_wrapping_integral_overflow,
			  enum vect_reduction_type *v_reduc_type)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  edge latch_e = loop_latch_edge (loop);
  tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
  gimple *def_stmt, *def1 = NULL, *def2 = NULL, *phi_use_stmt = NULL;
  enum tree_code orig_code, code;
  tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
  tree type;
  int nloop_uses;
  tree name;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  bool phi_def;

  *double_reduc = false;
  *v_reduc_type = TREE_CODE_REDUCTION;

  /* If CHECK_REDUCTION is true, we assume inner-most loop
     vectorization, otherwise, we assume outer loop vectorization.  */
  gcc_assert ((check_reduction && loop == vect_loop)
              || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));

  name = PHI_RESULT (phi);
  /* ???  If there are no uses of the PHI result the inner loop reduction
     won't be detected as possibly double-reduction by vectorizable_reduction
     because that tries to walk the PHI arg from the preheader edge which
     can be constant.  See PR60382.  */
  if (has_zero_uses (name))
    return NULL;
  /* Condition 4: the PHI result must have exactly one use, and it must
     be inside the loop.  */
  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
	continue;

      if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
        {
          if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "intermediate value used outside loop.\n");

          return NULL;
        }

      nloop_uses++;
      if (nloop_uses > 1)
        {
          if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "reduction used in loop.\n");
          return NULL;
        }

      phi_use_stmt = use_stmt;
    }

  if (TREE_CODE (loop_arg) != SSA_NAME)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "reduction: not ssa_name: ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}
      return NULL;
    }

  def_stmt = SSA_NAME_DEF_STMT (loop_arg);
  if (!def_stmt)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "reduction: no def_stmt.\n");
      return NULL;
    }

  if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
    {
      if (dump_enabled_p ())
	dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
      return NULL;
    }

  if (is_gimple_assign (def_stmt))
    {
      name = gimple_assign_lhs (def_stmt);
      phi_def = false;
    }
  else
    {
      name = PHI_RESULT (def_stmt);
      phi_def = true;
    }

  /* The latch definition itself must also have at most a single use
     inside the loop.  */
  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
	continue;
      if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
	nloop_uses++;
      if (nloop_uses > 1)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "reduction used in loop.\n");
	  return NULL;
	}
    }

  /* If DEF_STMT is a phi node itself, we expect it to have a single argument
     defined in the inner loop.  */
  if (phi_def)
    {
      op1 = PHI_ARG_DEF (def_stmt, 0);

      if (gimple_phi_num_args (def_stmt) != 1
          || TREE_CODE (op1) != SSA_NAME)
        {
          if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "unsupported phi node definition.\n");

          return NULL;
        }

      def1 = SSA_NAME_DEF_STMT (op1);
      /* Case (3): double reduction — the inner-loop PHI's argument is
	 defined by an assignment inside the inner loop.  */
      if (gimple_bb (def1)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
          && loop->inner
          && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
          && is_gimple_assign (def1)
	  && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt)))
        {
          if (dump_enabled_p ())
            report_vect_op (MSG_NOTE, def_stmt,
			    "detected double reduction: ");

          *double_reduc = true;
	  return def_stmt;
        }

      return NULL;
    }

  code = orig_code = gimple_assign_rhs_code (def_stmt);

  /* We can handle "res -= x[i]", which is non-associative by
     simply rewriting this into "res += -x[i]".  Avoid changing
     gimple instruction for the first simple tests and only do this
     if we're allowed to change code at all.  */
  if (code == MINUS_EXPR
      && (op1 = gimple_assign_rhs1 (def_stmt))
      && TREE_CODE (op1) == SSA_NAME
      && SSA_NAME_DEF_STMT (op1) == phi)
    code = PLUS_EXPR;

  if (code == COND_EXPR)
    {
      if (check_reduction)
	*v_reduc_type = COND_REDUCTION;
    }
  else if (!commutative_tree_code (code) || !associative_tree_code (code))
    {
      if (dump_enabled_p ())
	report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
			"reduction: not commutative/associative: ");
      return NULL;
    }

  if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
    {
      if (code != COND_EXPR)
        {
	  if (dump_enabled_p ())
	    report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
			    "reduction: not binary operation: ");

          return NULL;
        }

      /* COND_EXPR: pull apart the condition (op3 [cmp] op4) and the two
	 value operands op1/op2.  */
      op3 = gimple_assign_rhs1 (def_stmt);
      if (COMPARISON_CLASS_P (op3))
        {
          op4 = TREE_OPERAND (op3, 1);
          op3 = TREE_OPERAND (op3, 0);
        }

      op1 = gimple_assign_rhs2 (def_stmt);
      op2 = gimple_assign_rhs3 (def_stmt);

      if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
        {
          if (dump_enabled_p ())
            report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
			    "reduction: uses not ssa_names: ");

          return NULL;
        }
    }
  else
    {
      op1 = gimple_assign_rhs1 (def_stmt);
      op2 = gimple_assign_rhs2 (def_stmt);

      if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
	{
          if (dump_enabled_p ())
	    report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
			    "reduction: uses not ssa_names: ");

          return NULL;
	}
    }

  /* All SSA operands must have the same type as the reduction result.  */
  type = TREE_TYPE (gimple_assign_lhs (def_stmt));
  if ((TREE_CODE (op1) == SSA_NAME
       && !types_compatible_p (type,TREE_TYPE (op1)))
      || (TREE_CODE (op2) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op2)))
      || (op3 && TREE_CODE (op3) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op3)))
      || (op4 && TREE_CODE (op4) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op4))))
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
			   "reduction: multiple types: operation type: ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
          dump_printf (MSG_NOTE, ", operands types: ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM,
			     TREE_TYPE (op1));
          dump_printf (MSG_NOTE, ",");
          dump_generic_expr (MSG_NOTE, TDF_SLIM,
			     TREE_TYPE (op2));
          if (op3)
            {
              dump_printf (MSG_NOTE, ",");
              dump_generic_expr (MSG_NOTE, TDF_SLIM,
				 TREE_TYPE (op3));
            }

          if (op4)
            {
              dump_printf (MSG_NOTE, ",");
              dump_generic_expr (MSG_NOTE, TDF_SLIM,
				 TREE_TYPE (op4));
            }
          dump_printf (MSG_NOTE, "\n");
        }

      return NULL;
    }

  /* Check that it's ok to change the order of the computation.
     Generally, when vectorizing a reduction we change the order of the
     computation.  This may change the behavior of the program in some
     cases, so we need to check that this is ok.  One exception is when
     vectorizing an outer-loop: the inner-loop is executed sequentially,
     and therefore vectorizing reductions in the inner-loop during
     outer-loop vectorization is safe.  */
  if (*v_reduc_type != COND_REDUCTION
      && check_reduction)
    {
      /* CHECKME: check for !flag_finite_math_only too?  */
      if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math)
	{
	  /* Changing the order of operations changes the semantics.  */
	  if (dump_enabled_p ())
	    report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
			    "reduction: unsafe fp math optimization: ");
	  return NULL;
	}
      else if (INTEGRAL_TYPE_P (type))
	{
	  if (!operation_no_trapping_overflow (type, code))
	    {
	      /* Changing the order of operations changes the semantics.  */
	      if (dump_enabled_p ())
		report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
				"reduction: unsafe int math optimization"
				" (overflow traps): ");
	      return NULL;
	    }
	  if (need_wrapping_integral_overflow
	      && !TYPE_OVERFLOW_WRAPS (type)
	      && operation_can_overflow (code))
	    {
	      /* Changing the order of operations changes the semantics.  */
	      if (dump_enabled_p ())
		report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
				"reduction: unsafe int math optimization"
				" (overflow doesn't wrap): ");
	      return NULL;
	    }
	}
      else if (SAT_FIXED_POINT_TYPE_P (type))
	{
	  /* Changing the order of operations changes the semantics.  */
	  if (dump_enabled_p ())
	    report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
			    "reduction: unsafe fixed-point math optimization: ");
	  return NULL;
	}
    }

  /* Reduction is safe.  We're dealing with one of the following:
     1) integer arithmetic and no trapv
     2) floating point arithmetic, and special flags permit this optimization
     3) nested cycle (i.e., outer loop vectorization).  */
  if (TREE_CODE (op1) == SSA_NAME)
    def1 = SSA_NAME_DEF_STMT (op1);

  if (TREE_CODE (op2) == SSA_NAME)
    def2 = SSA_NAME_DEF_STMT (op2);

  if (code != COND_EXPR
      && ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2))))
    {
      if (dump_enabled_p ())
	report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
      return NULL;
    }

  /* Check that one def is the reduction def, defined by PHI,
     the other def is either defined in the loop ("vect_internal_def"),
     or it's an induction (defined by a loop-header phi-node).  */

  if (def2 && def2 == phi
      && (code == COND_EXPR
	  || !def1 || gimple_nop_p (def1)
	  || !flow_bb_inside_loop_p (loop, gimple_bb (def1))
          || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
              && (is_gimple_assign (def1)
		  || is_gimple_call (def1)
  	          || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
                      == vect_induction_def
   	          || (gimple_code (def1) == GIMPLE_PHI
	              && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
                          == vect_internal_def
 	              && !is_loop_header_bb_p (gimple_bb (def1)))))))
    {
      /* Reduction variable already in the second operand position.  */
      if (dump_enabled_p ())
	report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
      return def_stmt;
    }

  if (def1 && def1 == phi
      && (code == COND_EXPR
	  || !def2 || gimple_nop_p (def2)
	  || !flow_bb_inside_loop_p (loop, gimple_bb (def2))
          || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
 	      && (is_gimple_assign (def2)
		  || is_gimple_call (def2)
	          || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
                      == vect_induction_def
 	          || (gimple_code (def2) == GIMPLE_PHI
		      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
                          == vect_internal_def
	              && !is_loop_header_bb_p (gimple_bb (def2)))))))
    {
      if (check_reduction && orig_code != MINUS_EXPR)
        {
	  /* Check if we can swap operands (just for simplicity - so that
	     the rest of the code can assume that the reduction variable
	     is always the last (second) argument).  */
	  if (code == COND_EXPR)
	    {
	      /* Swap cond_expr by inverting the condition.  */
	      tree cond_expr = gimple_assign_rhs1 (def_stmt);
	      enum tree_code invert_code = ERROR_MARK;
	      enum tree_code cond_code = TREE_CODE (cond_expr);

	      if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
		{
		  bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr, 0));
		  invert_code = invert_tree_comparison (cond_code, honor_nans);
		}
	      if (invert_code != ERROR_MARK)
		{
		  TREE_SET_CODE (cond_expr, invert_code);
		  swap_ssa_operands (def_stmt,
				     gimple_assign_rhs2_ptr (def_stmt),
				     gimple_assign_rhs3_ptr (def_stmt));
		}
	      else
		{
		  if (dump_enabled_p ())
		    report_vect_op (MSG_NOTE, def_stmt,
				    "detected reduction: cannot swap operands "
				    "for cond_expr");
		  return NULL;
		}
	    }
	  else
	    swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
			       gimple_assign_rhs2_ptr (def_stmt));

	  if (dump_enabled_p ())
	    report_vect_op (MSG_NOTE, def_stmt,
			    "detected reduction: need to swap operands: ");

	  /* Remember the swap so it can be undone if vectorization
	     ultimately fails.  */
	  if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
	    LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
        }
      else
        {
          if (dump_enabled_p ())
            report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
        }

      return def_stmt;
    }

  /* Try to find SLP reduction chain.  */
  if (check_reduction && code != COND_EXPR
      && vect_is_slp_reduction (loop_info, phi, def_stmt))
    {
      if (dump_enabled_p ())
        report_vect_op (MSG_NOTE, def_stmt,
			"reduction: detected reduction chain: ");

      return def_stmt;
    }

  if (dump_enabled_p ())
    report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
		    "reduction: unknown pattern: ");

  return NULL;
}

/* Wrapper around vect_is_simple_reduction_1, which will modify code
   in-place if it enables detection of more reductions.  Arguments
   as there.
   */

gimple *
vect_force_simple_reduction (loop_vec_info loop_info, gimple *phi,
			     bool check_reduction, bool *double_reduc,
			     bool need_wrapping_integral_overflow)
{
  /* The caller does not need the detected reduction kind, so discard it.  */
  enum vect_reduction_type v_reduc_type;
  return vect_is_simple_reduction (loop_info, phi, check_reduction,
				   double_reduc,
				   need_wrapping_integral_overflow,
				   &v_reduc_type);
}

/* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times.  */
int
vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
                             int *peel_iters_epilogue,
                             stmt_vector_for_cost *scalar_cost_vec,
			     stmt_vector_for_cost *prologue_cost_vec,
			     stmt_vector_for_cost *epilogue_cost_vec)
{
  int retval = 0;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      *peel_iters_epilogue = vf/2;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
			 "cost model: epilogue peel iters set to vf/2 "
			 "because loop iterations are unknown .\n");

      /* If peeled iterations are known but number of scalar loop
         iterations are unknown, count a taken branch per peeled loop.  */
      /* ???  The second call below overwrites RETVAL and records the
	 epilogue branch into PROLOGUE_COST_VEC — looks suspicious;
	 verify against current upstream before relying on it.  */
      retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
				 NULL, 0, vect_prologue);
      retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
				 NULL, 0, vect_epilogue);
    }
  else
    {
      int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
      /* Never peel more prologue iterations than the loop executes.  */
      peel_iters_prologue = niters < peel_iters_prologue ?
                            niters : peel_iters_prologue;
      *peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
      /* If we need to peel for gaps, but no peeling is required, we have to
	 peel VF iterations.  */
      if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
        *peel_iters_epilogue = vf;
    }

  /* Account the scalar cost of each peeled iteration, once per prologue
     iteration and once per epilogue iteration.  */
  stmt_info_for_cost *si;
  int j;
  if (peel_iters_prologue)
    FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
      {
	stmt_vec_info stmt_info
	  = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
	retval += record_stmt_cost (prologue_cost_vec,
				    si->count * peel_iters_prologue,
				    si->kind, stmt_info, si->misalign,
				    vect_prologue);
      }
  if (*peel_iters_epilogue)
    FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
      {
	stmt_vec_info stmt_info
	  = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
	retval += record_stmt_cost (epilogue_cost_vec,
				    si->count * *peel_iters_epilogue,
				    si->kind, stmt_info, si->misalign,
				    vect_epilogue);
      }

  return retval;
}

/* Function vect_estimate_min_profitable_iters

   Return the number of iterations required for the vector version of the
   loop to be profitable relative to the cost of the scalar version of the
   loop.

   *RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold
   of iterations for vectorization.  -1 value means loop vectorization
   is not profitable.  This returned value may be used for dynamic
   profitability check.

   *RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used
   for static check against estimated number of iterations.  */

static void
vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
				    int *ret_min_profitable_niters,
				    int *ret_min_profitable_estimate)
{
  int min_profitable_iters;
  int min_profitable_estimate;
  int peel_iters_prologue;
  int peel_iters_epilogue;
  unsigned vec_inside_cost = 0;
  int vec_outside_cost = 0;
  unsigned vec_prologue_cost = 0;
  unsigned vec_epilogue_cost = 0;
  int scalar_single_iter_cost = 0;
  int scalar_outside_cost = 0;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
  void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);

  /* Cost model disabled.  */
  if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
    {
      dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
      *ret_min_profitable_niters = 0;
      *ret_min_profitable_estimate = 0;
      return;
    }

  /* Requires loop versioning tests to handle misalignment.
*/ if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)) { /* FIXME: Make cost depend on complexity of individual check. */ unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length (); (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0, vect_prologue); dump_printf (MSG_NOTE, "cost model: Adding cost of checks for loop " "versioning to treat misalignment.\n"); } /* Requires loop versioning with alias checks. */ if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo)) { /* FIXME: Make cost depend on complexity of individual check. */ unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length (); (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0, vect_prologue); dump_printf (MSG_NOTE, "cost model: Adding cost of checks for loop " "versioning aliasing.\n"); } /* Requires loop versioning with niter checks. */ if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo)) { /* FIXME: Make cost depend on complexity of individual check. */ (void) add_stmt_cost (target_cost_data, 1, vector_stmt, NULL, 0, vect_prologue); dump_printf (MSG_NOTE, "cost model: Adding cost of checks for loop " "versioning niters.\n"); } if (LOOP_REQUIRES_VERSIONING (loop_vinfo)) (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0, vect_prologue); /* Count statements in scalar loop. Using this as scalar cost for a single iteration for now. TODO: Add outer loop support. TODO: Consider assigning different costs to different scalar statements. */ scalar_single_iter_cost = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo); /* Add additional cost for the peeled instructions in prologue and epilogue loop. FORNOW: If we don't know the value of peel_iters for prologue or epilogue at compile-time - we assume it's vf/2 (the worst would be vf-1). TODO: Build an expression that represents peel_iters for prologue and epilogue to be used in a run-time test. 
*/ if (npeel < 0) { peel_iters_prologue = vf/2; dump_printf (MSG_NOTE, "cost model: " "prologue peel iters set to vf/2.\n"); /* If peeling for alignment is unknown, loop bound of main loop becomes unknown. */ peel_iters_epilogue = vf/2; dump_printf (MSG_NOTE, "cost model: " "epilogue peel iters set to vf/2 because " "peeling for alignment is unknown.\n"); /* If peeled iterations are unknown, count a taken branch and a not taken branch per peeled loop. Even if scalar loop iterations are known, vector iterations are not known since peeled prologue iterations are not known. Hence guards remain the same. */ (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0, vect_prologue); (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken, NULL, 0, vect_prologue); (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0, vect_epilogue); (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken, NULL, 0, vect_epilogue); stmt_info_for_cost *si; int j; FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si) { struct _stmt_vec_info *stmt_info = si->stmt ? vinfo_for_stmt (si->stmt) : NULL; (void) add_stmt_cost (target_cost_data, si->count * peel_iters_prologue, si->kind, stmt_info, si->misalign, vect_prologue); (void) add_stmt_cost (target_cost_data, si->count * peel_iters_epilogue, si->kind, stmt_info, si->misalign, vect_epilogue); } } else { stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec; stmt_info_for_cost *si; int j; void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo); prologue_cost_vec.create (2); epilogue_cost_vec.create (2); peel_iters_prologue = npeel; (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue, &peel_iters_epilogue, &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), &prologue_cost_vec, &epilogue_cost_vec); FOR_EACH_VEC_ELT (prologue_cost_vec, j, si) { struct _stmt_vec_info *stmt_info = si->stmt ? 
vinfo_for_stmt (si->stmt) : NULL; (void) add_stmt_cost (data, si->count, si->kind, stmt_info, si->misalign, vect_prologue); } FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si) { struct _stmt_vec_info *stmt_info = si->stmt ? vinfo_for_stmt (si->stmt) : NULL; (void) add_stmt_cost (data, si->count, si->kind, stmt_info, si->misalign, vect_epilogue); } prologue_cost_vec.release (); epilogue_cost_vec.release (); } /* FORNOW: The scalar outside cost is incremented in one of the following ways: 1. The vectorizer checks for alignment and aliasing and generates a condition that allows dynamic vectorization. A cost model check is ANDED with the versioning condition. Hence scalar code path now has the added cost of the versioning check. if (cost > th & versioning_check) jmp to vector code Hence run-time scalar is incremented by not-taken branch cost. 2. The vectorizer then checks if a prologue is required. If the cost model check was not done before during versioning, it has to be done before the prologue check. if (cost <= th) prologue = scalar_iters if (prologue == 0) jmp to vector code else execute prologue if (prologue == num_iters) go to exit Hence the run-time scalar cost is incremented by a taken branch, plus a not-taken branch, plus a taken branch cost. 3. The vectorizer then checks if an epilogue is required. If the cost model check was not done before during prologue check, it has to be done with the epilogue check. if (prologue == 0) jmp to vector code else execute prologue if (prologue == num_iters) go to exit vector code: if ((cost <= th) | (scalar_iters-prologue-epilogue == 0)) jmp to epilogue Hence the run-time scalar cost should be incremented by 2 taken branches. TODO: The back end may reorder the BBS's differently and reverse conditions/branch directions. Change the estimates below to something more reasonable. */ /* If the number of iterations is known and we do not do versioning, we can decide whether to vectorize at compile time. 
Hence the scalar version do not carry cost model guard costs. */ if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) || LOOP_REQUIRES_VERSIONING (loop_vinfo)) { /* Cost model check occurs at versioning. */ if (LOOP_REQUIRES_VERSIONING (loop_vinfo)) scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken); else { /* Cost model check occurs at prologue generation. */ if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0) scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken) + vect_get_stmt_cost (cond_branch_not_taken); /* Cost model check occurs at epilogue generation. */ else scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken); } } /* Complete the target-specific cost calculations. */ finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost, &vec_inside_cost, &vec_epilogue_cost); vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n"); dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n", vec_inside_cost); dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", vec_prologue_cost); dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", vec_epilogue_cost); dump_printf (MSG_NOTE, " Scalar iteration cost: %d\n", scalar_single_iter_cost); dump_printf (MSG_NOTE, " Scalar outside cost: %d\n", scalar_outside_cost); dump_printf (MSG_NOTE, " Vector outside cost: %d\n", vec_outside_cost); dump_printf (MSG_NOTE, " prologue iterations: %d\n", peel_iters_prologue); dump_printf (MSG_NOTE, " epilogue iterations: %d\n", peel_iters_epilogue); } /* Calculate number of iterations required to make the vector version profitable, relative to the loop bodies only. 
The following condition must hold true: SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC where SIC = scalar iteration cost, VIC = vector iteration cost, VOC = vector outside cost, VF = vectorization factor, PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations SOC = scalar outside cost for run time cost model check. */ if ((scalar_single_iter_cost * vf) > (int) vec_inside_cost) { if (vec_outside_cost <= 0) min_profitable_iters = 1; else { min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf - vec_inside_cost * peel_iters_prologue - vec_inside_cost * peel_iters_epilogue) / ((scalar_single_iter_cost * vf) - vec_inside_cost); if ((scalar_single_iter_cost * vf * min_profitable_iters) <= (((int) vec_inside_cost * min_profitable_iters) + (((int) vec_outside_cost - scalar_outside_cost) * vf))) min_profitable_iters++; } } /* vector version will never be profitable. */ else { if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize) warning_at (vect_location, OPT_Wopenmp_simd, "vectorization " "did not happen for a simd loop"); if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "cost model: the vector iteration cost = %d " "divided by the scalar iteration cost = %d " "is greater or equal to the vectorization factor = %d" ".\n", vec_inside_cost, scalar_single_iter_cost, vf); *ret_min_profitable_niters = -1; *ret_min_profitable_estimate = -1; return; } dump_printf (MSG_NOTE, " Calculated minimum iters for profitability: %d\n", min_profitable_iters); min_profitable_iters = min_profitable_iters < vf ? vf : min_profitable_iters; /* Because the condition we create is: if (niters <= min_profitable_iters) then skip the vectorized loop. 
*/
  /* The runtime guard is "if (niters <= th) skip the vectorized loop", so
     record one less than the minimum profitable iteration count.  */
  min_profitable_iters--;
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     " Runtime profitability threshold = %d\n",
                     min_profitable_iters);

  *ret_min_profitable_niters = min_profitable_iters;

  /* Calculate number of iterations required to make the vector version
     profitable, relative to the loop bodies only.

     Non-vectorized variant is SIC * niters and it must win over vector
     variant on the expected loop trip count.  The following condition
     must hold true:
     SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC  */

  if (vec_outside_cost <= 0)
    min_profitable_estimate = 1;
  else
    {
      min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost) * vf
				 - vec_inside_cost * peel_iters_prologue
				 - vec_inside_cost * peel_iters_epilogue)
				 / ((scalar_single_iter_cost * vf)
				   - vec_inside_cost);
    }
  /* Same off-by-one adjustment as for the runtime threshold above.  */
  min_profitable_estimate --;
  /* The static estimate must never be below the runtime threshold.  */
  min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     " Static estimate profitability threshold = %d\n",
                     min_profitable_estimate);

  *ret_min_profitable_estimate = min_profitable_estimate;
}

/* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET
   vector elements (not bits) for a vector of mode MODE.  */

static void
calc_vec_perm_mask_for_shift (enum machine_mode mode, unsigned int offset,
			      unsigned char *sel)
{
  unsigned int i, nelt = GET_MODE_NUNITS (mode);

  /* A vec_perm selects elements from the concatenation of two input
     vectors, so valid indices run over [0, 2*nelt).  Masking with
     2*nelt - 1 keeps the shifted index in that range (assumes nelt is a
     power of two, which holds for the callers that pass nelt/2, nelt/4,
     ..., 1 — NOTE(review): confirm for any new caller).  */
  for (i = 0; i < nelt; i++)
    sel[i] = (i + offset) & (2*nelt - 1);
}

/* Checks whether the target supports whole-vector shifts for vectors of mode
   MODE.  This is the case if _either_ the platform handles vec_shr_optab, _or_
   it supports vec_perm_const with masks for all necessary shift amounts.
*/

static bool
have_whole_vector_shift (enum machine_mode mode)
{
  /* The target has a dedicated whole-vector shift instruction.  */
  if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
    return true;

  /* Without constant-permute support there is no way to emulate the
     shift either.  */
  if (direct_optab_handler (vec_perm_const_optab, mode) == CODE_FOR_nothing)
    return false;

  unsigned int i, nelt = GET_MODE_NUNITS (mode);
  unsigned char *sel = XALLOCAVEC (unsigned char, nelt);

  /* Probe every power-of-two shift amount (nelt/2, nelt/4, ..., 1) that
     a log2-step whole-vector reduction would need; all of them must be
     supported as constant permutes.  */
  for (i = nelt/2; i >= 1; i/=2)
    {
      calc_vec_perm_mask_for_shift (mode, i, sel);
      if (!can_vec_perm_p (mode, false, sel))
	return false;
    }
  return true;
}

/* Return the reduction operand (with index REDUC_INDEX) of STMT.
   REDUC_INDEX selects among the operands of STMT's right-hand side;
   how it maps to an operand depends on the RHS class.  */

static tree
get_reduction_op (gimple *stmt, int reduc_index)
{
  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
    {
      case GIMPLE_SINGLE_RHS:
	/* Only a ternary tree RHS is expected here (e.g. a COND_EXPR);
	   pick the requested operand out of the tree directly.  */
	gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
		    == ternary_op);
	return TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);

      case GIMPLE_UNARY_RHS:
	return gimple_assign_rhs1 (stmt);

      case GIMPLE_BINARY_RHS:
	return (reduc_index
		? gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt));

      case GIMPLE_TERNARY_RHS:
	/* gimple_op operand 0 is the lhs, hence the +1.  */
	return gimple_op (stmt, reduc_index + 1);

      default:
	gcc_unreachable ();
    }
}

/* TODO: Close dependency between vect_model_*_cost and vectorizable_*
   functions.  Design better to avoid maintenance issues.  */

/* Function vect_model_reduction_cost.

   Models cost for a reduction operation, including the vector ops
   generated within the strip-mine loop, the initial definition before
   the loop, and the epilogue code that must be generated.
*/

static bool
vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
			   int ncopies, int reduc_index)
{
  /* STMT_INFO is the reduction statement; REDUC_CODE is the tree code of
     the epilogue reduction (ERROR_MARK when no single-statement reduction
     is available); NCOPIES is the unroll factor of the vector statement;
     REDUC_INDEX selects the reduction operand.  Returns false only when
     the reduction operand's type has no vector type.  */
  int prologue_cost = 0, epilogue_cost = 0;
  enum tree_code code;
  optab optab;
  tree vectype;
  gimple *stmt, *orig_stmt;
  tree reduction_op;
  machine_mode mode;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  void *target_cost_data;

  /* Costs are accumulated into the loop's or the basic block's target
     cost data, depending on which kind of vectorization this is.  */
  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
    }
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (STMT_VINFO_BB_VINFO (stmt_info));

  /* Condition reductions generate two reductions in the loop.  */
  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
    ncopies *= 2;

  /* Cost of reduction op inside loop.  */
  unsigned inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
					stmt_info, 0, vect_body);
  stmt = STMT_VINFO_STMT (stmt_info);

  reduction_op = get_reduction_op (stmt, reduc_index);

  vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
  if (!vectype)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "unsupported data-type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
			     TREE_TYPE (reduction_op));
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}
      return false;
    }

  mode = TYPE_MODE (vectype);
  /* Prefer the pattern's original statement when this stmt was created
     by pattern recognition.  */
  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

  if (!orig_stmt)
    orig_stmt = STMT_VINFO_STMT (stmt_info);

  code = gimple_assign_rhs_code (orig_stmt);

  /* Add in cost for initial definition.
     For cond reduction we have four vectors: initial index, step, initial
     result of the data reduction, initial value of the index reduction.  */
  int prologue_stmts = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
		       == COND_REDUCTION ? 4 : 1;
  prologue_cost += add_stmt_cost (target_cost_data, prologue_stmts,
				  scalar_to_vec, stmt_info, 0,
				  vect_prologue);

  /* Determine cost of epilogue code.

     We have a reduction operator that will reduce the vector in one statement.
     Also requires scalar extract.  */

  if (!loop || !nested_in_vect_loop_p (loop, orig_stmt))
    {
      if (reduc_code != ERROR_MARK)
	{
	  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
	    {
	      /* An EQ stmt and a COND_EXPR stmt.  */
	      epilogue_cost += add_stmt_cost (target_cost_data, 2,
					      vector_stmt, stmt_info, 0,
					      vect_epilogue);
	      /* Reduction of the max index and a reduction of the found
		 values.  */
	      epilogue_cost += add_stmt_cost (target_cost_data, 2,
					      vec_to_scalar, stmt_info, 0,
					      vect_epilogue);
	      /* A broadcast of the max value.  */
	      epilogue_cost += add_stmt_cost (target_cost_data, 1,
					      scalar_to_vec, stmt_info, 0,
					      vect_epilogue);
	    }
	  else
	    {
	      /* Single-statement reduction plus the scalar extract.  */
	      epilogue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
					      stmt_info, 0, vect_epilogue);
	      epilogue_cost += add_stmt_cost (target_cost_data, 1,
					      vec_to_scalar, stmt_info, 0,
					      vect_epilogue);
	    }
	}
      else
	{
	  /* No single-statement reduction available; cost the shift-based
	     or extract-based fallback schemes.  */
	  int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
	  tree bitsize =
	    TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
	  int element_bitsize = tree_to_uhwi (bitsize);
	  int nelements = vec_size_in_bits / element_bitsize;

	  optab = optab_for_tree_code (code, vectype, optab_default);

	  /* We have a whole vector shift available.  */
	  if (VECTOR_MODE_P (mode)
	      && optab_handler (optab, mode) != CODE_FOR_nothing
	      && have_whole_vector_shift (mode))
	    {
	      /* Final reduction via vector shifts and the reduction operator.
		 Also requires scalar extract.  */
	      epilogue_cost += add_stmt_cost (target_cost_data,
					      exact_log2 (nelements) * 2,
					      vector_stmt, stmt_info, 0,
					      vect_epilogue);
	      epilogue_cost += add_stmt_cost (target_cost_data, 1,
					      vec_to_scalar, stmt_info, 0,
					      vect_epilogue);
	    }
	  else
	    /* Use extracts and reduction op for final reduction.  For N
	       elements, we have N extracts and N-1 reduction ops.  */
	    epilogue_cost += add_stmt_cost (target_cost_data,
					    nelements + nelements - 1,
					    vector_stmt, stmt_info, 0,
					    vect_epilogue);
	}
    }

  if (dump_enabled_p ())
    dump_printf (MSG_NOTE,
                 "vect_model_reduction_cost: inside_cost = %d, "
                 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
                 prologue_cost, epilogue_cost);

  return true;
}


/* Function vect_model_induction_cost.

   Models cost for induction operations.  */

static void
vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
{
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  unsigned inside_cost, prologue_cost;

  /* loop cost for vec_loop: one vector add per copy.  */
  inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
			       stmt_info, 0, vect_body);

  /* prologue cost for vec_init and vec_step.  */
  prologue_cost = add_stmt_cost (target_cost_data, 2, scalar_to_vec,
				 stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_induction_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}


/* Function get_initial_def_for_induction

   Input:
   STMT - a stmt that performs an induction operation in the loop.
   IV_PHI - the initial value of the induction variable

   Output:
   Return a vector variable, initialized with the first VF values of
   the induction variable.  E.g., for an iv with IV_PHI='X' and
   evolution S, for a vector of 4 units, we want to return:
   [X, X + S, X + 2*S, X + 3*S].
*/

static tree
get_initial_def_for_induction (gimple *iv_phi)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree vectype;
  int nunits;
  edge pe = loop_preheader_edge (loop);
  struct loop *iv_loop;
  basic_block new_bb;
  tree new_vec, vec_init, vec_step, t;
  tree new_name;
  gimple *new_stmt;
  gphi *induction_phi;
  tree induc_def, vec_def, vec_dest;
  tree init_expr, step_expr;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  int i;
  int ncopies;
  tree expr;
  stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
  bool nested_in_vect_loop = false;
  gimple_seq stmts;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  gimple *exit_phi;
  edge latch_e;
  tree loop_arg;
  gimple_stmt_iterator si;
  basic_block bb = gimple_bb (iv_phi);
  tree stepvectype;
  tree resvectype;

  /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop?  */
  if (nested_in_vect_loop_p (loop, iv_phi))
    {
      nested_in_vect_loop = true;
      iv_loop = loop->inner;
    }
  else
    iv_loop = loop;
  gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);

  latch_e = loop_latch_edge (iv_loop);
  loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);

  step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (phi_info);
  gcc_assert (step_expr != NULL_TREE);

  pe = loop_preheader_edge (iv_loop);
  init_expr = PHI_ARG_DEF_FROM_EDGE (iv_phi,
				     loop_preheader_edge (iv_loop));

  vectype = get_vectype_for_scalar_type (TREE_TYPE (init_expr));
  resvectype = get_vectype_for_scalar_type (TREE_TYPE (PHI_RESULT (iv_phi)));
  gcc_assert (vectype);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  ncopies = vf / nunits;

  gcc_assert (phi_info);
  gcc_assert (ncopies >= 1);

  /* Convert the step to the desired type.  Any statements the conversion
     needs are emitted on the preheader edge.  */
  stmts = NULL;
  step_expr = gimple_convert (&stmts, TREE_TYPE (vectype), step_expr);
  if (stmts)
    {
      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
      gcc_assert (!new_bb);
    }

  /* Find the first insertion point in the BB.  */
  si = gsi_after_labels (bb);

  /* Create the vector that holds the initial_value of the induction.  */
  if (nested_in_vect_loop)
    {
      /* iv_loop is nested in the loop to be vectorized.  init_expr had already
	 been created during vectorization of previous stmts.  We obtain it
	 from the STMT_VINFO_VEC_STMT of the defining stmt.  */
      vec_init = vect_get_vec_def_for_operand (init_expr, iv_phi);
      /* If the initial value is not of proper type, convert it.  */
      if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
	{
	  new_stmt
	    = gimple_build_assign (vect_get_new_ssa_name (vectype,
							  vect_simple_var,
							  "vec_iv_"),
				   VIEW_CONVERT_EXPR,
				   build1 (VIEW_CONVERT_EXPR, vectype,
					   vec_init));
	  vec_init = gimple_assign_lhs (new_stmt);
	  new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
						 new_stmt);
	  gcc_assert (!new_bb);
	  set_vinfo_for_stmt (new_stmt,
			      new_stmt_vec_info (new_stmt, loop_vinfo));
	}
    }
  else
    {
      vec<constructor_elt, va_gc> *v;

      /* iv_loop is the loop to be vectorized. Create:
	 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr)  */
      stmts = NULL;
      new_name = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);

      vec_alloc (v, nunits);
      /* CONSTANT_P tracks whether all elements stay compile-time
	 invariant, which selects VECTOR_CST vs. CONSTRUCTOR below.  */
      bool constant_p = is_gimple_min_invariant (new_name);
      CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
      for (i = 1; i < nunits; i++)
	{
	  /* Create: new_name_i = new_name + step_expr  */
	  new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
				   new_name, step_expr);
	  if (!is_gimple_min_invariant (new_name))
	    constant_p = false;
	  CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
	}
      if (stmts)
	{
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
	  gcc_assert (!new_bb);
	}

      /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1]  */
      if (constant_p)
	new_vec = build_vector_from_ctor (vectype, v);
      else
	new_vec = build_constructor (vectype, v);
      vec_init = vect_init_vector (iv_phi, new_vec, vectype, NULL);
    }


  /* Create the vector that holds the step of the induction.  */
  if (nested_in_vect_loop)
    /* iv_loop is nested in the loop to be vectorized. Generate:
       vec_step = [S, S, S, S]  */
    new_name = step_expr;
  else
    {
      /* iv_loop is the loop to be vectorized. Generate:
	  vec_step = [VF*S, VF*S, VF*S, VF*S]  */
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
	{
	  /* Build VF as an integer first, then convert to the float
	     step type, since build_int_cst cannot make float constants.  */
	  expr = build_int_cst (integer_type_node, vf);
	  expr = fold_convert (TREE_TYPE (step_expr), expr);
	}
      else
	expr = build_int_cst (TREE_TYPE (step_expr), vf);
      new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
			      expr, step_expr);
      if (TREE_CODE (step_expr) == SSA_NAME)
	new_name = vect_init_vector (iv_phi, new_name,
				     TREE_TYPE (step_expr), NULL);
    }

  t = unshare_expr (new_name);
  gcc_assert (CONSTANT_CLASS_P (new_name)
	      || TREE_CODE (new_name) == SSA_NAME);
  stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
  gcc_assert (stepvectype);
  new_vec = build_vector_from_val (stepvectype, t);
  vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL);


  /* Create the following def-use cycle:
     loop prolog:
         vec_init = ...
	 vec_step = ...
     loop:
         vec_iv = PHI <vec_init, vec_loop>
         ...
         STMT
         ...
         vec_loop = vec_iv + vec_step;  */

  /* Create the induction-phi that defines the induction-operand.  */
  vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
  induction_phi = create_phi_node (vec_dest, iv_loop->header);
  set_vinfo_for_stmt (induction_phi,
		      new_stmt_vec_info (induction_phi, loop_vinfo));
  induc_def = PHI_RESULT (induction_phi);

  /* Create the iv update inside the loop  */
  new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR,
				  induc_def, vec_step);
  vec_def = make_ssa_name (vec_dest, new_stmt);
  gimple_assign_set_lhs (new_stmt, vec_def);
  gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
  set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo));

  /* Set the arguments of the phi node:  */
  add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
  add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
	       UNKNOWN_LOCATION);

  /* In case that vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.  */

  if (ncopies > 1)
    {
      stmt_vec_info prev_stmt_vinfo;
      /* FORNOW. This restriction should be relaxed.  */
      gcc_assert (!nested_in_vect_loop);

      /* Create the vector that holds the step of the induction: here the
	 step between copies is NUNITS*S rather than VF*S.  */
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
	{
	  expr = build_int_cst (integer_type_node, nunits);
	  expr = fold_convert (TREE_TYPE (step_expr), expr);
	}
      else
	expr = build_int_cst (TREE_TYPE (step_expr), nunits);
      new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
			      expr, step_expr);
      if (TREE_CODE (step_expr) == SSA_NAME)
	new_name = vect_init_vector (iv_phi, new_name,
				     TREE_TYPE (step_expr), NULL);
      t = unshare_expr (new_name);
      gcc_assert (CONSTANT_CLASS_P (new_name)
		  || TREE_CODE (new_name) == SSA_NAME);
      new_vec = build_vector_from_val (stepvectype, t);
      vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL);

      vec_def = induc_def;
      prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
      for (i = 1; i < ncopies; i++)
	{
	  /* vec_i = vec_prev + vec_step  */
	  new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR,
					  vec_def, vec_step);
	  vec_def = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, vec_def);

	  gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
	  if (!useless_type_conversion_p (resvectype, vectype))
	    {
	      /* Reinterpret the copy in the phi result's vector type.  */
	      new_stmt
		= gimple_build_assign
			(vect_get_new_vect_var (resvectype, vect_simple_var,
						"vec_iv_"),
			 VIEW_CONVERT_EXPR,
			 build1 (VIEW_CONVERT_EXPR, resvectype,
				 gimple_assign_lhs (new_stmt)));
	      gimple_assign_set_lhs (new_stmt,
				     make_ssa_name
				       (gimple_assign_lhs (new_stmt),
					new_stmt));
	      gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
	    }
	  set_vinfo_for_stmt (new_stmt,
			      new_stmt_vec_info (new_stmt, loop_vinfo));
	  STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
	  prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
	}
    }

  if (nested_in_vect_loop)
    {
      /* Find the loop-closed exit-phi of the induction, and record
         the final vector of induction results:  */
      exit_phi = NULL;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
        {
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;

	  if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt)))
	    {
	      exit_phi = use_stmt;
	      break;
	    }
        }
      if (exit_phi)
	{
	  stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
	  /* FORNOW. Currently not supporting the case that an inner-loop
	     induction is not used in the outer-loop (i.e. only outside the
	     outer-loop).  */
	  gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
		      && !STMT_VINFO_LIVE_P (stmt_vinfo));

	  STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "vector of inductions after inner-loop:");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
	    }
	}
    }


  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "transform induction: created def-use cycle: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
			SSA_NAME_DEF_STMT (vec_def), 0);
    }

  STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
  if (!useless_type_conversion_p (resvectype, vectype))
    {
      /* Reinterpret the returned def in the phi result's vector type.  */
      new_stmt = gimple_build_assign (vect_get_new_vect_var (resvectype,
							     vect_simple_var,
							     "vec_iv_"),
				      VIEW_CONVERT_EXPR,
				      build1 (VIEW_CONVERT_EXPR, resvectype,
					      induc_def));
      induc_def = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
      gimple_assign_set_lhs (new_stmt, induc_def);
      si = gsi_after_labels (bb);
      gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
      set_vinfo_for_stmt (new_stmt,
			  new_stmt_vec_info (new_stmt, loop_vinfo));
      STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_stmt))
	= STMT_VINFO_RELATED_STMT (vinfo_for_stmt (induction_phi));
    }

  return induc_def;
}


/* Function get_initial_def_for_reduction

   Input:
   STMT - a stmt that performs a reduction operation in the loop.
   INIT_VAL - the initial value of the reduction variable

   Output:
   ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
        of the reduction (used for adjusting the epilog - see below).
   Return a vector variable, initialized according to the operation that STMT
        performs.  This vector will be used as the initial value of the
        vector of partial results.
Option1 (adjust in epilog): Initialize the vector as follows: add/bit or/xor: [0,0,...,0,0] mult/bit and: [1,1,...,1,1] min/max/cond_expr: [init_val,init_val,..,init_val,init_val] and when necessary (e.g. add/mult case) let the caller know that it needs to adjust the result by init_val. Option2: Initialize the vector as follows: add/bit or/xor: [init_val,0,0,...,0] mult/bit and: [init_val,1,1,...,1] min/max/cond_expr: [init_val,init_val,...,init_val] and no adjustments are needed. For example, for the following code: s = init_val; for (i=0;i<n;i++) s = s + a[i]; STMT is 's = s + a[i]', and the reduction variable is 's'. For a vector of 4 units, we want to return either [0,0,0,init_val], or [0,0,0,0] and let the caller know that it needs to adjust the result at the end by 'init_val'. FORNOW, we are using the 'adjust in epilog' scheme, because this way the initialization vector is simpler (same element in all entries), if ADJUSTMENT_DEF is not NULL, and Option2 otherwise. A cost model should help decide between these two schemes. 
*/

tree
get_initial_def_for_reduction (gimple *stmt, tree init_val,
			       tree *adjustment_def)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree scalar_type = TREE_TYPE (init_val);
  tree vectype = get_vectype_for_scalar_type (scalar_type);
  int nunits;
  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree def_for_init;
  tree init_def;
  tree *elts;
  int i;
  bool nested_in_vect_loop = false;
  REAL_VALUE_TYPE real_init_val = dconst0;
  int int_init_val = 0;
  gimple *def_stmt = NULL;
  gimple_seq stmts = NULL;

  gcc_assert (vectype);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
	      || SCALAR_FLOAT_TYPE_P (scalar_type));

  if (nested_in_vect_loop_p (loop, stmt))
    nested_in_vect_loop = true;
  else
    gcc_assert (loop == (gimple_bb (stmt))->loop_father);

  /* In case of double reduction we only create a vector variable to be put
     in the reduction phi node.  The actual statement creation is done in
     vect_create_epilog_for_reduction.  */
  if (adjustment_def && nested_in_vect_loop
      && TREE_CODE (init_val) == SSA_NAME
      && (def_stmt = SSA_NAME_DEF_STMT (init_val))
      && gimple_code (def_stmt) == GIMPLE_PHI
      && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
      && vinfo_for_stmt (def_stmt)
      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
	 == vect_double_reduction_def)
    {
      *adjustment_def = NULL;
      return vect_create_destination_var (init_val, vectype);
    }

  /* In case of a nested reduction do not use an adjustment def as
     that case is not supported by the epilogue generation correctly
     if ncopies is not one.  */
  if (adjustment_def && nested_in_vect_loop)
    {
      *adjustment_def = NULL;
      return vect_get_vec_def_for_operand (init_val, stmt);
    }

  switch (code)
    {
      case WIDEN_SUM_EXPR:
      case DOT_PROD_EXPR:
      case SAD_EXPR:
      case PLUS_EXPR:
      case MINUS_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
      case MULT_EXPR:
      case BIT_AND_EXPR:
        /* ADJUSTMENT_DEF is NULL when called from
           vect_create_epilog_for_reduction to vectorize double reduction.  */
        if (adjustment_def)
	  *adjustment_def = init_val;

	/* The neutral element is 1 for MULT_EXPR, all-ones for BIT_AND_EXPR,
	   and 0 otherwise (the defaults set above).  */
        if (code == MULT_EXPR)
          {
            real_init_val = dconst1;
            int_init_val = 1;
          }

        if (code == BIT_AND_EXPR)
          int_init_val = -1;

        if (SCALAR_FLOAT_TYPE_P (scalar_type))
          def_for_init = build_real (scalar_type, real_init_val);
        else
          def_for_init = build_int_cst (scalar_type, int_init_val);

        /* Create a vector of '0' or '1' except the first element.  */
        elts = XALLOCAVEC (tree, nunits);
        for (i = nunits - 2; i >= 0; --i)
          elts[i + 1] = def_for_init;

        /* Option1: the first element is '0' or '1' as well.  */
        if (adjustment_def)
          {
            elts[0] = def_for_init;
            init_def = build_vector (vectype, elts);
            break;
          }

        /* Option2: the first element is INIT_VAL.  */
        elts[0] = init_val;

	/* A VECTOR_CST needs all-constant elements; otherwise fall back to
	   a CONSTRUCTOR.  */
        if (TREE_CONSTANT (init_val))
          init_def = build_vector (vectype, elts);
        else
          {
	    vec<constructor_elt, va_gc> *v;
            vec_alloc (v, nunits);
	    CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init_val);
            for (i = 1; i < nunits; ++i)
	      CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[i]);
            init_def = build_constructor (vectype, v);
          }

        break;

      case MIN_EXPR:
      case MAX_EXPR:
      case COND_EXPR:
	if (adjustment_def)
          {
	    *adjustment_def = NULL_TREE;
	    if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo) != COND_REDUCTION)
	      {
		init_def = vect_get_vec_def_for_operand (init_val, stmt);
		break;
	      }
	  }
	/* For min/max/cond the initial vector is a splat of INIT_VAL;
	   any conversion statements go on the preheader edge.  */
	init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
	if (! gimple_seq_empty_p (stmts))
	  gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
	init_def = build_vector_from_val (vectype, init_val);
	break;

      default:
        gcc_unreachable ();
    }

  return init_def;
}

/* Function vect_create_epilog_for_reduction

   Create code at the loop-epilog to finalize the result of a reduction
   computation.

   VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
     reduction statements.
   STMT is the scalar reduction stmt that is being vectorized.
   NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
     number of elements that we can fit in a vectype (nunits).  In this case
     we have to generate more than one vector stmt - i.e - we need to "unroll"
     the vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.
   REDUC_CODE is the tree-code for the epilog reduction.
   REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
     computation.
   REDUC_INDEX is the index of the operand in the right hand side of the
     statement that is defined by REDUCTION_PHI.
   DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
   SLP_NODE is an SLP node containing a group of reduction statements. The
     first one in this group is STMT.
   INDUCTION_INDEX is the index of the loop for condition reductions.
     Otherwise it is undefined.
   INDUC_VAL is for INTEGER_INDUC_COND_REDUCTION the value to use for the case
     when the COND_EXPR is never true in the loop.  It needs to be smaller than
     any value of the IV in the loop.

   This function:
   1. Creates the reduction def-use cycles: sets the arguments for
      REDUCTION_PHIS:
      The loop-entry argument is the vectorized initial-value of the reduction.
      The loop-latch argument is taken from VECT_DEFS - the vector of partial
      sums.
   2. "Reduces" each vector of partial results VECT_DEFS into a single result,
      by applying the operation specified by REDUC_CODE if available, or by
      other means (whole-vector shifts or a scalar loop).
   The function also creates a new phi node at the loop exit to preserve
   loop-closed form, as illustrated below.

     The flow at the entry to this function:

        loop:
          vec_def = phi <null, null>            # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          s_loop = scalar_stmt                  # (scalar) STMT
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          use <s_out0>
          use <s_out0>

     The above is transformed by this function into:

        loop:
          vec_def = phi <vec_init, VECT_DEF>    # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          s_loop = scalar_stmt                  # (scalar) STMT
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out4>
          use <s_out4>
*/

static void
vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
				  int ncopies, enum tree_code reduc_code,
				  vec<gimple *> reduction_phis,
				  int reduc_index, bool double_reduc,
				  slp_tree slp_node, tree induction_index,
				  tree induc_val)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_phi_info;
  tree vectype;
  machine_mode mode;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
  basic_block exit_bb;
  tree scalar_dest;
  tree scalar_type;
  gimple *new_phi = NULL, *phi;
  gimple_stmt_iterator exit_gsi;
  tree vec_dest;
  tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
  gimple *epilog_stmt = NULL;
  enum tree_code code = gimple_assign_rhs_code (stmt);
  gimple *exit_phi;
  tree bitsize;
  tree adjustment_def = NULL;
  tree vec_initial_def = NULL;
  tree reduction_op, expr, def, initial_def = NULL;
  tree orig_name, scalar_result;
  imm_use_iterator imm_iter, phi_imm_iter;
  use_operand_p use_p, phi_use_p;
  gimple *use_stmt, *orig_stmt, *reduction_phi = NULL;
  bool nested_in_vect_loop = false;
  auto_vec<gimple *> new_phis;
  auto_vec<gimple *> inner_phis;
  enum vect_def_type dt = vect_unknown_def_type;
  int j, i;
  auto_vec<tree> scalar_results;
  unsigned int group_size = 1, k, ratio;
  auto_vec<tree> vec_initial_defs;
  auto_vec<gimple *> phis;
  bool slp_reduc = false;
  tree new_phi_result;
  gimple *inner_phi = NULL;

  if (slp_node)
    group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();

  if (nested_in_vect_loop_p (loop, stmt))
    {
      outer_loop = loop;
      loop = loop->inner;
      nested_in_vect_loop = true;
      gcc_assert (!slp_node);
    }

  reduction_op = get_reduction_op (stmt, reduc_index);

  vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
  gcc_assert (vectype);
  mode = TYPE_MODE (vectype);

  /* 1. Create the reduction def-use cycle:
     Set the arguments of REDUCTION_PHIS, i.e., transform

        loop:
          vec_def = phi <null, null>            # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          ...

     into:

        loop:
          vec_def = phi <vec_init, VECT_DEF>    # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          ...

     (in case of SLP, do it for all the phis). */

  /* Get the loop-entry arguments.  */
  enum vect_def_type initial_def_dt = vect_unknown_def_type;
  if (slp_node)
    vect_get_vec_defs (reduction_op, NULL_TREE, stmt, &vec_initial_defs,
                       NULL, slp_node, reduc_index);
  else
    {
      /* Get at the scalar def before the loop, that defines the initial value
	 of the reduction variable.  */
      gimple *def_stmt = SSA_NAME_DEF_STMT (reduction_op);
      initial_def = PHI_ARG_DEF_FROM_EDGE (def_stmt,
					   loop_preheader_edge (loop));
      /* Optimize: if initial_def is for REDUC_MAX smaller than the base
	 and we can't use zero for induc_val, use initial_def.  Similarly
	 for REDUC_MIN and initial_def larger than the base.  */
      if (TREE_CODE (initial_def) == INTEGER_CST
	  && (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
	      == INTEGER_INDUC_COND_REDUCTION)
	  && !integer_zerop (induc_val)
	  && tree_int_cst_lt (initial_def, induc_val))
	induc_val = initial_def;
      vect_is_simple_use (initial_def, loop_vinfo, &def_stmt, &initial_def_dt);
      vec_initial_def = get_initial_def_for_reduction (stmt, initial_def,
						       &adjustment_def);
      vec_initial_defs.create (1);
      vec_initial_defs.quick_push (vec_initial_def);
    }

  /* Set phi nodes arguments.  */
  FOR_EACH_VEC_ELT (reduction_phis, i, phi)
    {
      tree vec_init_def, def;
      gimple_seq stmts;
      vec_init_def = force_gimple_operand (vec_initial_defs[i], &stmts,
					   true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      def = vect_defs[i];
      /* Walk the NCOPIES copies of this phi; copies are linked through
	 STMT_VINFO_RELATED_STMT.  */
      for (j = 0; j < ncopies; j++)
        {
	  if (j != 0)
	    {
	      phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
	      if (nested_in_vect_loop)
		vec_init_def
		  = vect_get_vec_def_for_stmt_copy (initial_def_dt,
						    vec_init_def);
	    }

	  /* Set the loop-entry arg of the reduction-phi.  */

	  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
	      == INTEGER_INDUC_COND_REDUCTION)
	    {
	      /* Initialise the reduction phi to zero.  This prevents initial
		 values of non-zero interferring with the reduction op.  */
	      gcc_assert (ncopies == 1);
	      gcc_assert (i == 0);

	      tree vec_init_def_type = TREE_TYPE (vec_init_def);
	      tree induc_val_vec
		= build_vector_from_val (vec_init_def_type, induc_val);

	      add_phi_arg (as_a <gphi *> (phi), induc_val_vec,
			   loop_preheader_edge (loop), UNKNOWN_LOCATION);
	    }
	  else
	    add_phi_arg (as_a <gphi *> (phi), vec_init_def,
			 loop_preheader_edge (loop), UNKNOWN_LOCATION);

	  /* Set the loop-latch arg for the reduction-phi.  */
	  if (j > 0)
	    def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);

	  add_phi_arg (as_a <gphi *> (phi), def, loop_latch_edge (loop),
		       UNKNOWN_LOCATION);

	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "transform reduction: created def-use cycle: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
	    }
        }
    }

  /* 2. Create epilog code.
        The reduction epilog code operates across the elements of the vector
        of partial results computed by the vectorized loop.
        The reduction epilog code consists of:

        step 1: compute the scalar result in a vector (v_out2)
        step 2: extract the scalar result (s_out3) from the vector (v_out2)
        step 3: adjust the scalar result (s_out3) if needed.

        Step 1 can be accomplished using one the following three schemes:
          (scheme 1) using reduc_code, if available.
          (scheme 2) using whole-vector shifts, if available.
          (scheme 3) using a scalar loop. In this case steps 1+2 above are
                     combined.

          The overall epilog code looks like this:

          s_out0 = phi <s_loop>             # original EXIT_PHI
          v_out1 = phi <VECT_DEF>           # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>          # step 1
          s_out3 = extract_field <v_out2, 0>    # step 2
          s_out4 = adjust_result <s_out3>       # step 3

          (step 3 is optional, and steps 1 and 2 may be combined).
          Lastly, the uses of s_out0 are replaced by s_out4.  */


  /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
         v_out1 = phi <VECT_DEF>
         Store them in NEW_PHIS.  */

  exit_bb = single_exit (loop)->dest;
  prev_phi_info = NULL;
  new_phis.create (vect_defs.length ());
  FOR_EACH_VEC_ELT (vect_defs, i, def)
    {
      for (j = 0; j < ncopies; j++)
        {
	  tree new_def = copy_ssa_name (def);
          phi = create_phi_node (new_def, exit_bb);
          set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo));
          if (j == 0)
            new_phis.quick_push (phi);
          else
	    {
	      /* Chain the exit phis of the NCOPIES copies through
		 RELATED_STMT so later code can walk them.  */
	      def = vect_get_vec_def_for_stmt_copy (dt, def);
	      STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
	    }

          SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
          prev_phi_info = vinfo_for_stmt (phi);
        }
    }

  /* The epilogue is created for the outer-loop, i.e., for the loop being
     vectorized.  Create exit phis for the outer loop.  */
  if (double_reduc)
    {
      loop = outer_loop;
      exit_bb = single_exit (loop)->dest;
      inner_phis.create (vect_defs.length ());
      FOR_EACH_VEC_ELT (new_phis, i, phi)
	{
	  tree new_result = copy_ssa_name (PHI_RESULT (phi));
	  gphi *outer_phi = create_phi_node (new_result, exit_bb);
	  SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
			   PHI_RESULT (phi));
	  set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
							    loop_vinfo));
	  inner_phis.quick_push (phi);
	  new_phis[i] = outer_phi;
	  prev_phi_info = vinfo_for_stmt (outer_phi);
          while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)))
            {
	      phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
	      new_result = copy_ssa_name (PHI_RESULT (phi));
	      outer_phi = create_phi_node (new_result, exit_bb);
	      SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
			       PHI_RESULT (phi));
	      set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
								loop_vinfo));
	      STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi;
	      prev_phi_info = vinfo_for_stmt (outer_phi);
	    }
	}
    }

  exit_gsi = gsi_after_labels (exit_bb);

  /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
         (i.e. when reduc_code is not available) and in the final adjustment
	 code (if needed).  Also get the original scalar reduction variable as
         defined in the loop.  In case STMT is a "pattern-stmt" (i.e. - it
         represents a reduction pattern), the tree-code and scalar-def are
         taken from the original stmt that the pattern-stmt (STMT) replaces.
         Otherwise (it is a regular reduction) - the tree-code and scalar-def
         are taken from STMT.  */

  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!orig_stmt)
    {
      /* Regular reduction  */
      orig_stmt = stmt;
    }
  else
    {
      /* Reduction pattern  */
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
      gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
    }

  code = gimple_assign_rhs_code (orig_stmt);
  /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
     partial results are added and not subtracted.  */
  if (code == MINUS_EXPR)
    code = PLUS_EXPR;

  scalar_dest = gimple_assign_lhs (orig_stmt);
  scalar_type = TREE_TYPE (scalar_dest);
  scalar_results.create (group_size);
  new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
  bitsize = TYPE_SIZE (scalar_type);

  /* In case this is a reduction in an inner-loop while vectorizing an outer
     loop - we don't need to extract a single scalar result at the end of the
     inner-loop (unless it is double reduction, i.e., the use of reduction is
     outside the outer-loop).  The final vector of partial results will be used
     in the vectorized outer-loop, or reduced to a scalar result at the end of
     the outer-loop.  */
  if (nested_in_vect_loop && !double_reduc)
    goto vect_finalize_reduction;

  /* SLP reduction without reduction chain, e.g.,
     # a1 = phi <a2, a0>
     # b1 = phi <b2, b0>
     a2 = operation (a1)
     b2 = operation (b1)  */
  slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));

  /* In case of reduction chain, e.g.,
     # a1 = phi <a3, a0>
     a2 = operation (a1)
     a3 = operation (a2),

     we may end up with more than one vector result.  Here we reduce them to
     one vector.  */
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      tree first_vect = PHI_RESULT (new_phis[0]);
      tree tmp;
      gassign *new_vec_stmt = NULL;

      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      for (k = 1; k < new_phis.length (); k++)
        {
	  gimple *next_phi = new_phis[k];
          tree second_vect = PHI_RESULT (next_phi);

          tmp = build2 (code, vectype, first_vect, second_vect);
          new_vec_stmt = gimple_build_assign (vec_dest, tmp);
          first_vect = make_ssa_name (vec_dest, new_vec_stmt);
          gimple_assign_set_lhs (new_vec_stmt, first_vect);
          gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
        }

      new_phi_result = first_vect;
      if (new_vec_stmt)
        {
          new_phis.truncate (0);
          new_phis.safe_push (new_vec_stmt);
        }
    }
  else
    new_phi_result = PHI_RESULT (new_phis[0]);

  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
    {
      /* For condition reductions, we have a vector (NEW_PHI_RESULT) containing
	 various data values where the condition matched and another vector
	 (INDUCTION_INDEX) containing all the indexes of those matches.  We
	 need to extract the last matching index (which will be the index with
	 highest value) and use this to index into the data vector.
	 For the case where there were no matches, the data vector will contain
	 all default values and the index vector will be all zeros.  */

      /* Get various versions of the type of the vector of indexes.  */
      tree index_vec_type = TREE_TYPE (induction_index);
      gcc_checking_assert (TYPE_UNSIGNED (index_vec_type));
      tree index_scalar_type = TREE_TYPE (index_vec_type);
      tree index_vec_cmp_type = build_same_sized_truth_vector_type
	(index_vec_type);

      /* Get an unsigned integer version of the type of the data vector.  */
      int scalar_precision = GET_MODE_PRECISION (TYPE_MODE (scalar_type));
      tree scalar_type_unsigned = make_unsigned_type (scalar_precision);
      tree vectype_unsigned = build_vector_type
	(scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype));

      /* First we need to create a vector (ZERO_VEC) of zeros and another
	 vector (MAX_INDEX_VEC) filled with the last matching index, which we
	 can create using a MAX reduction and then expanding.
	 In the case where the loop never made any matches, the max index will
	 be zero.  */

      /* Vector of {0, 0, 0,...}.  */
      tree zero_vec = make_ssa_name (vectype);
      tree zero_vec_rhs = build_zero_cst (vectype);
      gimple *zero_vec_stmt = gimple_build_assign (zero_vec, zero_vec_rhs);
      gsi_insert_before (&exit_gsi, zero_vec_stmt, GSI_SAME_STMT);

      /* Find maximum value from the vector of found indexes.  */
      tree max_index = make_ssa_name (index_scalar_type);
      gimple *max_index_stmt = gimple_build_assign (max_index, REDUC_MAX_EXPR,
						    induction_index);
      gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT);

      /* Vector of {max_index, max_index, max_index,...}.  */
      tree max_index_vec = make_ssa_name (index_vec_type);
      tree max_index_vec_rhs = build_vector_from_val (index_vec_type,
						      max_index);
      gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec,
							max_index_vec_rhs);
      gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT);

      /* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
	 with the vector (INDUCTION_INDEX) of found indexes, choosing values
	 from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
	 otherwise.  Only one value should match, resulting in a vector
	 (VEC_COND) with one data value and the rest zeros.
	 In the case where the loop never made any matches, every index will
	 match, resulting in a vector with all data values (which will all be
	 the default value).  */

      /* Compare the max index vector to the vector of found indexes to find
	 the position of the max value.  */
      tree vec_compare = make_ssa_name (index_vec_cmp_type);
      gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR,
						      induction_index,
						      max_index_vec);
      gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT);

      /* Use the compare to choose either values from the data vector or
	 zero.  */
      tree vec_cond = make_ssa_name (vectype);
      gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR,
						   vec_compare, new_phi_result,
						   zero_vec);
      gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT);

      /* Finally we need to extract the data value from the vector (VEC_COND)
	 into a scalar (MATCHED_DATA_REDUC).  Logically we want to do a OR
	 reduction, but because this doesn't exist, we can use a MAX reduction
	 instead.  The data value might be signed or a float so we need to cast
	 it first.
	 In the case where the loop never made any matches, the data values are
	 all identical, and so will reduce down correctly.  */

      /* Make the matched data values unsigned.  */
      tree vec_cond_cast = make_ssa_name (vectype_unsigned);
      tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned,
				       vec_cond);
      gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast,
							VIEW_CONVERT_EXPR,
							vec_cond_cast_rhs);
      gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT);

      /* Reduce down to a scalar value.  */
      tree data_reduc = make_ssa_name (scalar_type_unsigned);
      optab ot = optab_for_tree_code (REDUC_MAX_EXPR, vectype_unsigned,
				      optab_default);
      gcc_assert (optab_handler (ot, TYPE_MODE (vectype_unsigned))
		  != CODE_FOR_nothing);
      gimple *data_reduc_stmt = gimple_build_assign (data_reduc,
						     REDUC_MAX_EXPR,
						     vec_cond_cast);
      gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT);

      /* Convert the reduced value back to the result type and set as the
	 result.  */
      tree data_reduc_cast = build1 (VIEW_CONVERT_EXPR, scalar_type,
				     data_reduc);
      epilog_stmt = gimple_build_assign (new_scalar_dest, data_reduc_cast);
      new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
      scalar_results.safe_push (new_temp);
    }

  /* 2.3 Create the reduction code, using one of the three schemes described
         above. In SLP we simply need to extract all the elements from the
         vector (without reducing them), so we use scalar shifts.  */
  else if (reduc_code != ERROR_MARK && !slp_reduc)
    {
      tree tmp;
      tree vec_elem_type;

      /*** Case 1:  Create:
           v_out2 = reduc_expr <v_out1>  */

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
			 "Reduce using direct vector reduction.\n");

      vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result));
      if (!useless_type_conversion_p (scalar_type, vec_elem_type))
	{
          tree tmp_dest =
              vect_create_destination_var (scalar_dest, vec_elem_type);
	  tmp = build1 (reduc_code, vec_elem_type, new_phi_result);
	  epilog_stmt = gimple_build_assign (tmp_dest, tmp);
	  new_temp = make_ssa_name (tmp_dest, epilog_stmt);
	  gimple_assign_set_lhs (epilog_stmt, new_temp);
	  gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

	  tmp = build1 (NOP_EXPR, scalar_type, new_temp);
	}
      else
	tmp = build1 (reduc_code, scalar_type, new_phi_result);

      epilog_stmt = gimple_build_assign (new_scalar_dest, tmp);
      new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

      if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
	   == INTEGER_INDUC_COND_REDUCTION)
	  && !operand_equal_p (initial_def, induc_val, 0))
	{
	  /* Earlier we set the initial value to be a vector if induc_val
	     values.  Check the result and if it is induc_val then replace
	     with the original initial value, unless induc_val is
	     the same as initial_def already.  */
	  tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
				  induc_val);

	  tmp = make_ssa_name (new_scalar_dest);
	  epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
					     initial_def, new_temp);
	  gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
	  new_temp = tmp;
	}

      scalar_results.safe_push (new_temp);
    }
  else
    {
      bool reduce_with_shift = have_whole_vector_shift (mode);
      int element_bitsize = tree_to_uhwi (bitsize);
      int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
      tree vec_temp;

      /* Regardless of whether we have a whole vector shift, if we're
	 emulating the operation via tree-vect-generic, we don't want
	 to use it.  Only the first round of the reduction is likely
	 to still be profitable via emulation.  */
      /* ??? It might be better to emit a reduction tree code here, so that
	 tree-vect-generic can expand the first round via bit tricks.  */
      if (!VECTOR_MODE_P (mode))
	reduce_with_shift = false;
      else
	{
	  optab optab = optab_for_tree_code (code, vectype, optab_default);
	  if (optab_handler (optab, mode) == CODE_FOR_nothing)
	    reduce_with_shift = false;
	}

      if (reduce_with_shift && !slp_reduc)
	{
	  int nelements = vec_size_in_bits / element_bitsize;
	  unsigned char *sel = XALLOCAVEC (unsigned char, nelements);

	  int elt_offset;

	  tree zero_vec = build_zero_cst (vectype);
	  /*** Case 2: Create:
	     for (offset = nelements/2; offset >= 1; offset/=2)
	        {
	          Create:  va' = vec_shift <va, offset>
	          Create:  va = vop <va, va'>
	        }  */

	  tree rhs;

	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "Reduce using vector shifts\n");

	  vec_dest = vect_create_destination_var (scalar_dest, vectype);
	  new_temp = new_phi_result;
	  for (elt_offset = nelements / 2;
	       elt_offset >= 1;
	       elt_offset /= 2)
	    {
	      calc_vec_perm_mask_for_shift (mode, elt_offset, sel);
	      tree mask = vect_gen_perm_mask_any (vectype, sel);
	      epilog_stmt = gimple_build_assign (vec_dest, VEC_PERM_EXPR,
						 new_temp, zero_vec, mask);
	      new_name = make_ssa_name (vec_dest, epilog_stmt);
	      gimple_assign_set_lhs (epilog_stmt, new_name);
	      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

	      epilog_stmt = gimple_build_assign (vec_dest, code, new_name,
						 new_temp);
	      new_temp = make_ssa_name (vec_dest, epilog_stmt);
	      gimple_assign_set_lhs (epilog_stmt, new_temp);
	      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
	    }

	  /* 2.4  Extract the final scalar result.  Create:
	     s_out3 = extract_field <v_out2, bitpos>  */

	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "extract scalar result\n");

	  rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp,
			bitsize, bitsize_zero_node);
	  epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
	  new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
	  gimple_assign_set_lhs (epilog_stmt, new_temp);
	  gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
	  scalar_results.safe_push (new_temp);
	}
      else
        {
          /*** Case 3: Create:
             s = extract_field <v_out2, 0>
             for (offset = element_size;
                  offset < vector_size;
                  offset += element_size;)
               {
                 Create:  s' = extract_field <v_out2, offset>
                 Create:  s = op <s, s'>  // For non SLP cases
               }  */

          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
			     "Reduce using scalar code.\n");

          vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
          FOR_EACH_VEC_ELT (new_phis, i, new_phi)
            {
              int bit_offset;
              if (gimple_code (new_phi) == GIMPLE_PHI)
                vec_temp = PHI_RESULT (new_phi);
              else
                vec_temp = gimple_assign_lhs (new_phi);
              tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
				 bitsize_zero_node);
              epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
              new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_temp);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

              /* In SLP we don't need to apply reduction operation, so we just
                 collect s' values in SCALAR_RESULTS.  */
              if (slp_reduc)
                scalar_results.safe_push (new_temp);

              for (bit_offset = element_bitsize;
                   bit_offset < vec_size_in_bits;
                   bit_offset += element_bitsize)
                {
                  tree bitpos = bitsize_int (bit_offset);
                  tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
                                     bitsize, bitpos);

                  epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
                  new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
                  gimple_assign_set_lhs (epilog_stmt, new_name);
                  gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

                  if (slp_reduc)
                    {
                      /* In SLP we don't need to apply reduction operation, so
                         we just collect s' values in SCALAR_RESULTS.  */
                      new_temp = new_name;
                      scalar_results.safe_push (new_name);
                    }
                  else
                    {
		      epilog_stmt = gimple_build_assign (new_scalar_dest, code,
							 new_name, new_temp);
                      new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
                      gimple_assign_set_lhs (epilog_stmt, new_temp);
                      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
                    }
                }
            }

          /* The only case where we need to reduce scalar results in SLP, is
             unrolling.  If the size of SCALAR_RESULTS is greater than
             GROUP_SIZE, we reduce them combining elements modulo
             GROUP_SIZE.  */
          if (slp_reduc)
            {
              tree res, first_res, new_res;
	      gimple *new_stmt;

              /* Reduce multiple scalar results in case of SLP unrolling.  */
              for (j = group_size; scalar_results.iterate (j, &res);
                   j++)
                {
                  first_res = scalar_results[j % group_size];
		  new_stmt = gimple_build_assign (new_scalar_dest, code,
						  first_res, res);
                  new_res = make_ssa_name (new_scalar_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_res);
                  gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
                  scalar_results[j % group_size] = new_res;
                }
            }
          else
            /* Not SLP - we have one scalar to keep in SCALAR_RESULTS.  */
            scalar_results.safe_push (new_temp);
        }
    }

  /* Reached directly (via goto) for nested non-double reductions, which
     skip the scalar extraction above and keep a vector result.  */
vect_finalize_reduction:

  if (double_reduc)
    loop = loop->inner;

  /* 2.5 Adjust the final result by the initial value of the reduction
	 variable. (When such adjustment is not needed, then
	 'adjustment_def' is zero).  For example, if code is PLUS we create:
	 new_temp = loop_exit_def + adjustment_def  */

  if (adjustment_def)
    {
      gcc_assert (!slp_reduc);
      if (nested_in_vect_loop)
	{
          new_phi = new_phis[0];
	  gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
	  expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
	  new_dest = vect_create_destination_var (scalar_dest, vectype);
	}
      else
	{
          new_temp = scalar_results[0];
	  gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
	  expr = build2 (code, scalar_type, new_temp, adjustment_def);
	  new_dest = vect_create_destination_var (scalar_dest, scalar_type);
	}

      epilog_stmt = gimple_build_assign (new_dest, expr);
      new_temp = make_ssa_name (new_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
      if (nested_in_vect_loop)
        {
          set_vinfo_for_stmt (epilog_stmt,
                              new_stmt_vec_info (epilog_stmt, loop_vinfo));
          STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
                STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));

          if (!double_reduc)
            scalar_results.quick_push (new_temp);
          else
            scalar_results[0] = new_temp;
        }
      else
        scalar_results[0] = new_temp;

      new_phis[0] = epilog_stmt;
    }

  /* 2.6  Handle the loop-exit phis.  Replace the uses of scalar loop-exit
          phis with new adjusted scalar results, i.e., replace use <s_out0>
          with use <s_out4>.

     Transform:
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out0>
          use <s_out0>

     into:

        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out4>
          use <s_out4> */


  /* In SLP reduction chain we reduce vector results into one vector if
     necessary, hence we set here GROUP_SIZE to 1.  SCALAR_DEST is the LHS of
     the last stmt in the reduction chain, since we are looking for the loop
     exit phi node.  */
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      gimple *dest_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
      /* Handle reduction patterns.  */
      if (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt)))
	dest_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt));

      scalar_dest = gimple_assign_lhs (dest_stmt);
      group_size = 1;
    }

  /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
     case that GROUP_SIZE is greater than vectorization factor).  Therefore, we
     need to match SCALAR_RESULTS with corresponding statements.  The first
     (GROUP_SIZE / number of new vector stmts) scalar results correspond to
     the first vector stmt, etc.
     (RATIO is equal to (GROUP_SIZE / number of new vector stmts)).  */
  if (group_size > new_phis.length ())
    {
      ratio = group_size / new_phis.length ();
      gcc_assert (!(group_size % new_phis.length ()));
    }
  else
    ratio = 1;

  for (k = 0; k < group_size; k++)
    {
      if (k % ratio == 0)
        {
          epilog_stmt = new_phis[k / ratio];
          reduction_phi = reduction_phis[k / ratio];
	  if (double_reduc)
	    inner_phi = inner_phis[k / ratio];
        }

      if (slp_reduc)
        {
	  gimple *current_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[k];

          orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
          /* SLP statements can't participate in patterns.  */
          gcc_assert (!orig_stmt);
          scalar_dest = gimple_assign_lhs (current_stmt);
        }

      phis.create (3);
      /* Find the loop-closed-use at the loop exit of the original scalar
         result.  (The reduction result is expected to have two immediate uses -
         one at the latch block, and one at the loop exit).  */
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))
	    && !is_gimple_debug (USE_STMT (use_p)))
	  phis.safe_push (USE_STMT (use_p));

      /* While we expect to have found an exit_phi because of loop-closed-ssa
         form we can end up without one if the scalar cycle is dead.  */

      FOR_EACH_VEC_ELT (phis, i, exit_phi)
        {
          if (outer_loop)
            {
	      stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
              gphi *vect_phi;

              /* FORNOW. Currently not supporting the case that an inner-loop
                 reduction is not used in the outer-loop (but only outside the
                 outer-loop), unless it is double reduction.  */
              gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
                           && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
			  || double_reduc);

	      if (double_reduc)
		STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi;
	      else
		STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
              if (!double_reduc
                  || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
                      != vect_double_reduction_def)
                continue;

              /* Handle double reduction:

                 stmt1: s1 = phi <s0, s2>  - double reduction phi (outer loop)
                 stmt2:   s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
                 stmt3:   s4 = use (s3)     - (regular) reduc stmt (inner loop)
                 stmt4: s2 = phi <s4>      - double reduction stmt (outer loop)

                 At that point the regular reduction (stmt2 and stmt3) is
                 already vectorized, as well as the exit phi node, stmt4.
                 Here we vectorize the phi node of double reduction, stmt1, and
                 update all relevant statements.  */

              /* Go through all the uses of s2 to find double reduction phi
                 node, i.e., stmt1 above.  */
              orig_name = PHI_RESULT (exit_phi);
              FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
                {
                  stmt_vec_info use_stmt_vinfo;
                  stmt_vec_info new_phi_vinfo;
                  tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
                  basic_block bb = gimple_bb (use_stmt);
		  gimple *use;

                  /* Check that USE_STMT is really double reduction phi
                     node.  */
                  if (gimple_code (use_stmt) != GIMPLE_PHI
                      || gimple_phi_num_args (use_stmt) != 2
                      || bb->loop_father != outer_loop)
                    continue;
                  use_stmt_vinfo = vinfo_for_stmt (use_stmt);
                  if (!use_stmt_vinfo
                      || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
                          != vect_double_reduction_def)
		    continue;

                  /* Create vector phi node for double reduction:
                     vs1 = phi <vs0, vs2>
                     vs1 was created previously in this function by a call to
                       vect_get_vec_def_for_operand and is stored in
                       vec_initial_def;
                     vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
                     vs0 is created here.  */

                  /* Create vector phi node.  */
                  vect_phi = create_phi_node (vec_initial_def, bb);
                  new_phi_vinfo = new_stmt_vec_info (vect_phi,
                                    loop_vec_info_for_loop (outer_loop));
                  set_vinfo_for_stmt (vect_phi, new_phi_vinfo);

                  /* Create vs0 - initial def of the double reduction phi.  */
                  preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
                                             loop_preheader_edge (outer_loop));
                  init_def = get_initial_def_for_reduction (stmt,
                                                          preheader_arg, NULL);
                  vect_phi_init = vect_init_vector (use_stmt, init_def,
                                                    vectype, NULL);

                  /* Update phi node arguments with vs0 and vs2.  */
                  add_phi_arg (vect_phi, vect_phi_init,
                               loop_preheader_edge (outer_loop),
                               UNKNOWN_LOCATION);
                  add_phi_arg (vect_phi, PHI_RESULT (inner_phi),
                               loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_NOTE, vect_location,
				       "created double reduction phi node: ");
                      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
                    }

                  vect_phi_res = PHI_RESULT (vect_phi);

                  /* Replace the use, i.e., set the correct vs1 in the regular
                     reduction phi node.  FORNOW, NCOPIES is always 1, so the
                     loop is redundant.  */
                  use = reduction_phi;
                  for (j = 0; j < ncopies; j++)
                    {
                      edge pr_edge = loop_preheader_edge (loop);
                      SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
                      use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
                    }
                }
            }
        }

      phis.release ();
      if (nested_in_vect_loop)
        {
          if (double_reduc)
            loop = outer_loop;
          else
            continue;
        }

      phis.create (3);
      /* Find the loop-closed-use at the loop exit of the original scalar
         result.  (The reduction result is expected to have two immediate uses,
         one at the latch block, and one at the loop exit).  For double
         reductions we are looking for exit phis of the outer loop.  */
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        {
          if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
	    {
	      if (!is_gimple_debug (USE_STMT (use_p)))
		phis.safe_push (USE_STMT (use_p));
	    }
          else
            {
              if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
                {
                  tree phi_res = PHI_RESULT (USE_STMT (use_p));

                  FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
                    {
                      if (!flow_bb_inside_loop_p (loop,
                                             gimple_bb (USE_STMT (phi_use_p)))
			  && !is_gimple_debug (USE_STMT (phi_use_p)))
                        phis.safe_push (USE_STMT (phi_use_p));
                    }
                }
            }
        }

      FOR_EACH_VEC_ELT (phis, i, exit_phi)
        {
          /* Replace the uses:  */
          orig_name = PHI_RESULT (exit_phi);
          scalar_result = scalar_results[k];
          FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
            FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
              SET_USE (use_p, scalar_result);
        }

      phis.release ();
    }
}


/* Function is_nonwrapping_integer_induction.

   Check if STMT (which is part of loop LOOP) both increments and
   does not cause overflow.  */

static bool
is_nonwrapping_integer_induction (gimple *stmt, struct loop *loop)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
  tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
  tree lhs_type = TREE_TYPE (gimple_phi_result (stmt));
  widest_int ni, max_loop_value, lhs_max;
  bool overflow = false;

  /* Make sure the loop is integer based.  */
  if (TREE_CODE (base) != INTEGER_CST
      || TREE_CODE (step) != INTEGER_CST)
    return false;

  /* Check that the induction increments.  */
  if (tree_int_cst_sgn (step) == -1)
    return false;

  /* Check that the max size of the loop will not wrap.  */

  /* With undefined overflow the induction may validly be assumed not to
     wrap.  */
  if (TYPE_OVERFLOW_UNDEFINED (lhs_type))
    return true;

  if (! max_stmt_executions (loop, &ni))
    return false;

  max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type),
			    &overflow);
  if (overflow)
    return false;

  max_loop_value = wi::add (wi::to_widest (base), max_loop_value,
			    TYPE_SIGN (lhs_type), &overflow);
  if (overflow)
    return false;

  return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type))
	  <= TYPE_PRECISION (lhs_type));
}

/* Function vectorizable_reduction.

   Check if STMT performs a reduction operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.

   This function also handles reduction idioms (patterns) that have been
   recognized in advance during vect_pattern_recog.  In this case, STMT may be
   of this form:
     X = pattern_expr (arg0, arg1, ..., X)
   and it's STMT_VINFO_RELATED_STMT points to the last stmt in the original
   sequence that had been detected and replaced by the pattern-stmt (STMT).

   This function also handles reduction of condition expressions, for example:
     for (int i = 0; i < N; i++)
       if (a[i] < value)
	 last = a[i];
   This is handled by vectorising the loop and creating an additional vector
   containing the loop indexes for which "a[i] < value" was true.  In the
   function epilogue this is reduced to a single max value and then used to
   index into the vector of results.

   In some cases of reduction patterns, the type of the reduction variable X is
   different than the type of the other arguments of STMT.
In such cases, the vectype that is used when transforming STMT into a vector stmt is different than the vectype that is used to determine the vectorization factor, because it consists of a different number of elements than the actual number of elements that are being operated upon in parallel. For example, consider an accumulation of shorts into an int accumulator. On some targets it's possible to vectorize this pattern operating on 8 shorts at a time (hence, the vectype for purposes of determining the vectorization factor should be V8HI); on the other hand, the vectype that is used to create the vector form is actually V4SI (the type of the result). Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that indicates what is the actual level of parallelism (V8HI in the example), so that the right vectorization factor would be derived. This vectype corresponds to the type of arguments to the reduction stmt, and should *NOT* be used to create the vectorized stmt. The right vectype for the vectorized stmt is obtained from the type of the result X: get_vectype_for_scalar_type (TREE_TYPE (X)) This means that, contrary to "regular" reductions (or "regular" stmts in general), the following equation: STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X)) does *NOT* necessarily hold for reduction patterns. 
*/ bool vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt, slp_tree slp_node) { tree vec_dest; tree scalar_dest; tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); tree vectype_out = STMT_VINFO_VECTYPE (stmt_info); tree vectype_in = NULL_TREE; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); enum tree_code code, orig_code, epilog_reduc_code; machine_mode vec_mode; int op_type; optab optab, reduc_optab; tree new_temp = NULL_TREE; gimple *def_stmt; enum vect_def_type dt, cond_reduc_dt = vect_unknown_def_type; gphi *new_phi = NULL; gimple *cond_reduc_def_stmt = NULL; tree scalar_type; bool is_simple_use; gimple *orig_stmt; stmt_vec_info orig_stmt_info; tree expr = NULL_TREE; int i; int ncopies; int epilog_copies; stmt_vec_info prev_stmt_info, prev_phi_info; bool single_defuse_cycle = false; tree reduc_def = NULL_TREE; gimple *new_stmt = NULL; int j; tree ops[3]; bool nested_cycle = false, found_nested_cycle_def = false; gimple *reduc_def_stmt = NULL; bool double_reduc = false, dummy; basic_block def_bb; struct loop * def_stmt_loop, *outer_loop = NULL; tree def_arg; gimple *def_arg_stmt; auto_vec<tree> vec_oprnds0; auto_vec<tree> vec_oprnds1; auto_vec<tree> vect_defs; auto_vec<gimple *> phis; int vec_num; tree def0, def1, tem, op1 = NULL_TREE; bool first_p = true; tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE; tree cond_reduc_val = NULL_TREE; /* In case of reduction chain we switch to the first stmt in the chain, but we don't update STMT_INFO, since only the last stmt is marked as reduction and has reduction properties. */ if (GROUP_FIRST_ELEMENT (stmt_info) && GROUP_FIRST_ELEMENT (stmt_info) != stmt) { stmt = GROUP_FIRST_ELEMENT (stmt_info); first_p = false; } if (nested_in_vect_loop_p (loop, stmt)) { outer_loop = loop; loop = loop->inner; nested_cycle = true; } /* 1. 
Is vectorizable reduction? */ /* Not supportable if the reduction variable is used in the loop, unless it's a reduction chain. */ if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer && !GROUP_FIRST_ELEMENT (stmt_info)) return false; /* Reductions that are not used even in an enclosing outer-loop, are expected to be "live" (used out of the loop). */ if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope && !STMT_VINFO_LIVE_P (stmt_info)) return false; /* Make sure it was already recognized as a reduction computation. */ if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_reduction_def && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_nested_cycle) return false; /* 2. Has this been recognized as a reduction pattern? Check if STMT represents a pattern that has been recognized in earlier analysis stages. For stmts that represent a pattern, the STMT_VINFO_RELATED_STMT field records the last stmt in the original sequence that constitutes the pattern. */ orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)); if (orig_stmt) { orig_stmt_info = vinfo_for_stmt (orig_stmt); gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info)); gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info)); } /* 3. Check the operands of the operation. The first operands are defined inside the loop body. The last operand is the reduction variable, which is defined by the loop-header-phi. */ gcc_assert (is_gimple_assign (stmt)); /* Flatten RHS. 
*/ switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))) { case GIMPLE_SINGLE_RHS: op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)); if (op_type == ternary_op) { tree rhs = gimple_assign_rhs1 (stmt); ops[0] = TREE_OPERAND (rhs, 0); ops[1] = TREE_OPERAND (rhs, 1); ops[2] = TREE_OPERAND (rhs, 2); code = TREE_CODE (rhs); } else return false; break; case GIMPLE_BINARY_RHS: code = gimple_assign_rhs_code (stmt); op_type = TREE_CODE_LENGTH (code); gcc_assert (op_type == binary_op); ops[0] = gimple_assign_rhs1 (stmt); ops[1] = gimple_assign_rhs2 (stmt); break; case GIMPLE_TERNARY_RHS: code = gimple_assign_rhs_code (stmt); op_type = TREE_CODE_LENGTH (code); gcc_assert (op_type == ternary_op); ops[0] = gimple_assign_rhs1 (stmt); ops[1] = gimple_assign_rhs2 (stmt); ops[2] = gimple_assign_rhs3 (stmt); break; case GIMPLE_UNARY_RHS: return false; default: gcc_unreachable (); } /* The default is that the reduction variable is the last in statement. */ int reduc_index = op_type - 1; if (code == MINUS_EXPR) reduc_index = 0; if (code == COND_EXPR && slp_node) return false; scalar_dest = gimple_assign_lhs (stmt); scalar_type = TREE_TYPE (scalar_dest); if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type) && !SCALAR_FLOAT_TYPE_P (scalar_type)) return false; /* Do not try to vectorize bit-precision reductions. */ if ((TYPE_PRECISION (scalar_type) != GET_MODE_PRECISION (TYPE_MODE (scalar_type)))) return false; /* All uses but the last are expected to be defined in the loop. The last use is the reduction variable. In case of nested cycle this assumption is not true: we use reduc_index to record the index of the reduction variable. */ for (i = 0; i < op_type; i++) { if (i == reduc_index) continue; /* The condition of COND_EXPR is checked in vectorizable_condition(). 
*/ if (i == 0 && code == COND_EXPR) continue; is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &def_stmt, &dt, &tem); if (!vectype_in) vectype_in = tem; gcc_assert (is_simple_use); if (dt != vect_internal_def && dt != vect_external_def && dt != vect_constant_def && dt != vect_induction_def && !(dt == vect_nested_cycle && nested_cycle)) return false; if (dt == vect_nested_cycle) { found_nested_cycle_def = true; reduc_def_stmt = def_stmt; reduc_index = i; } if (i == 1 && code == COND_EXPR) { /* Record how value of COND_EXPR is defined. */ if (dt == vect_constant_def) { cond_reduc_dt = dt; cond_reduc_val = ops[i]; } if (dt == vect_induction_def && def_stmt != NULL && is_nonwrapping_integer_induction (def_stmt, loop)) { cond_reduc_dt = dt; cond_reduc_def_stmt = def_stmt; } } } is_simple_use = vect_is_simple_use (ops[reduc_index], loop_vinfo, &def_stmt, &dt, &tem); if (!vectype_in) vectype_in = tem; gcc_assert (is_simple_use); if (!found_nested_cycle_def) reduc_def_stmt = def_stmt; if (reduc_def_stmt && gimple_code (reduc_def_stmt) != GIMPLE_PHI) return false; if (!(dt == vect_reduction_def || dt == vect_nested_cycle || ((dt == vect_internal_def || dt == vect_external_def || dt == vect_constant_def || dt == vect_induction_def) && nested_cycle && found_nested_cycle_def))) { /* For pattern recognized stmts, orig_stmt might be a reduction, but some helper statements for the pattern might not, or might be COND_EXPRs with reduction uses in the condition. */ gcc_assert (orig_stmt); return false; } enum vect_reduction_type v_reduc_type; gimple *tmp = vect_is_simple_reduction (loop_vinfo, reduc_def_stmt, !nested_cycle, &dummy, false, &v_reduc_type); STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = v_reduc_type; /* If we have a condition reduction, see if we can simplify it further. 
*/ if (v_reduc_type == COND_REDUCTION) { if (cond_reduc_dt == vect_induction_def) { stmt_vec_info cond_stmt_vinfo = vinfo_for_stmt (cond_reduc_def_stmt); tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (cond_stmt_vinfo); gcc_assert (TREE_CODE (base) == INTEGER_CST); cond_reduc_val = NULL_TREE; /* Find a suitable value below base; punt if base is the minimum value of the type for now. */ if (tree_int_cst_sgn (base) == 1) cond_reduc_val = build_int_cst (TREE_TYPE (base), 0); else if (tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (base)), base)) cond_reduc_val = int_const_binop (MINUS_EXPR, base, integer_one_node); if (cond_reduc_val) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "condition expression based on " "integer induction.\n"); STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = INTEGER_INDUC_COND_REDUCTION; } } /* Loop peeling modifies initial value of reduction PHI, which makes the reduction stmt to be transformed different to the original stmt analyzed. We need to record reduction code for CONST_COND_REDUCTION type reduction at analyzing stage, thus it can be used directly at transform stage. */ if (STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MAX_EXPR || STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MIN_EXPR) { /* Also set the reduction type to CONST_COND_REDUCTION. 
*/ gcc_assert (cond_reduc_dt == vect_constant_def); STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION; } else if (cond_reduc_dt == vect_constant_def) { enum vect_def_type cond_initial_dt; gimple *def_stmt = SSA_NAME_DEF_STMT (ops[reduc_index]); tree cond_initial_val = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop)); gcc_assert (cond_reduc_val != NULL_TREE); vect_is_simple_use (cond_initial_val, loop_vinfo, &def_stmt, &cond_initial_dt); if (cond_initial_dt == vect_constant_def && types_compatible_p (TREE_TYPE (cond_initial_val), TREE_TYPE (cond_reduc_val))) { tree e = fold_build2 (LE_EXPR, boolean_type_node, cond_initial_val, cond_reduc_val); if (e && (integer_onep (e) || integer_zerop (e))) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "condition expression based on " "compile time constant.\n"); /* Record reduction code at analysis stage. */ STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) = integer_onep (e) ? MAX_EXPR : MIN_EXPR; STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION; } } } } if (orig_stmt) gcc_assert (tmp == orig_stmt || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == orig_stmt); else /* We changed STMT to be the first stmt in reduction chain, hence we check that in this case the first element in the chain is STMT. */ gcc_assert (stmt == tmp || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt); if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt))) return false; if (slp_node) ncopies = 1; else ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo) / TYPE_VECTOR_SUBPARTS (vectype_in)); gcc_assert (ncopies >= 1); vec_mode = TYPE_MODE (vectype_in); if (code == COND_EXPR) { /* Only call during the analysis stage, otherwise we'll lose STMT_VINFO_TYPE. */ if (!vec_stmt && !vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0, NULL)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unsupported condition in reduction\n"); return false; } } else { /* 4. 
Supportable by target? */ if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR || code == RROTATE_EXPR) { /* Shifts and rotates are only supported by vectorizable_shifts, not vectorizable_reduction. */ if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unsupported shift or rotation.\n"); return false; } /* 4.1. check support for the operation in the loop */ optab = optab_for_tree_code (code, vectype_in, optab_default); if (!optab) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "no optab.\n"); return false; } if (optab_handler (optab, vec_mode) == CODE_FOR_nothing) { if (dump_enabled_p ()) dump_printf (MSG_NOTE, "op not supported by target.\n"); if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD || LOOP_VINFO_VECT_FACTOR (loop_vinfo) < vect_min_worthwhile_factor (code)) return false; if (dump_enabled_p ()) dump_printf (MSG_NOTE, "proceeding using word mode.\n"); } /* Worthwhile without SIMD support? */ if (!VECTOR_MODE_P (TYPE_MODE (vectype_in)) && LOOP_VINFO_VECT_FACTOR (loop_vinfo) < vect_min_worthwhile_factor (code)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not worthwhile without SIMD support.\n"); return false; } } /* 4.2. Check support for the epilog operation. If STMT represents a reduction pattern, then the type of the reduction variable may be different than the type of the rest of the arguments. For example, consider the case of accumulation of shorts into an int accumulator; The original code: S1: int_a = (int) short_a; orig_stmt-> S2: int_acc = plus <int_a ,int_acc>; was replaced with: STMT: int_acc = widen_sum <short_a, int_acc> This means that: 1. The tree-code that is used to create the vector operation in the epilog code (that reduces the partial results) is not the tree-code of STMT, but is rather the tree-code of the original stmt from the pattern that STMT is replacing. 
I.e, in the example above we want to use 'widen_sum' in the loop, but 'plus' in the epilog. 2. The type (mode) we use to check available target support for the vector operation to be created in the *epilog*, is determined by the type of the reduction variable (in the example above we'd check this: optab_handler (plus_optab, vect_int_mode])). However the type (mode) we use to check available target support for the vector operation to be created *inside the loop*, is determined by the type of the other arguments to STMT (in the example we'd check this: optab_handler (widen_sum_optab, vect_short_mode)). This is contrary to "regular" reductions, in which the types of all the arguments are the same as the type of the reduction variable. For "regular" reductions we can therefore use the same vector type (and also the same tree-code) when generating the epilog code and when generating the code inside the loop. */ if (orig_stmt) { /* This is a reduction pattern: get the vectype from the type of the reduction variable, and get the tree-code from orig_stmt. */ gcc_assert (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION); orig_code = gimple_assign_rhs_code (orig_stmt); gcc_assert (vectype_out); vec_mode = TYPE_MODE (vectype_out); } else { /* Regular reduction: use the same vectype and tree-code as used for the vector code inside the loop can be used for the epilog code. */ orig_code = code; if (code == MINUS_EXPR) orig_code = PLUS_EXPR; /* For simple condition reductions, replace with the actual expression we want to base our reduction around. 
*/ if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == CONST_COND_REDUCTION) { orig_code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info); gcc_assert (orig_code == MAX_EXPR || orig_code == MIN_EXPR); } else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == INTEGER_INDUC_COND_REDUCTION) orig_code = MAX_EXPR; } if (nested_cycle) { def_bb = gimple_bb (reduc_def_stmt); def_stmt_loop = def_bb->loop_father; def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt, loop_preheader_edge (def_stmt_loop)); if (TREE_CODE (def_arg) == SSA_NAME && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg)) && gimple_code (def_arg_stmt) == GIMPLE_PHI && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt)) && vinfo_for_stmt (def_arg_stmt) && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt)) == vect_double_reduction_def) double_reduc = true; } epilog_reduc_code = ERROR_MARK; if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) != COND_REDUCTION) { if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code)) { reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out, optab_default); if (!reduc_optab) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "no optab for reduction.\n"); epilog_reduc_code = ERROR_MARK; } else if (optab_handler (reduc_optab, vec_mode) == CODE_FOR_nothing) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduc op not supported by target.\n"); epilog_reduc_code = ERROR_MARK; } /* When epilog_reduc_code is ERROR_MARK then a reduction will be generated in the epilog using multiple expressions. This does not work for condition reductions. 
*/ if (epilog_reduc_code == ERROR_MARK && (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == INTEGER_INDUC_COND_REDUCTION || STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == CONST_COND_REDUCTION)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "no reduc code for scalar code.\n"); return false; } } else { if (!nested_cycle || double_reduc) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "no reduc code for scalar code.\n"); return false; } } } else { int scalar_precision = GET_MODE_PRECISION (TYPE_MODE (scalar_type)); cr_index_scalar_type = make_unsigned_type (scalar_precision); cr_index_vector_type = build_vector_type (cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype_out)); epilog_reduc_code = REDUC_MAX_EXPR; optab = optab_for_tree_code (REDUC_MAX_EXPR, cr_index_vector_type, optab_default); if (optab_handler (optab, TYPE_MODE (cr_index_vector_type)) == CODE_FOR_nothing) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduc max op not supported by target.\n"); return false; } } if ((double_reduc || STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) != TREE_CODE_REDUCTION) && ncopies > 1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "multiple types in double reduction or condition " "reduction.\n"); return false; } /* In case of widenning multiplication by a constant, we update the type of the constant to be the type of the other operand. We check that the constant fits the type in the pattern recognition pass. 
*/ if (code == DOT_PROD_EXPR && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1]))) { if (TREE_CODE (ops[0]) == INTEGER_CST) ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]); else if (TREE_CODE (ops[1]) == INTEGER_CST) ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]); else { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "invalid types in dot-prod\n"); return false; } } if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION) { widest_int ni; if (! max_loop_iterations (loop, &ni)) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "loop count not known, cannot create cond " "reduction.\n"); return false; } /* Convert backedges to iterations. */ ni += 1; /* The additional index will be the same type as the condition. Check that the loop can fit into this less one (because we'll use up the zero slot for when there are no matches). */ tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type); if (wi::geu_p (ni, wi::to_widest (max_index))) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "loop size is greater than data size.\n"); return false; } } if (!vec_stmt) /* transformation not required. */ { if (first_p && !vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies, reduc_index)) return false; STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type; return true; } /** Transform. **/ if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n"); /* FORNOW: Multiple types are not supported for condition. */ if (code == COND_EXPR) gcc_assert (ncopies == 1); /* Create the destination vector */ vec_dest = vect_create_destination_var (scalar_dest, vectype_out); /* In case the vectorization factor (VF) is bigger than the number of elements that we can fit in a vectype (nunits), we have to generate more than one vector stmt - i.e - we need to "unroll" the vector stmt by a factor VF/nunits. For more details see documentation in vectorizable_operation. 
*/ /* If the reduction is used in an outer loop we need to generate VF intermediate results, like so (e.g. for ncopies=2): r0 = phi (init, r0) r1 = phi (init, r1) r0 = x0 + r0; r1 = x1 + r1; (i.e. we generate VF results in 2 registers). In this case we have a separate def-use cycle for each copy, and therefore for each copy we get the vector def for the reduction variable from the respective phi node created for this copy. Otherwise (the reduction is unused in the loop nest), we can combine together intermediate results, like so (e.g. for ncopies=2): r = phi (init, r) r = x0 + r; r = x1 + r; (i.e. we generate VF/2 results in a single register). In this case for each copy we get the vector def for the reduction variable from the vectorized reduction operation generated in the previous iteration. */ if (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live) { single_defuse_cycle = true; epilog_copies = 1; } else epilog_copies = ncopies; prev_stmt_info = NULL; prev_phi_info = NULL; if (slp_node) vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); else { vec_num = 1; vec_oprnds0.create (1); if (op_type == ternary_op) vec_oprnds1.create (1); } phis.create (vec_num); vect_defs.create (vec_num); if (!slp_node) vect_defs.quick_push (NULL_TREE); for (j = 0; j < ncopies; j++) { if (j == 0 || !single_defuse_cycle) { for (i = 0; i < vec_num; i++) { /* Create the reduction-phi that defines the reduction operand. */ new_phi = create_phi_node (vec_dest, loop->header); set_vinfo_for_stmt (new_phi, new_stmt_vec_info (new_phi, loop_vinfo)); if (j == 0 || slp_node) phis.quick_push (new_phi); } } if (code == COND_EXPR) { gcc_assert (!slp_node); vectorizable_condition (stmt, gsi, vec_stmt, PHI_RESULT (phis[0]), reduc_index, NULL); /* Multiple types are not supported for condition. */ break; } /* Handle uses. */ if (j == 0) { if (slp_node) { /* Get vec defs for all the operands except the reduction index, ensuring the ordering of the ops in the vector is kept. 
*/ auto_vec<tree, 3> slp_ops; auto_vec<vec<tree>, 3> vec_defs; slp_ops.quick_push (reduc_index == 0 ? NULL : ops[0]); slp_ops.quick_push (reduc_index == 1 ? NULL : ops[1]); if (op_type == ternary_op) slp_ops.quick_push (reduc_index == 2 ? NULL : ops[2]); vect_get_slp_defs (slp_ops, slp_node, &vec_defs, -1); vec_oprnds0.safe_splice (vec_defs[reduc_index == 0 ? 1 : 0]); vec_defs[reduc_index == 0 ? 1 : 0].release (); if (op_type == ternary_op) { vec_oprnds1.safe_splice (vec_defs[reduc_index == 2 ? 1 : 2]); vec_defs[reduc_index == 2 ? 1 : 2].release (); } } else { loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index], stmt); vec_oprnds0.quick_push (loop_vec_def0); if (op_type == ternary_op) { op1 = reduc_index == 0 ? ops[2] : ops[1]; loop_vec_def1 = vect_get_vec_def_for_operand (op1, stmt); vec_oprnds1.quick_push (loop_vec_def1); } } } else { if (!slp_node) { enum vect_def_type dt; gimple *dummy_stmt; vect_is_simple_use (ops[!reduc_index], loop_vinfo, &dummy_stmt, &dt); loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def0); vec_oprnds0[0] = loop_vec_def0; if (op_type == ternary_op) { vect_is_simple_use (op1, loop_vinfo, &dummy_stmt, &dt); loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def1); vec_oprnds1[0] = loop_vec_def1; } } if (single_defuse_cycle) reduc_def = gimple_assign_lhs (new_stmt); STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi; } FOR_EACH_VEC_ELT (vec_oprnds0, i, def0) { if (slp_node) reduc_def = PHI_RESULT (phis[i]); else { if (!single_defuse_cycle || j == 0) reduc_def = PHI_RESULT (new_phi); } def1 = ((op_type == ternary_op) ? 
vec_oprnds1[i] : NULL); if (op_type == binary_op) { if (reduc_index == 0) expr = build2 (code, vectype_out, reduc_def, def0); else expr = build2 (code, vectype_out, def0, reduc_def); } else { if (reduc_index == 0) expr = build3 (code, vectype_out, reduc_def, def0, def1); else { if (reduc_index == 1) expr = build3 (code, vectype_out, def0, reduc_def, def1); else expr = build3 (code, vectype_out, def0, def1, reduc_def); } } new_stmt = gimple_build_assign (vec_dest, expr); new_temp = make_ssa_name (vec_dest, new_stmt); gimple_assign_set_lhs (new_stmt, new_temp); vect_finish_stmt_generation (stmt, new_stmt, gsi); if (slp_node) { SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); vect_defs.quick_push (new_temp); } else vect_defs[0] = new_temp; } if (slp_node) continue; if (j == 0) STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; else STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; prev_stmt_info = vinfo_for_stmt (new_stmt); prev_phi_info = vinfo_for_stmt (new_phi); } tree indx_before_incr, indx_after_incr, cond_name = NULL; /* Finalize the reduction-phi (set its arguments) and create the epilog reduction code. */ if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node) { new_temp = gimple_assign_lhs (*vec_stmt); vect_defs[0] = new_temp; /* For cond reductions we want to create a new vector (INDEX_COND_EXPR) which is updated with the current index of the loop for every match of the original loop's cond_expr (VEC_STMT). This results in a vector containing the last time the condition passed for that vector lane. The first match will be a 1 to allow 0 to be used for non-matching indexes. If there are no matches at all then the vector will be all zeroes. 
*/ if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION) { int nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); int k; gcc_assert (gimple_assign_rhs_code (*vec_stmt) == VEC_COND_EXPR); /* First we create a simple vector induction variable which starts with the values {1,2,3,...} (SERIES_VECT) and increments by the vector size (STEP). */ /* Create a {1,2,3,...} vector. */ tree *vtemp = XALLOCAVEC (tree, nunits_out); for (k = 0; k < nunits_out; ++k) vtemp[k] = build_int_cst (cr_index_scalar_type, k + 1); tree series_vect = build_vector (cr_index_vector_type, vtemp); /* Create a vector of the step value. */ tree step = build_int_cst (cr_index_scalar_type, nunits_out); tree vec_step = build_vector_from_val (cr_index_vector_type, step); /* Create an induction variable. */ gimple_stmt_iterator incr_gsi; bool insert_after; standard_iv_increment_position (loop, &incr_gsi, &insert_after); create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi, insert_after, &indx_before_incr, &indx_after_incr); /* Next create a new phi node vector (NEW_PHI_TREE) which starts filled with zeros (VEC_ZERO). */ /* Create a vector of 0s. */ tree zero = build_zero_cst (cr_index_scalar_type); tree vec_zero = build_vector_from_val (cr_index_vector_type, zero); /* Create a vector phi node. */ tree new_phi_tree = make_ssa_name (cr_index_vector_type); new_phi = create_phi_node (new_phi_tree, loop->header); set_vinfo_for_stmt (new_phi, new_stmt_vec_info (new_phi, loop_vinfo)); add_phi_arg (new_phi, vec_zero, loop_preheader_edge (loop), UNKNOWN_LOCATION); /* Now take the condition from the loops original cond_expr (VEC_STMT) and produce a new cond_expr (INDEX_COND_EXPR) which for every match uses values from the induction variable (INDEX_BEFORE_INCR) otherwise uses values from the phi node (NEW_PHI_TREE). Finally, we update the phi (NEW_PHI_TREE) to take the value of the new cond_expr (INDEX_COND_EXPR). */ /* Duplicate the condition from vec_stmt. 
*/
	  tree ccompare = unshare_expr (gimple_assign_rhs1 (*vec_stmt));

	  /* Create a conditional, where the condition is taken from vec_stmt
	     (CCOMPARE), then is the induction index (INDEX_BEFORE_INCR) and
	     else is the phi (NEW_PHI_TREE).  */
	  tree index_cond_expr = build3 (VEC_COND_EXPR, cr_index_vector_type,
					 ccompare, indx_before_incr,
					 new_phi_tree);
	  cond_name = make_ssa_name (cr_index_vector_type);
	  gimple *index_condition = gimple_build_assign (cond_name,
							 index_cond_expr);
	  gsi_insert_before (&incr_gsi, index_condition, GSI_SAME_STMT);
	  stmt_vec_info index_vec_info = new_stmt_vec_info (index_condition,
							    loop_vinfo);
	  STMT_VINFO_VECTYPE (index_vec_info) = cr_index_vector_type;
	  set_vinfo_for_stmt (index_condition, index_vec_info);

	  /* Update the phi with the vec cond.  */
	  add_phi_arg (new_phi, cond_name, loop_latch_edge (loop),
		       UNKNOWN_LOCATION);
	}
    }

  /* COND_NAME is NULL unless the COND_REDUCTION index vector was built
     above; the epilog code uses it to extract the selected lane.  */
  vect_create_epilog_for_reduction (vect_defs, stmt, epilog_copies,
				    epilog_reduc_code, phis, reduc_index,
				    double_reduc, slp_node, cond_name,
				    cond_reduc_val);

  return true;
}

/* Function vect_min_worthwhile_factor.

   For a loop where we could vectorize the operation indicated by CODE,
   return the minimum vectorization factor that makes it worthwhile
   to use generic vectors.  */
int
vect_min_worthwhile_factor (enum tree_code code)
{
  switch (code)
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case NEGATE_EXPR:
      return 4;

    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case BIT_NOT_EXPR:
      return 2;

    default:
      /* INT_MAX: no vectorization factor can make other codes
	 worthwhile without real SIMD support.  */
      return INT_MAX;
    }
}


/* Function vectorizable_induction

   Check if PHI performs an induction computation that can be vectorized.
   If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
   phi to replace it, put it in VEC_STMT, and add it to the same basic block.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.
*/

bool
vectorizable_induction (gimple *phi,
			gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
			gimple **vec_stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (phi);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  /* Number of vector stmts needed to cover the vectorization factor.  */
  int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
  tree vec_def;

  gcc_assert (ncopies >= 1);

  /* FORNOW. These restrictions should be relaxed.  */
  if (nested_in_vect_loop_p (loop, phi))
    {
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      gimple *exit_phi;
      edge latch_e;
      tree loop_arg;

      if (ncopies > 1)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "multiple types in nested loop.\n");
	  return false;
	}

      /* Look for a use of the latch value of the inner-loop phi that is
	 outside the inner loop (i.e. an exit phi).  */
      exit_phi = NULL;
      latch_e = loop_latch_edge (loop->inner);
      loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;

	  if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt)))
	    {
	      exit_phi = use_stmt;
	      break;
	    }
	}
      if (exit_phi)
	{
	  stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
	  if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
		&& !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "inner-loop induction only used outside "
				 "of the outer vectorized loop.\n");
	      return false;
	    }
	}
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);

  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_induction ===\n");
      vect_model_induction_cost (stmt_info, ncopies);
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");

  vec_def = get_initial_def_for_induction (phi);
  *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
  return true;
}

/* Function vectorizable_live_operation.

   STMT computes a value that is used outside the loop.  Check if
   it can be supported.  */

bool
vectorizable_live_operation (gimple *stmt,
			     gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
			     slp_tree slp_node, int slp_index,
			     gimple **vec_stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  imm_use_iterator imm_iter;
  tree lhs, lhs_type, bitsize, vec_bitsize;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
  gimple *use_stmt;
  auto_vec<tree> vec_oprnds;

  gcc_assert (STMT_VINFO_LIVE_P (stmt_info));

  if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
    return false;

  /* FORNOW. CHECKME. */
  if (nested_in_vect_loop_p (loop, stmt))
    return false;

  /* If STMT is not relevant and it is a simple assignment and its inputs are
     invariant then it can remain in place, unvectorized.  The original last
     scalar value that it computes will be used.  */
  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (is_simple_and_all_uses_invariant (stmt, loop_vinfo));
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "statement is simple and uses invariant.  Leaving in "
			 "place.\n");
      return true;
    }

  if (!vec_stmt)
    /* No transformation required.  */
    return true;

  /* If stmt has a related stmt, then use that for getting the lhs.
If the use stmt is a single arg PHI, just replace all uses of PHI result. It's necessary because lcssa PHI defining lhs may be before newly inserted stmt. */ use_operand_p use_p; FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs) if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)) && !is_gimple_debug (use_stmt)) { if (gimple_code (use_stmt) == GIMPLE_PHI && gimple_phi_num_args (use_stmt) == 1) { replace_uses_by (gimple_phi_result (use_stmt), new_tree); } else { FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter) SET_USE (use_p, new_tree); } update_stmt (use_stmt); } return true; } /* Kill any debug uses outside LOOP of SSA names defined in STMT. */ static void vect_loop_kill_debug_uses (struct loop *loop, gimple *stmt) { ssa_op_iter op_iter; imm_use_iterator imm_iter; def_operand_p def_p; gimple *ustmt; FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF) { FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p)) { basic_block bb; if (!is_gimple_debug (ustmt)) continue; bb = gimple_bb (ustmt); if (!flow_bb_inside_loop_p (loop, bb)) { if (gimple_debug_bind_p (ustmt)) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "killing debug use\n"); gimple_debug_bind_reset_value (ustmt); update_stmt (ustmt); } else gcc_unreachable (); } } } } /* Given loop represented by LOOP_VINFO, return true if computation of LOOP_VINFO_NITERS (= LOOP_VINFO_NITERSM1 + 1) doesn't overflow, false otherwise. */ static bool loop_niters_no_overflow (loop_vec_info loop_vinfo) { /* Constant case. */ if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) { tree cst_niters = LOOP_VINFO_NITERS (loop_vinfo); tree cst_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo); gcc_assert (TREE_CODE (cst_niters) == INTEGER_CST); gcc_assert (TREE_CODE (cst_nitersm1) == INTEGER_CST); if (wi::to_widest (cst_nitersm1) < wi::to_widest (cst_niters)) return true; } widest_int max; struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); /* Check the upper bound of loop niters. 
*/ if (get_max_loop_iterations (loop, &max)) { tree type = TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)); signop sgn = TYPE_SIGN (type); widest_int type_max = widest_int::from (wi::max_value (type), sgn); if (max < type_max) return true; } return false; } /* Scale profiling counters by estimation for LOOP which is vectorized by factor VF. */ static void scale_profile_for_vect_loop (struct loop *loop, unsigned vf) { edge preheader = loop_preheader_edge (loop); /* Reduce loop iterations by the vectorization factor. */ gcov_type new_est_niter = niter_for_unrolled_loop (loop, vf); gcov_type freq_h = loop->header->count, freq_e = preheader->count; /* Use frequency only if counts are zero. */ if (freq_h == 0 && freq_e == 0) { freq_h = loop->header->frequency; freq_e = EDGE_FREQUENCY (preheader); } if (freq_h != 0) { gcov_type scale; /* Avoid dropping loop body profile counter to 0 because of zero count in loop's preheader. */ freq_e = MAX (freq_e, 1); /* This should not overflow. */ scale = GCOV_COMPUTE_SCALE (freq_e * (new_est_niter + 1), freq_h); scale_loop_frequencies (loop, scale, REG_BR_PROB_BASE); } basic_block exit_bb = single_pred (loop->latch); edge exit_e = single_exit (loop); exit_e->count = loop_preheader_edge (loop)->count; exit_e->probability = REG_BR_PROB_BASE / (new_est_niter + 1); edge exit_l = single_pred_edge (loop->latch); int prob = exit_l->probability; exit_l->probability = REG_BR_PROB_BASE - exit_e->probability; exit_l->count = exit_bb->count - exit_e->count; if (exit_l->count < 0) exit_l->count = 0; if (prob > 0) scale_bbs_frequencies_int (&loop->latch, 1, exit_l->probability, prob); } /* Function vect_transform_loop. The analysis phase has determined that the loop is vectorizable. Vectorize the loop - created vectorized stmts to replace the scalar stmts in the loop, and update the loop exit condition. Returns scalar epilogue loop if any. 
*/

struct loop *
vect_transform_loop (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  struct loop *epilogue = NULL;
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  int i;
  tree niters_vector = NULL;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  bool grouped_store;
  bool slp_scheduled = false;
  gimple *stmt, *pattern_stmt;
  gimple_seq pattern_def_seq = NULL;
  gimple_stmt_iterator pattern_def_si = gsi_none ();
  bool transform_pattern_stmt = false;
  bool check_profitability = false;
  int th;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===\n");

  /* Use the more conservative vectorization threshold.  If the number
     of iterations is constant assume the cost check has been performed
     by our caller.  If the threshold makes all loops profitable that
     run at least the vectorization factor number of times checking is
     pointless, too.  */
  th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
  if (th >= LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1
      && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Profitability threshold is %d loop iterations.\n",
			 th);
      check_profitability = true;
    }

  /* Make sure there exists a single-predecessor exit bb.  Do this before
     versioning.   */
  edge e = single_exit (loop);
  if (! single_pred_p (e->dest))
    {
      split_loop_exit_edge (e);
      if (dump_enabled_p ())
	dump_printf (MSG_NOTE, "split exit edge\n");
    }

  /* Version the loop first, if required, so the profitability check
     comes first.  */
  if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
    {
      vect_loop_versioning (loop_vinfo, th, check_profitability);
      check_profitability = false;
    }

  /* Make sure there exists a single-predecessor exit bb also on the
     scalar loop copy.  Do this after versioning but before peeling
     so CFG structure is fine for both scalar and if-converted loop
     to make slpeel_duplicate_current_defs_from_edges face matched
     loop closed PHI nodes on the exit.  */
  if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
    {
      e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo));
      if (! single_pred_p (e->dest))
	{
	  split_loop_exit_edge (e);
	  if (dump_enabled_p ())
	    dump_printf (MSG_NOTE, "split exit edge of scalar loop\n");
	}
    }

  tree niters = vect_build_loop_niters (loop_vinfo);
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = niters;
  tree nitersm1 = unshare_expr (LOOP_VINFO_NITERSM1 (loop_vinfo));
  bool niters_no_overflow = loop_niters_no_overflow (loop_vinfo);
  epilogue = vect_do_peeling (loop_vinfo, niters, nitersm1, &niters_vector, th,
			      check_profitability, niters_no_overflow);
  if (niters_vector == NULL_TREE)
    {
      /* Known constant trip count: fold the division by VF now.  */
      if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
	niters_vector
	  = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
			   LOOP_VINFO_INT_NITERS (loop_vinfo) / vf);
      else
	vect_gen_vector_loop_niters (loop_vinfo, niters, &niters_vector,
				     niters_no_overflow);
    }

  /* 1) Make sure the loop header has exactly two entries
     2) Make sure we have a preheader basic block.  */

  gcc_assert (EDGE_COUNT (loop->header->preds) == 2);

  split_edge (loop_preheader_edge (loop));

  /* FORNOW: the vectorizer supports only loops which body consist
     of one basic block (header + empty latch). When the vectorizer will
     support more involved loop forms, the order by which the BBs are
     traversed need to be reconsidered.  */

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      stmt_vec_info stmt_info;

      /* First pass: transform the induction PHIs of this block.  */
      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
	   gsi_next (&si))
        {
	  gphi *phi = si.phi ();
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
                               "------>vectorizing phi: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }
	  stmt_info = vinfo_for_stmt (phi);
	  if (!stmt_info)
	    continue;

	  if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
	    vect_loop_kill_debug_uses (loop, phi);

	  if (!STMT_VINFO_RELEVANT_P (stmt_info)
	      && !STMT_VINFO_LIVE_P (stmt_info))
	    continue;

	  if (STMT_VINFO_VECTYPE (stmt_info)
	      && (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
		  != (unsigned HOST_WIDE_INT) vf)
	      && dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");

	  if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
	      vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
	    }
	}

      /* Second pass: transform ordinary statements; pattern statements and
	 their def sequences are interleaved via transform_pattern_stmt.  */
      pattern_stmt = NULL;
      for (gimple_stmt_iterator si = gsi_start_bb (bb);
	   !gsi_end_p (si) || transform_pattern_stmt;)
	{
	  bool is_store;

          if (transform_pattern_stmt)
	    stmt = pattern_stmt;
          else
	    {
	      stmt = gsi_stmt (si);
	      /* During vectorization remove existing clobber stmts.  */
	      if (gimple_clobber_p (stmt))
		{
		  unlink_stmt_vdef (stmt);
		  gsi_remove (&si, true);
		  release_defs (stmt);
		  continue;
		}
	    }

	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "------>vectorizing statement: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }

	  stmt_info = vinfo_for_stmt (stmt);

	  /* vector stmts created in the outer-loop during vectorization of
	     stmts in an inner-loop may not have a stmt_info, and do not
	     need to be vectorized.  */
	  if (!stmt_info)
	    {
	      gsi_next (&si);
	      continue;
	    }

	  if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
	    vect_loop_kill_debug_uses (loop, stmt);

	  if (!STMT_VINFO_RELEVANT_P (stmt_info)
	      && !STMT_VINFO_LIVE_P (stmt_info))
	    {
	      /* The scalar stmt itself is irrelevant, but its pattern stmt
		 may still need vectorizing.  */
	      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
		  && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
		  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
		      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
		{
		  stmt = pattern_stmt;
		  stmt_info = vinfo_for_stmt (stmt);
		}
	      else
		{
		  gsi_next (&si);
		  continue;
		}
	    }
	  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
		   && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
		   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
		       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
	    transform_pattern_stmt = true;

	  /* If pattern statement has def stmts, vectorize them too.  */
	  if (is_pattern_stmt_p (stmt_info))
	    {
	      if (pattern_def_seq == NULL)
		{
		  pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
		  pattern_def_si = gsi_start (pattern_def_seq);
		}
	      else if (!gsi_end_p (pattern_def_si))
		gsi_next (&pattern_def_si);
	      if (pattern_def_seq != NULL)
		{
		  gimple *pattern_def_stmt = NULL;
		  stmt_vec_info pattern_def_stmt_info = NULL;

		  /* Find the next relevant/live def stmt in the sequence.  */
		  while (!gsi_end_p (pattern_def_si))
		    {
		      pattern_def_stmt = gsi_stmt (pattern_def_si);
		      pattern_def_stmt_info
			= vinfo_for_stmt (pattern_def_stmt);
		      if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
			  || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
			break;
		      gsi_next (&pattern_def_si);
		    }

		  if (!gsi_end_p (pattern_def_si))
		    {
		      if (dump_enabled_p ())
			{
			  dump_printf_loc (MSG_NOTE, vect_location,
					   "==> vectorizing pattern def "
					   "stmt: ");
			  dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
					    pattern_def_stmt, 0);
			}

		      stmt = pattern_def_stmt;
		      stmt_info = pattern_def_stmt_info;
		    }
		  else
		    {
		      pattern_def_si = gsi_none ();
		      transform_pattern_stmt = false;
		    }
		}
	      else
		transform_pattern_stmt = false;
	    }

	  if (STMT_VINFO_VECTYPE (stmt_info))
	    {
	      unsigned int nunits
		= (unsigned int)
		  TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
	      if (!STMT_SLP_TYPE (stmt_info)
		  && nunits != (unsigned int) vf
		  && dump_enabled_p ())
		  /* For SLP VF is set according to unrolling factor, and not
		     to vector size, hence for SLP this print is not valid.  */
		dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
	    }

	  /* SLP. Schedule all the SLP instances when the first SLP stmt is
	     reached.  */
	  if (STMT_SLP_TYPE (stmt_info))
	    {
	      if (!slp_scheduled)
		{
		  slp_scheduled = true;

		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location,
				     "=== scheduling SLP instances ===\n");

		  vect_schedule_slp (loop_vinfo);
		}

	      /* Hybrid SLP stmts must be vectorized in addition to SLP.  */
	      if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
		{
		  if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
		    {
		      pattern_def_seq = NULL;
		      gsi_next (&si);
		    }
		  continue;
		}
	    }

	  /* -------- vectorize statement ------------ */
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");

	  grouped_store = false;
	  is_store = vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL);
          if (is_store)
            {
	      if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
		{
		  /* Interleaving. If IS_STORE is TRUE, the vectorization of the
		     interleaving chain was completed - free all the stores in
		     the chain.  */
		  gsi_next (&si);
		  vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info));
		}
	      else
		{
		  /* Free the attached stmt_vec_info and remove the stmt.  */
		  gimple *store = gsi_stmt (si);
		  free_stmt_vec_info (store);
		  unlink_stmt_vdef (store);
		  gsi_remove (&si, true);
		  release_defs (store);
		}

	      /* Stores can only appear at the end of pattern statements.  */
	      gcc_assert (!transform_pattern_stmt);
	      pattern_def_seq = NULL;
            }
	  else if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
	    {
	      pattern_def_seq = NULL;
	      gsi_next (&si);
	    }
	}		        /* stmts in BB */
    }				/* BBs in loop */

  slpeel_make_loop_iterate_ntimes (loop, niters_vector);

  scale_profile_for_vect_loop (loop, vf);

  /* The minimum number of iterations performed by the epilogue.  This
     is 1 when peeling for gaps because we always need a final scalar
     iteration.  */
  int min_epilogue_iters = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0;
  /* +1 to convert latch counts to loop iteration counts,
     -min_epilogue_iters to remove iterations that cannot be performed
     by the vector code.  */
  int bias = 1 - min_epilogue_iters;

  /* In these calculations the "- 1" converts loop iteration counts
     back to latch counts.  */
  if (loop->any_upper_bound)
    loop->nb_iterations_upper_bound
      = wi::udiv_floor (loop->nb_iterations_upper_bound + bias, vf) - 1;
  if (loop->any_likely_upper_bound)
    loop->nb_iterations_likely_upper_bound
      = wi::udiv_floor (loop->nb_iterations_likely_upper_bound + bias, vf) - 1;
  if (loop->any_estimate)
    loop->nb_iterations_estimate
      = wi::udiv_floor (loop->nb_iterations_estimate + bias, vf) - 1;

  if (dump_enabled_p ())
    {
      if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "LOOP VECTORIZED\n");
	  if (loop->inner)
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "OUTER LOOP VECTORIZED\n");
	  dump_printf (MSG_NOTE, "\n");
	}
      else
	dump_printf_loc (MSG_NOTE, vect_location,
			 "LOOP EPILOGUE VECTORIZED (VS=%d)\n",
			 current_vector_size);
    }

  /* Free SLP instances here because otherwise stmt reference counting
     won't work.  */
  slp_instance instance;
  FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
    vect_free_slp_instance (instance);
  LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
  /* Clear-up safelen field since its value is invalid after vectorization
     since vectorized loop can have loop-carried dependencies.  */
  loop->safelen = 0;

  /* Don't vectorize epilogue for epilogue.  */
  if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
    epilogue = NULL;

  if (epilogue)
    {
      unsigned int vector_sizes
	= targetm.vectorize.autovectorize_vector_sizes ();
      /* Keep only sizes strictly smaller than the current vector size.  */
      vector_sizes &= current_vector_size - 1;

      if (!PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK))
	epilogue = NULL;
      else if (!vector_sizes)
	epilogue = NULL;
      else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
	       && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0)
	{
	  int smallest_vec_size = 1 << ctz_hwi (vector_sizes);
	  int ratio = current_vector_size / smallest_vec_size;
	  int eiters = LOOP_VINFO_INT_NITERS (loop_vinfo)
	    - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
	  eiters = eiters % vf;

	  epilogue->nb_iterations_upper_bound = eiters - 1;

	  if (eiters < vf / ratio)
	    epilogue = NULL;
	}
    }

  if (epilogue)
    {
      epilogue->force_vectorize = loop->force_vectorize;
      epilogue->safelen = loop->safelen;
      epilogue->dont_vectorize = false;

      /* We may need to if-convert epilogue to vectorize it.  */
      if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
	tree_if_conversion (epilogue);
    }

  return epilogue;
}

/* The code below is trying to perform simple optimization - revert
   if-conversion for masked stores, i.e. if the mask of a store is zero
   do not perform it and all stored value producers also if possible.
   For example,
     for (i=0; i<n; i++)
       if (c[i])
	{
	  p1[i] += 1;
	  p2[i] = p3[i] +2;
	}
   this transformation will produce the following semi-hammock:

   if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 })
     {
       vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
       vect__12.22_172 = vect__11.19_170 + vect_cst__171;
       MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
       vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
       vect__19.28_184 = vect__18.25_182 + vect_cst__183;
       MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
     }
*/

void
optimize_mask_stores (struct loop *loop)
{
  basic_block *bbs = get_loop_body (loop);
  unsigned nbbs = loop->num_nodes;
  unsigned i;
  basic_block bb;
  struct loop *bb_loop;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  auto_vec<gimple *> worklist;

  vect_location = find_loop_location (loop);
  /* Pick up all masked stores in loop if any.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  stmt = gsi_stmt (gsi);
	  if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
	    worklist.safe_push (stmt);
	}
    }

  free (bbs);
  if (worklist.is_empty ())
    return;

  /* Loop has masked stores.  */
  while (!worklist.is_empty ())
    {
      gimple *last, *last_store;
      edge e, efalse;
      tree mask;
      basic_block store_bb, join_bb;
      gimple_stmt_iterator gsi_to;
      tree vdef, new_vdef;
      gphi *phi;
      tree vectype;
      tree zero;

      last = worklist.pop ();
      mask = gimple_call_arg (last, 2);
      bb = gimple_bb (last);
      /* Create then_bb and if-then structure in CFG, then_bb belongs to
	 the same loop as if_bb.  It could be different to LOOP when two
	 level loop-nest is vectorized and mask_store belongs to the inner
	 one.  */
      e = split_block (bb, last);
      bb_loop = bb->loop_father;
      gcc_assert (loop == bb_loop || flow_loop_nested_p (loop, bb_loop));
      join_bb = e->dest;
      store_bb = create_empty_bb (bb);
      add_bb_to_loop (store_bb, bb_loop);
      e->flags = EDGE_TRUE_VALUE;
      efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE);
      /* Put STORE_BB to likely part.  */
      efalse->probability = PROB_UNLIKELY;
      store_bb->frequency = PROB_ALWAYS - EDGE_FREQUENCY (efalse);
      make_edge (store_bb, join_bb, EDGE_FALLTHRU);
      if (dom_info_available_p (CDI_DOMINATORS))
	set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Create new block %d to sink mask stores.",
			 store_bb->index);
      /* Create vector comparison with boolean result.  */
      vectype = TREE_TYPE (mask);
      zero = build_zero_cst (vectype);
      stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE);
      gsi = gsi_last_bb (bb);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
      /* Create new PHI node for vdef of the last masked store:
	 .MEM_2 = VDEF <.MEM_1>
	 will be converted to
	 .MEM.3 = VDEF <.MEM_1>
	 and new PHI node will be created in join bb
	 .MEM_2 = PHI <.MEM_1, .MEM_3>  */
      vdef = gimple_vdef (last);
      new_vdef = make_ssa_name (gimple_vop (cfun), last);
      gimple_set_vdef (last, new_vdef);
      phi = create_phi_node (vdef, join_bb);
      add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION);

      /* Put all masked stores with the same mask to STORE_BB if possible.  */
      while (true)
	{
	  gimple_stmt_iterator gsi_from;
	  gimple *stmt1 = NULL;

	  /* Move masked store to STORE_BB.  */
	  last_store = last;
	  gsi = gsi_for_stmt (last);
	  gsi_from = gsi;
	  /* Shift GSI to the previous stmt for further traversal.  */
	  gsi_prev (&gsi);
	  gsi_to = gsi_start_bb (store_bb);
	  gsi_move_before (&gsi_from, &gsi_to);
	  /* Setup GSI_TO to the non-empty block start.  */
	  gsi_to = gsi_start_bb (store_bb);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "Move stmt to created bb\n");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0);
	    }
	  /* Move all stored value producers if possible.  */
	  while (!gsi_end_p (gsi))
	    {
	      tree lhs;
	      imm_use_iterator imm_iter;
	      use_operand_p use_p;
	      bool res;

	      /* Skip debug statements.  */
	      if (is_gimple_debug (gsi_stmt (gsi)))
		{
		  gsi_prev (&gsi);
		  continue;
		}
	      stmt1 = gsi_stmt (gsi);
	      /* Do not consider statements writing to memory or having
		 volatile operand.  */
	      if (gimple_vdef (stmt1)
		  || gimple_has_volatile_ops (stmt1))
		break;
	      gsi_from = gsi;
	      gsi_prev (&gsi);
	      lhs = gimple_get_lhs (stmt1);
	      if (!lhs)
		break;

	      /* LHS of vectorized stmt must be SSA_NAME.  */
	      if (TREE_CODE (lhs) != SSA_NAME)
		break;

	      if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
		{
		  /* Remove dead scalar statement.  */
		  if (has_zero_uses (lhs))
		    {
		      gsi_remove (&gsi_from, true);
		      continue;
		    }
		}

	      /* Check that LHS does not have uses outside of STORE_BB.  */
	      res = true;
	      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
		{
		  gimple *use_stmt;
		  use_stmt = USE_STMT (use_p);
		  if (is_gimple_debug (use_stmt))
		    continue;
		  if (gimple_bb (use_stmt) != store_bb)
		    {
		      res = false;
		      break;
		    }
		}
	      if (!res)
		break;

	      /* The producer must see the same memory state as the store it
		 feeds; otherwise its loads cannot be sunk past LAST_STORE.  */
	      if (gimple_vuse (stmt1)
		  && gimple_vuse (stmt1) != gimple_vuse (last_store))
		break;

	      /* Can move STMT1 to STORE_BB.  */
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "Move stmt to created bb\n");
		  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
		}
	      gsi_move_before (&gsi_from, &gsi_to);
	      /* Shift GSI_TO for further insertion.  */
	      gsi_prev (&gsi_to);
	    }
	  /* Put other masked stores with the same mask to STORE_BB.  */
	  if (worklist.is_empty ()
	      || gimple_call_arg (worklist.last (), 2) != mask
	      || worklist.last () != stmt1)
	    break;
	  last = worklist.pop ();
	}
      add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION);
    }
}
cc_bmm_bg_op.h
#ifndef CAFFE2_FB_OPERATORS_CC_BMM_BG_H_
#define CAFFE2_FB_OPERATORS_CC_BMM_BG_H_

#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/types.h"
#include "caffe2/utils/math.h"

namespace caffe2 {

using T = float;
using TInd = int;
using Engine = DefaultEngine;

// Fused operator: per batch element, concatenates the embedding inputs
// Input(1)..Input(InputSize()-1) into an (ndata x embed_size) matrix S,
// computes S * S^T via a single Gemm, and gathers entries of the resulting
// (ndata x ndata) matrix using the index tensor Input(0).
template <class Context>
class ConcatBatchMatMulBatchGatherOp final : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;
  ConcatBatchMatMulBatchGatherOp(const OperatorDef& operator_def, Workspace* ws)
      : Operator<Context>(operator_def, ws) {}
  bool RunOnDevice() override;

 protected:
  // NOTE(review): none of these members are read in RunOnDevice below —
  // presumably kept for interface parity with BatchMatMul; confirm before
  // relying on them.  Also note bool members initialized from int literals.
  int axis_ = 1;
  int add_axis_ = 1;
  bool trans_a_ = 0;
  bool trans_b_ = 1;
  bool broadcast_ = 0;
};

template <class Context>
bool ConcatBatchMatMulBatchGatherOp<Context>::RunOnDevice() {
  auto& indices = Input(0);     // gather indices into the ndata x ndata result
  auto& input_zero = Input(1);  // first data input; used as shape/type reference
  int adj_size = input_zero.dim() + 1;
  int canonical_axis = 1;  // concatenation axis is hard-coded to 1
  CAFFE_ENFORCE_LT(canonical_axis, adj_size, "Axis not in input ndim range.");
  // All data inputs must share the first input's dtype.
  for (const auto i : c10::irange(2, InputSize())) {
    CAFFE_ENFORCE(
        Input(i).dtype() == input_zero.dtype(),
        "All inputs must have the same type, expected: ",
        input_zero.dtype().name(),
        " but got: ",
        Input(i).dtype().name(),
        " for input: ",
        i);
  }

  int before = 1, after = 1;
  for (const auto i : c10::irange(input_zero.dim())) {
    int dim = input_zero.dim32(i);
    if (i < canonical_axis) {
      before *= dim;  // product of dims before the concat axis (batch size)
    } else { // i > canonical_axis || i == canonical_axis && add_axis_
      after *= dim;  // product of remaining dims (per-input embedding size)
    }
    // check the input dims are compatible.
    for (const auto j : c10::irange(2, InputSize())) {
      int dim_j = Input(j).dim32(i);
      CAFFE_ENFORCE(
          dim == dim_j,
          "Expect dimension = ",
          dim,
          " got ",
          dim_j,
          " at axis = ",
          i,
          " for input: ",
          j,
          ". The input tensors can only have different dimensions "
          "when arg 'add_axis' = 0 and along the axis = ",
          canonical_axis,
          " <",
          input_zero.sizes(),
          "> vs <",
          Input(j).sizes(),
          ">.");
    }
  }

  auto ndata = InputSize() - 1;  // number of concatenated data inputs
  auto batch_size = before;
  auto embed_size = after;
  auto gather_size = indices.sizes()[0];
  // Output shape: [batch_size, indices dims...].
  // NOTE(review): the per-batch write below advances by b * gather_size,
  // which matches this shape only when `indices` is 1-D — confirm callers
  // never pass multi-dimensional indices.
  vector<int64_t> output_dims;
  output_dims.push_back(batch_size);
  output_dims.insert(
      output_dims.begin() + 1, indices.sizes().begin(), indices.sizes().end());
  auto* output = Output(0, output_dims, at::dtype<T>());
  // std::stringstream ss;
  // ss << "[";
  // for (const auto i : c10::irange(output_dims.size()))ss << output_dims[i];
  // ss << "]";
  // LOG(INFO) << "output size: " << ss.str();
  auto* output_data = output->template mutable_data<T>();
  auto* indices_data = indices.template data<TInd>();
#pragma omp parallel
  {
    // Per-thread scratch: concatenated inputs and the ndata x ndata product.
    std::vector<T> scratch_input(ndata * embed_size);
    std::vector<T> scratch_output(ndata * ndata);

#pragma omp for
    for (int b = 0; b < batch_size; ++b) {
      // concat input to scratch
      for (const auto i : c10::irange(1, InputSize())) {
        auto* input_data = Input(i).template data<T>();
        memcpy(
            &scratch_input[(i - 1) * embed_size],
            input_data + b * embed_size,
            embed_size * Input(i).itemsize());
      }
      // call mkl gemm: scratch_output = scratch_input * scratch_input^T
      math::Gemm<T, Context, Engine>(
          CblasNoTrans,
          CblasTrans,
          ndata,
          ndata,
          embed_size,
          1,
          &scratch_input[0],
          &scratch_input[0],
          0,
          &scratch_output[0],
          &context_);
      // do gather
      // NOTE(review): indices values must lie in [0, ndata*ndata); they are
      // not validated here.
      int64_t output_offset = b * gather_size;
      for (const auto i : c10::irange(gather_size)) {
        output_data[output_offset + i] = scratch_output[indices_data[i]];
      }
    }
  }
  return true;
}

} // namespace caffe2

#endif // CAFFE2_FB_OPERATORS_CC_BMM_BG_H_
symm_x_dia_u_hi_col_conj.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #ifdef _OPENMP #include <omp.h> #endif alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy) { #ifdef COMPLEX ALPHA_INT num_threads = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for(ALPHA_INT j = 0; j < columns; j++) for (ALPHA_INT i = 0; i < mat->rows; i++){ alpha_mul(y[index2(j,i,ldy)],y[index2(j,i,ldy)],beta); alpha_madde(y[index2(j,i,ldy)],x[index2(j,i,ldx)],alpha); } #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT cc = 0; cc < columns; ++cc) { ALPHA_Number* Y = &y[index2(cc,0,ldy)]; const ALPHA_Number* X = &x[index2(cc,0,ldx)]; for(ALPHA_INT di = 0; di < mat->ndiag;++di){ ALPHA_INT d = mat->distance[di]; if(d > 0){ ALPHA_INT ars = alpha_max(0,-d); ALPHA_INT acs = alpha_max(0,d); ALPHA_INT an = alpha_min(mat->rows - ars,mat->cols - acs); for(ALPHA_INT i = 0; i < an; ++i){ ALPHA_INT ar = ars + i; ALPHA_INT ac = acs + i; ALPHA_Number val; alpha_mul_2c(val,mat->values[index2(di,ar,mat->lval)],alpha); alpha_madde(Y[ar],val,X[ac]); alpha_madde(Y[ac],val,X[ar]); } } } } return ALPHA_SPARSE_STATUS_SUCCESS; #else return ALPHA_SPARSE_STATUS_INVALID_VALUE; #endif }
GB_unaryop_transpose.c
//------------------------------------------------------------------------------ // GB_unaryop_transpose: C=op(cast(A')), transpose, typecast, and apply op //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // This method is parallel, but not highly scalable. It uses only naslice = // nnz(A)/(A->vlen) threads. Each thread requires O(vlen) workspace. { // Ax unused for some uses of this template #include "GB_unused.h" //-------------------------------------------------------------------------- // get A and C //-------------------------------------------------------------------------- const int64_t *GB_RESTRICT Ai = A->i ; #if defined ( GB_PHASE_2_OF_2 ) const GB_ATYPE *GB_RESTRICT Ax = A->x ; // int64_t *GB_RESTRICT Cp = C->p ; int64_t *GB_RESTRICT Ci = C->i ; GB_CTYPE *GB_RESTRICT Cx = C->x ; #endif //-------------------------------------------------------------------------- // C = op (cast (A')) //-------------------------------------------------------------------------- int taskid ; #pragma omp parallel for num_threads(naslice) schedule(static) for (taskid = 0 ; taskid < naslice ; taskid++) { // get the rowcount for this slice, of size A->vlen int64_t *GB_RESTRICT rowcount = Rowcounts [taskid] ; for (int64_t Iter_k = A_slice [taskid] ; Iter_k < A_slice [taskid+1] ; Iter_k++) { GBI_jth_iteration_with_iter (Iter, j, pA, pA_end) ; for ( ; pA < pA_end ; pA++) { #if defined ( GB_PHASE_1_OF_2) // count one more entry in C(i,:) for this slice rowcount [Ai [pA]]++ ; #else // insert the entry into C(i,:) for this slice int64_t pC = rowcount [Ai [pA]]++ ; Ci [pC] = j ; // Cx [pC] = op (cast (Ax [pA])) GB_CAST_OP (pC, pA) ; #endif } } } }
vector.c
/*
    This file is part of ParTI!.

    ParTI! is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as
    published by the Free Software Foundation, either version 3 of
    the License, or (at your option) any later version.

    ParTI! is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with ParTI!.
    If not, see <http://www.gnu.org/licenses/>.
*/

#include <ParTI.h>
#include <stdlib.h>
#include <string.h>
#include "../error/error.h"
#include <numa.h>

/**
 * Initialize a new value vector
 *
 * @param vec a valid pointer to an uninitialized sptValueVector variable,
 * @param len number of values to create
 * @param cap total number of values to reserve
 *
 * Vector is a type of one-dimentional array with dynamic length
 */
int sptNewValueVector(sptValueVector *vec, sptNnzIndex len, sptNnzIndex cap) {
    /* Capacity is at least the requested length, and never below 2 so that
       the cap + cap/2 growth in the append functions always makes progress. */
    if(cap < len) {
        cap = len;
    }
    if(cap < 2) {
        cap = 2;
    }
    vec->len = len;
    vec->cap = cap;
    vec->data = malloc(cap * sizeof *vec->data);
    spt_CheckOSError(!vec->data, "ValVec New");
    memset(vec->data, 0, cap * sizeof *vec->data);
    return 0;
}

//Numa
/* NUMA-aware variant of sptNewValueVector: allocates on node `nume_node`.
   NOTE(review): unlike the plain variant, the error check and zero-fill are
   commented out — on allocation failure vec->data is NULL and the buffer is
   not zeroed; callers must cope with both.  */
int sptNewValueVectorNuma(sptValueVector *vec, sptNnzIndex len, sptNnzIndex cap, int nume_node) {
    if(cap < len) {
        cap = len;
    }
    if(cap < 2) {
        cap = 2;
    }
    vec->len = len;
    vec->cap = cap;
    vec->data = numa_alloc_onnode(cap * sizeof *vec->data, nume_node);
    //spt_CheckOSError(!vec->data, "ValVec New");
    //memset(vec->data, 0, cap * sizeof *vec->data);
    return 0;
}

/**
 * Fill an existed dense value vector with a specified constant
 *
 * @param vec a valid pointer to an existed sptVector variable,
 * @param val a given value constant
 *
 * Vector is a type of one-dimentional array with dynamic length
 */
int sptConstantValueVector(sptValueVector * const vec, sptValue const val) {
    for(sptNnzIndex i=0; i<vec->len; ++i)
        vec->data[i] = val;
    return 0;
}

/**
 * Copy a value vector to an uninitialized value vector
 *
 * @param dest a pointer to an uninitialized value vector
 * @param src  a pointer to an existing valid value vector
 * @param nt   number of OpenMP threads to use for the copy (OpenMP build only)
 *
 * The contents of `src` will be copied to `dest`.
 */
int sptCopyValueVector(sptValueVector *dest, const sptValueVector *src, int const nt) {
    int result = sptNewValueVector(dest, src->len, src->len);
    spt_CheckError(result, "ValVec Copy", NULL);
#ifdef PARTI_USE_OPENMP
    #pragma omp parallel for num_threads(nt)
    for (sptNnzIndex i=0; i<src->len; ++i) {
        dest->data[i] = src->data[i];
    }
#else
    memcpy(dest->data, src->data, src->len * sizeof *src->data);
#endif
    return 0;
}

/**
 * Add a value to the end of a value vector
 *
 * @param vec a pointer to a valid value vector
 * @param value the value to be appended
 *
 * The length of the value vector will be changed to contain the new value.
 */
int sptAppendValueVector(sptValueVector *vec, sptValue const value) {
    if(vec->cap <= vec->len) {
#ifndef MEMCHECK_MODE
        /* 1.5x growth for amortized O(1) append.  */
        sptNnzIndex newcap = vec->cap + vec->cap/2;
#else
        /* Exact growth so bounds errors surface immediately under memcheck.  */
        sptNnzIndex newcap = vec->len+1;
#endif
        sptValue *newdata = realloc(vec->data, newcap * sizeof *vec->data);
        spt_CheckOSError(!newdata, "ValVec Append");
        vec->cap = newcap;
        vec->data = newdata;
    }
    vec->data[vec->len] = value;
    ++vec->len;
    return 0;
}

//numa
/* NUMA-aware append: grows the buffer with numa_realloc (which needs the old
   size) instead of realloc; otherwise identical to sptAppendValueVector.  */
int sptAppendValueVectorNuma(sptValueVector *vec, sptValue const value) {
    if(vec->cap <= vec->len) {
#ifndef MEMCHECK_MODE
        sptNnzIndex newcap = vec->cap + vec->cap/2;
#else
        sptNnzIndex newcap = vec->len+1;
#endif
        sptValue *newdata = numa_realloc(vec->data, vec->cap * sizeof *vec->data, newcap * sizeof *vec->data);
        spt_CheckOSError(!newdata, "ValVec Append");
        vec->cap = newcap;
        vec->data = newdata;
    }
    vec->data[vec->len] = value;
    ++vec->len;
    return 0;
}

/**
 * Add a value to the end of a value vector
 *
 * @param vec a pointer to a valid value vector
 * @param append_vec a pointer to another value vector, containing the values
 *        to be appended
 *
 * The values from `append_vec` will be appended to `vec`.
 */
int sptAppendValueVectorWithVector(sptValueVector *vec, const sptValueVector *append_vec) {
    sptNnzIndex newlen = vec->len + append_vec->len;
    if(vec->cap <= newlen) {
        sptNnzIndex newcap = vec->cap + append_vec->cap;
        sptValue *newdata = realloc(vec->data, newcap * sizeof *vec->data);
        spt_CheckOSError(!newdata, "ValVec Append ValVec");
        vec->cap = newcap;
        vec->data = newdata;
    }
    for(sptNnzIndex i=0; i<append_vec->len; ++i) {
        vec->data[vec->len + i] = append_vec->data[i];
    }
    vec->len = newlen;
    return 0;
}

// With numa
/* NUMA-aware variant of sptAppendValueVectorWithVector (numa_realloc).  */
int sptAppendValueVectorWithVectorNuma(sptValueVector *vec, const sptValueVector *append_vec) {
    sptNnzIndex newlen = vec->len + append_vec->len;
    if(vec->cap <= newlen) {
        sptNnzIndex newcap = vec->cap + append_vec->cap;
        sptValue *newdata = numa_realloc(vec->data, vec->cap * sizeof *vec->data, newcap * sizeof *vec->data);
        spt_CheckOSError(!newdata, "ValVec Append ValVec");
        vec->cap = newcap;
        vec->data = newdata;
    }
    for(sptNnzIndex i=0; i<append_vec->len; ++i) {
        vec->data[vec->len + i] = append_vec->data[i];
    }
    vec->len = newlen;
    return 0;
}

// With numa and start location
/* Overwrites vec->data[start .. start+append_vec->len) with append_vec's
   contents.  NOTE(review): no capacity check and vec->len is not updated —
   the caller must have pre-sized `vec`; confirm this is intentional.  */
int sptAppendValueVectorWithVectorStartFromNuma(sptValueVector *vec, const sptValueVector *append_vec, unsigned long long start)  {
    for(sptNnzIndex i=0; i<append_vec->len; ++i) {
        vec->data[start + i] = append_vec->data[i];
    }
    return 0;
}

/**
 * Resize a value vector
 *
 * @param vec the value vector to resize
 * @param size the new size of the value vector
 *
 * If the new size is larger than the current size, new values will be appended
 * but the values of them are undefined. If the new size if smaller than the
 * current size, values at the end will be truncated.
 */
int sptResizeValueVector(sptValueVector *vec, sptNnzIndex const size) {
    sptNnzIndex newcap = size < 2 ? 2 : size;
    if(newcap != vec->cap) {
        sptValue *newdata = realloc(vec->data, newcap * sizeof *vec->data);
        spt_CheckOSError(!newdata, "ValVec Resize");
        vec->len = size;
        vec->cap = newcap;
        vec->data = newdata;
    } else {
        vec->len = size;
    }
    return 0;
}

//numa
/* NUMA-aware variant of sptResizeValueVector (numa_realloc).  */
int sptResizeValueVectorNuma(sptValueVector *vec, sptNnzIndex const size) {
    sptNnzIndex newcap = size < 2 ? 2 : size;
    if(newcap != vec->cap) {
        sptValue *newdata = numa_realloc(vec->data, vec->cap * sizeof *vec->data, newcap * sizeof *vec->data);
        spt_CheckOSError(!newdata, "ValVec Resize");
        vec->len = size;
        vec->cap = newcap;
        vec->data = newdata;
    } else {
        vec->len = size;
    }
    return 0;
}

/**
 * Release the memory buffer a value vector is holding
 *
 * @param vec a pointer to a valid value vector
 *
 * NOTE(review): frees with free(); must not be used on vectors created by the
 * *Numa variants (numa_alloc_onnode memory).  vec->data is left dangling.
 */
void sptFreeValueVector(sptValueVector *vec) {
    vec->len = 0;
    vec->cap = 0;
    free(vec->data);
}

/*
 * Initialize a new sptIndex vector
 *
 * @param vec a valid pointer to an uninitialized sptIndex variable,
 * @param len number of values to create
 * @param cap total number of values to reserve
 *
 * Vector is a type of one-dimentional array with dynamic length
 */
int sptNewIndexVector(sptIndexVector *vec, sptNnzIndex len, sptNnzIndex cap) {
    if(cap < len) {
        cap = len;
    }
    if(cap < 2) {
        cap = 2;
    }
    vec->len = len;
    vec->cap = cap;
    vec->data = malloc(cap * sizeof *vec->data);
    spt_CheckOSError(!vec->data, "IdxVec New");
    memset(vec->data, 0, cap * sizeof *vec->data);
    return 0;
}

//Numa
/* NUMA-aware variant of sptNewIndexVector.  NOTE(review): as with the value
   vector, the error check and zero-fill are commented out.  */
int sptNewIndexVectorNuma(sptIndexVector *vec, sptNnzIndex len, sptNnzIndex cap, int numa_node) {
    if(cap < len) {
        cap = len;
    }
    if(cap < 2) {
        cap = 2;
    }
    vec->len = len;
    vec->cap = cap;
    vec->data = numa_alloc_onnode(cap * sizeof *vec->data, numa_node);
    //spt_CheckOSError(!vec->data, "IdxVec New");
    //memset(vec->data, 0, cap * sizeof *vec->data);
    return 0;
}

/**
 * Fill an existed dense index vector with a specified constant
 *
 * @param vec a valid pointer to an existed sptIndexVector variable,
 * @param num a given value constant
 *
 * Vector is a type of
one-dimentional array with dynamic length */ int sptConstantIndexVector(sptIndexVector * const vec, sptIndex const num) { for(sptNnzIndex i=0; i<vec->len; ++i) vec->data[i] = num; return 0; } /** * Copy an index vector to an uninitialized index vector * * @param dest a pointer to an uninitialized index vector * @param src a pointer to an existing valid index vector * * The contents of `src` will be copied to `dest`. */ int sptCopyIndexVector(sptIndexVector *dest, const sptIndexVector *src, int const nt) { int result = sptNewIndexVector(dest, src->len, src->len); spt_CheckError(result, "IdxVec Copy", NULL); #ifdef PARTI_USE_OPENMP #pragma omp parallel for num_threads(nt) for (sptNnzIndex i=0; i<src->len; ++i) { dest->data[i] = src->data[i]; } #else memcpy(dest->data, src->data, src->len * sizeof *src->data); #endif return 0; } /** * Add a value to the end of a sptIndexVector * * @param vec a pointer to a valid index vector * @param value the value to be appended * * The length of the size vector will be changed to contain the new value. 
*/ int sptAppendIndexVector(sptIndexVector *vec, sptIndex const value) { if(vec->cap <= vec->len) { #ifndef MEMCHECK_MODE sptNnzIndex newcap = vec->cap + vec->cap/2; #else sptNnzIndex newcap = vec->len+1; #endif sptIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data); spt_CheckOSError(!newdata, "IdxVec Append"); vec->cap = newcap; vec->data = newdata; } vec->data[vec->len] = value; ++vec->len; return 0; } //numa int sptAppendIndexVectorNuma(sptIndexVector *vec, sptIndex const value) { if(vec->cap <= vec->len) { #ifndef MEMCHECK_MODE sptNnzIndex newcap = vec->cap + vec->cap/2; #else sptNnzIndex newcap = vec->len+1; #endif sptIndex *newdata = numa_realloc(vec->data, vec->cap * sizeof *vec->data, newcap * sizeof *vec->data); spt_CheckOSError(!newdata, "IdxVec Append"); vec->cap = newcap; vec->data = newdata; } vec->data[vec->len] = value; ++vec->len; return 0; } /** * Add a value to the end of an index vector * * @param vec a pointer to a valid index vector * @param append_vec a pointer to another index vector, containing the values to be appended * * The values from `append_vec` will be appended to `vec`. 
*/ int sptAppendIndexVectorWithVector(sptIndexVector *vec, const sptIndexVector *append_vec) { sptNnzIndex newlen = vec->len + append_vec->len; if(vec->cap <= newlen) { sptNnzIndex newcap = vec->cap + append_vec->cap; sptIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data); spt_CheckOSError(!newdata, "IdxVec Append IdxVec"); vec->cap = newcap; vec->data = newdata; } for(sptNnzIndex i=0; i<append_vec->len; ++i) { vec->data[vec->len + i] = append_vec->data[i]; } vec->len = newlen; return 0; } //numa int sptAppendIndexVectorWithVectorNuma(sptIndexVector *vec, const sptIndexVector *append_vec) { sptNnzIndex newlen = vec->len + append_vec->len; if(vec->cap <= newlen) { sptNnzIndex newcap = vec->cap + append_vec->cap; sptIndex *newdata = numa_realloc(vec->data, vec->cap * sizeof *vec->data, newcap * sizeof *vec->data); spt_CheckOSError(!newdata, "IdxVec Append IdxVec"); vec->cap = newcap; vec->data = newdata; } for(sptNnzIndex i=0; i<append_vec->len; ++i) { vec->data[vec->len + i] = append_vec->data[i]; } vec->len = newlen; return 0; } //numa int sptAppendIndexVectorWithVectorStartFromNuma(sptIndexVector *vec, const sptIndexVector *append_vec, unsigned long long start) { for(sptNnzIndex i=0; i<append_vec->len; ++i) { vec->data[start + i] = append_vec->data[i]; } return 0; } /** * Resize an index vector * * @param vec the index vector to resize * @param size the new size of the index vector * * If the new size is larger than the current size, new values will be appended * but the values of them are undefined. If the new size if smaller than the * current size, values at the end will be truncated. */ int sptResizeIndexVector(sptIndexVector *vec, sptNnzIndex const size) { sptNnzIndex newcap = size < 2 ? 
2 : size; if(newcap != vec->cap) { sptIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data); spt_CheckOSError(!newdata, "IdxVec Resize"); vec->len = size; vec->cap = newcap; vec->data = newdata; } else { vec->len = size; } return 0; } //numa int sptResizeIndexVectorNuma(sptIndexVector *vec, sptNnzIndex const size) { sptNnzIndex newcap = size < 2 ? 2 : size; if(newcap != vec->cap) { sptIndex *newdata = numa_realloc(vec->data, vec->cap * sizeof *vec->data, newcap * sizeof *vec->data); spt_CheckOSError(!newdata, "IdxVec Resize"); vec->len = size; vec->cap = newcap; vec->data = newdata; } else { vec->len = size; } return 0; } long int sptInIndexVector(sptIndexVector * inds, sptNnzIndex nmodes, sptNnzIndex nnz, sptIndexVector * cand_inds) { int mark; for (sptNnzIndex i = 0; i < nnz; ++i) { mark = 1; for(sptIndex m = 0; m < nmodes; ++m) { if(cand_inds->data[m] != inds[m].data[i] ) { mark = 0; break; // no need to compare other modes } } if (mark == 1) return i; } return -1; } /** * Release the memory buffer a sptIndexVector is holding * * @param vec a pointer to a valid size vector * */ void sptFreeIndexVector(sptIndexVector *vec) { free(vec->data); vec->len = 0; vec->cap = 0; } /* * Initialize a new sptElementIndexVector vector * * @param vec a valid pointer to an uninitialized sptElementIndex variable, * @param len number of values to create * @param cap total number of values to reserve * * Vector is a type of one-dimentional array with dynamic length */ int sptNewElementIndexVector(sptElementIndexVector *vec, sptNnzIndex len, sptNnzIndex cap) { if(cap < len) { cap = len; } if(cap < 2) { cap = 2; } vec->len = len; vec->cap = cap; vec->data = malloc(cap * sizeof *vec->data); spt_CheckOSError(!vec->data, "EleIdxVec New"); memset(vec->data, 0, cap * sizeof *vec->data); return 0; } /** * Fill an existed dense element index vector with a specified constant * * @param vec a valid pointer to an existed sptElementIndexVector variable, * @param num a given value 
constant * * Vector is a type of one-dimentional array with dynamic length */ int sptConstantElementIndexVector(sptElementIndexVector * const vec, sptElementIndex const num) { for(sptNnzIndex i=0; i<vec->len; ++i) vec->data[i] = num; return 0; } /** * Copy an element index vector to an uninitialized element index vector * * @param dest a pointer to an uninitialized element index vector * @param src a pointer to an existing valid element index vector * * The contents of `src` will be copied to `dest`. */ int sptCopyElementIndexVector(sptElementIndexVector *dest, const sptElementIndexVector *src) { int result = sptNewElementIndexVector(dest, src->len, src->len); spt_CheckError(result, "EleIdxVec Copy", NULL); memcpy(dest->data, src->data, src->len * sizeof *src->data); return 0; } /** * Add a value to the end of a sptElementIndexVector * * @param vec a pointer to a valid element index vector * @param value the value to be appended * * The length of the element index vector will be changed to contain the new value. */ int sptAppendElementIndexVector(sptElementIndexVector *vec, sptElementIndex const value) { if(vec->cap <= vec->len) { #ifndef MEMCHECK_MODE sptNnzIndex newcap = vec->cap + vec->cap/2; #else sptNnzIndex newcap = vec->len+1; #endif sptElementIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data); spt_CheckOSError(!newdata, "EleIdxVec Append"); vec->cap = newcap; vec->data = newdata; } vec->data[vec->len] = value; ++vec->len; return 0; } /** * Add a value to the end of an element index vector * * @param vec a pointer to a valid element index vector * @param append_vec a pointer to another element index vector, containing the values to be appended * * The values from `append_vec` will be appended to `vec`. 
*/ int sptAppendElementIndexVectorWithVector(sptElementIndexVector *vec, const sptElementIndexVector *append_vec) { sptNnzIndex newlen = vec->len + append_vec->len; if(vec->cap <= newlen) { sptNnzIndex newcap = vec->cap + append_vec->cap; sptElementIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data); spt_CheckOSError(!newdata, "EleIdxVec Append EleIdxVec"); vec->cap = newcap; vec->data = newdata; } for(sptNnzIndex i=0; i<append_vec->len; ++i) { vec->data[vec->len + i] = append_vec->data[i]; } vec->len = newlen; return 0; } /** * Resize a element index vector * * @param vec the element index vector to resize * @param size the new size of the element index vector * * If the new size is larger than the current size, new values will be appended * but the values of them are undefined. If the new size if smaller than the * current size, values at the end will be truncated. */ int sptResizeElementIndexVector(sptElementIndexVector *vec, sptNnzIndex const size) { sptNnzIndex newcap = size < 2 ? 
2 : size; if(newcap != vec->cap) { sptElementIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data); spt_CheckOSError(!newdata, "EleIdxVec Resize"); vec->len = size; vec->cap = newcap; vec->data = newdata; } else { vec->len = size; } return 0; } /** * Release the memory buffer a sptElementIndexVector is holding * * @param vec a pointer to a valid size vector * */ void sptFreeElementIndexVector(sptElementIndexVector *vec) { free(vec->data); vec->len = 0; vec->cap = 0; } /* * Initialize a new sptBlockIndexVector vector * * @param vec a valid pointer to an uninitialized sptBlockIndex variable, * @param len number of values to create * @param cap total number of values to reserve * * Vector is a type of one-dimentional array with dynamic length */ int sptNewBlockIndexVector(sptBlockIndexVector *vec, sptNnzIndex len, sptNnzIndex cap) { if(cap < len) { cap = len; } if(cap < 2) { cap = 2; } vec->len = len; vec->cap = cap; vec->data = malloc(cap * sizeof *vec->data); spt_CheckOSError(!vec->data, "BlkIdxVec New"); memset(vec->data, 0, cap * sizeof *vec->data); return 0; } /** * Fill an existed dense element index vector with a specified constant * * @param vec a valid pointer to an existed sptBlockIndexVector variable, * @param num a given value constant * * Vector is a type of one-dimentional array with dynamic length */ int sptConstantBlockIndexVector(sptBlockIndexVector * const vec, sptBlockIndex const num) { for(sptNnzIndex i=0; i<vec->len; ++i) vec->data[i] = num; return 0; } /** * Copy a block index vector to an uninitialized block index vector * * @param dest a pointer to an uninitialized block index vector * @param src a pointer to an existing valid block index vector * * The contents of `src` will be copied to `dest`. 
*/ int sptCopyBlockIndexVector(sptBlockIndexVector *dest, const sptBlockIndexVector *src) { int result = sptNewBlockIndexVector(dest, src->len, src->len); spt_CheckError(result, "BlkIdxVec Copy", NULL); memcpy(dest->data, src->data, src->len * sizeof *src->data); return 0; } /** * Add a value to the end of a sptBlockIndexVector * * @param vec a pointer to a valid block index vector * @param value the value to be appended * * The length of the block index vector will be changed to contain the new value. */ int sptAppendBlockIndexVector(sptBlockIndexVector *vec, sptBlockIndex const value) { if(vec->cap <= vec->len) { #ifndef MEMCHECK_MODE sptNnzIndex newcap = vec->cap + vec->cap/2; #else sptNnzIndex newcap = vec->len+1; #endif sptBlockIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data); spt_CheckOSError(!newdata, "BlkIdxVec Append"); vec->cap = newcap; vec->data = newdata; } vec->data[vec->len] = value; ++vec->len; return 0; } /** * Add a value to the end of a block index vector * * @param vec a pointer to a valid block index vector * @param append_vec a pointer to another block index vector, containing the values to be appended * * The values from `append_vec` will be appended to `vec`. 
*/ int sptAppendBlockIndexVectorWithVector(sptBlockIndexVector *vec, const sptBlockIndexVector *append_vec) { sptNnzIndex newlen = vec->len + append_vec->len; if(vec->cap <= newlen) { sptNnzIndex newcap = vec->cap + append_vec->cap; sptBlockIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data); spt_CheckOSError(!newdata, "BlkIdxVec Append BlkIdxVec"); vec->cap = newcap; vec->data = newdata; } for(sptNnzIndex i=0; i<append_vec->len; ++i) { vec->data[vec->len + i] = append_vec->data[i]; } vec->len = newlen; return 0; } /** * Resize a block index vector * * @param vec the block index vector to resize * @param size the new size of the block index vector * * If the new size is larger than the current size, new values will be appended * but the values of them are undefined. If the new size if smaller than the * current size, values at the end will be truncated. */ int sptResizeBlockIndexVector(sptBlockIndexVector *vec, sptNnzIndex const size) { sptNnzIndex newcap = size < 2 ? 2 : size; if(newcap != vec->cap) { sptBlockIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data); spt_CheckOSError(!newdata, "BlkIdxVec Resize"); vec->len = size; vec->cap = newcap; vec->data = newdata; } else { vec->len = size; } return 0; } /** * Release the memory buffer a sptBlockIndexVector is holding * * @param vec a pointer to a valid size vector * */ void sptFreeBlockIndexVector(sptBlockIndexVector *vec) { free(vec->data); vec->len = 0; vec->cap = 0; } /* * Initialize a new sptNnzIndexVector vector * * @param vec a valid pointer to an uninitialized sptNnzIndex variable, * @param len number of values to create * @param cap total number of values to reserve * * Vector is a type of one-dimentional array with dynamic length */ int sptNewNnzIndexVector(sptNnzIndexVector *vec, sptNnzIndex len, sptNnzIndex cap) { if(cap < len) { cap = len; } if(cap < 2) { cap = 2; } vec->len = len; vec->cap = cap; vec->data = malloc(cap * sizeof *vec->data); spt_CheckOSError(!vec->data, 
"NnzIdxVec New"); memset(vec->data, 0, cap * sizeof *vec->data); return 0; } /** * Fill an existed dense long nnz index vector with a specified constant * * @param vec a valid pointer to an existed sptNnzIndexVector variable, * @param num a given value constant * * Vector is a type of one-dimentional array with dynamic length */ int sptConstantNnzIndexVector(sptNnzIndexVector * const vec, sptNnzIndex const num) { for(sptNnzIndex i=0; i<vec->len; ++i) vec->data[i] = num; return 0; } /** * Copy a long nnz index vector to an uninitialized long nnz index vector * * @param dest a pointer to an uninitialized long nnz index vector * @param src a pointer to an existing valid long nnz index vector * * The contents of `src` will be copied to `dest`. */ int sptCopyNnzIndexVector(sptNnzIndexVector *dest, const sptNnzIndexVector *src) { int result = sptNewNnzIndexVector(dest, src->len, src->len); spt_CheckError(result, "NnzIdxVec Copy", NULL); memcpy(dest->data, src->data, src->len * sizeof *src->data); return 0; } /** * Add a value to the end of a sptNnzIndexVector * * @param vec a pointer to a valid long nnz index vector * @param value the value to be appended * * The length of the long nnz index vector will be changed to contain the new value. */ int sptAppendNnzIndexVector(sptNnzIndexVector *vec, sptNnzIndex const value) { if(vec->cap <= vec->len) { #ifndef MEMCHECK_MODE sptNnzIndex newcap = vec->cap + vec->cap/2; #else sptNnzIndex newcap = vec->len+1; #endif sptNnzIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data); spt_CheckOSError(!newdata, "NnzIdxVec Append"); vec->cap = newcap; vec->data = newdata; } vec->data[vec->len] = value; ++vec->len; return 0; } /** * Add a value to the end of a long nnz index vector * * @param vec a pointer to a valid long nnz index vector * @param append_vec a pointer to another long nnz index vector, containing the values to be appended * * The values from `append_vec` will be appended to `vec`. 
*/ int sptAppendNnzIndexVectorWithVector(sptNnzIndexVector *vec, const sptNnzIndexVector *append_vec) { sptNnzIndex newlen = vec->len + append_vec->len; if(vec->cap <= newlen) { sptNnzIndex newcap = vec->cap + append_vec->cap; sptNnzIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data); spt_CheckOSError(!newdata, "NnzIdxVec Append NnzIdxVec"); vec->cap = newcap; vec->data = newdata; } for(sptNnzIndex i=0; i<append_vec->len; ++i) { vec->data[vec->len + i] = append_vec->data[i]; } vec->len = newlen; return 0; } /** * Resize a long nnz index vector * * @param vec the long nnz index vector to resize * @param size the new size of the long nnz index vector * * If the new size is larger than the current size, new values will be appended * but the values of them are undefined. If the new size if smaller than the * current size, values at the end will be truncated. */ int sptResizeNnzIndexVector(sptNnzIndexVector *vec, sptNnzIndex const size) { sptNnzIndex newcap = size < 2 ? 2 : size; if(newcap != vec->cap) { sptNnzIndex *newdata = realloc(vec->data, newcap * sizeof *vec->data); spt_CheckOSError(!newdata, "NnzIdxVec Resize"); vec->len = size; vec->cap = newcap; vec->data = newdata; } else { vec->len = size; } return 0; } /** * Release the memory buffer a sptNnzIndexVector is holding * * @param vec a pointer to a valid long nnz vector * */ void sptFreeNnzIndexVector(sptNnzIndexVector *vec) { free(vec->data); vec->len = 0; vec->cap = 0; }
GB_unop__atanh_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__atanh_fp64_fp64)
// op(A') function:  GB (_unop_tran__atanh_fp64_fp64)

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = atanh (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = atanh (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ; \
    Cx [pC] = atanh (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ATANH || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise atanh over anz entries.  When Ab is non-NULL, A is bitmap and
// only the entries with Ab [p] true are computed; the other Cx entries are
// left untouched (C->b was already copied from A->b by the caller).
GrB_Info GB (_unop_apply__atanh_fp64_fp64)
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = atanh (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = atanh (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is supplied by the shared template, which uses
// the GB_CAST_OP macro defined above.
GrB_Info GB (_unop_tran__atanh_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
DRB068-restrictpointer2-orig-no.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation.  It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header.  GCC knows the name of this
   header in order to preinclude it.  */

/* glibc's intent is to support the IEC 559 math functionality, real
   and complex.  If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default.  */

/* wchar_t uses Unicode 10.0.0.  Version 10.0 of the Unicode Standard is
   synchronized with ISO/IEC 10646:2017, fifth edition, plus the
   following additions from Amendment 1 to the fifth edition:
   - 56 emoji characters
   - 285 hentaigana
   - 3 additional Zanabazar Square characters */

/* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
   Produced at the Lawrence Livermore National Laboratory
   Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
   Markus Schordan, and Ian Karlin
   (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
   schordan1@llnl.gov, karlin1@llnl.gov)
   LLNL-CODE-732144
   All rights reserved.

   This file is part of DataRaceBench.  For details, see
   https://github.com/LLNL/dataracebench.  Please also see the LICENSE
   file for our additional BSD notice.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   * Redistributions of source code must retain the above copyright
     notice, this list of conditions and the disclaimer below.
   * Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the disclaimer (as noted below)
     in the documentation and/or other materials provided with the
     distribution.
   * Neither the name of the LLNS/LLNL nor the names of its contributors
     may be used to endorse or promote products derived from this
     software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
   FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL LAWRENCE
   LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
   CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
   OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
   USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
   DAMAGE.  */

/* DataRaceBench DRB068 ("orig-no": this kernel contains NO data race).

   The restrict type qualifier is an indication to the compiler that,
   if the memory addressed by the restrict-qualified pointer is
   modified, no other pointer will access that same memory.  If a
   particular chunk of memory is not modified, it can be aliased
   through more than one restricted pointer.

   A C99 restrict feature.  For gcc, you must use -std=c99 to compile
   this program.  The #pragma cetus / #pragma loop name annotations
   below were emitted by the Cetus source-to-source parallelizer and
   must be preserved verbatim for the benchmark to stay meaningful. */

#include <stdlib.h>
#include <stdio.h>

/* Initialize the three length-n arrays; the loop is independent per
   iteration, so the OpenMP parallelization is race-free. */
void init(int n, int * restrict a, int * restrict b, int * restrict c)
{
	int i;
	#pragma cetus private(i) 
	#pragma loop name init#0 
	#pragma cetus parallel 
	#pragma omp parallel for private(i)
	for (i=0; i<n; i ++ )
	{
		a[i]=1;
		b[i]=i;
		c[i]=(i*i);
	}
	return ;
}

/* a[i] = b[i] + c[i], element-wise; restrict guarantees no aliasing,
   so the parallel loop is race-free. */
void foo(int n, int * restrict a, int * restrict b, int * restrict c)
{
	int i;
	#pragma cetus private(i) 
	#pragma loop name foo#0 
	#pragma cetus parallel 
	#pragma omp parallel for private(i)
	for (i=0; i<n; i ++ )
	{
		a[i]=(b[i]+c[i]);
	}
	return ;
}

/* Print the arrays sequentially (Cetus did not parallelize this loop:
   printf ordering must be preserved). */
void print(int n, int * restrict a, int * restrict b, int * restrict c)
{
	int i;
	#pragma cetus private(i) 
	#pragma loop name print#0 
	for (i=0; i<n; i ++ )
	{
		printf("%d %d %d\n", a[i], b[i], c[i]);
	}
	return ;
}

/* Driver: allocate three arrays of n ints, run init/foo/print, free.
   Each malloc is checked; on failure the benchmark is skipped with
   exit status 1. */
int main()
{
	int n = 1000;
	int * a, * b, * c;
	int _ret_val_0;
	a=((int * )malloc(n*sizeof (int)));
	if (a==0)
	{
		fprintf(stderr, "skip the execution due to malloc failures.\n");
		_ret_val_0=1;
		return _ret_val_0;
	}
	b=((int * )malloc(n*sizeof (int)));
	if (b==0)
	{
		fprintf(stderr, "skip the execution due to malloc failures.\n");
		_ret_val_0=1;
		return _ret_val_0;
	}
	c=((int * )malloc(n*sizeof (int)));
	if (c==0)
	{
		fprintf(stderr, "skip the execution due to malloc failures.\n");
		_ret_val_0=1;
		return _ret_val_0;
	}
	init(n, a, b, c);
	foo(n, a, b, c);
	print(n, a, b, c);
	free(a);
	free(b);
	free(c);
	_ret_val_0=0;
	return _ret_val_0;
}
GB_binop__pow_int8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): any fix belongs in the Generator/ template, not here; edits to
// this file are lost on the next code-generation pass.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):      GB (_AaddB__pow_int8)
// A.*B function (eWiseMult):    GB (_AemultB_08__pow_int8)
// A.*B function (eWiseMult):    GB (_AemultB_02__pow_int8)
// A.*B function (eWiseMult):    GB (_AemultB_04__pow_int8)
// A.*B function (eWiseMult):    GB (_AemultB_bitmap__pow_int8)
// A*D function (colscale):      GB ((none))
// D*A function (rowscale):      GB ((none))
// C+=B function (dense accum):  GB (_Cdense_accumB__pow_int8)
// C+=b function (dense accum):  GB (_Cdense_accumb__pow_int8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pow_int8)
// C=scalar+B       GB (_bind1st__pow_int8)
// C=scalar+B'      GB (_bind1st_tran__pow_int8)
// C=A+scalar       GB (_bind2nd__pow_int8)
// C=A'+scalar      GB (_bind2nd_tran__pow_int8)

// C type:   int8_t
// A type:   int8_t
// A pattern?  0
// B,b type: int8_t
// B pattern?  0

// BinaryOp: cij = GB_pow_int8 (aij, bij)
// (GB_pow_int8 is presumably integer exponentiation, defined in GB.h — verify)

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// (the trailing backslash splices the following blank line; generator artifact)
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_pow_int8 (x, y) ;

// true if the binop must be flipped
// (pow is not commutative, so flipxy must be handled explicitly below)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_POW || GxB_NO_INT8 || GxB_NO_POW_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (POW is none of these, so this variant is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__pow_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pow_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pow_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable duplicate return; harmless generator artifact
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pow_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read for eWiseUnion (fill values for the
    // non-intersecting part of the pattern)
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__pow_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__pow_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__pow_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__pow_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__pow_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (NULL if B is not bitmap)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_pow_int8 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__pow_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A (NULL if A is not bitmap)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_pow_int8 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_pow_int8 (x, aij) ;            \
}

GrB_Info GB (_bind1st_tran__pow_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_pow_int8 (aij, y) ;            \
}

GrB_Info GB (_bind2nd_tran__pow_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__pow_int32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): any fix belongs in the Generator/ template, not here; edits to
// this file are lost on the next code-generation pass.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):      GB (_AaddB__pow_int32)
// A.*B function (eWiseMult):    GB (_AemultB_08__pow_int32)
// A.*B function (eWiseMult):    GB (_AemultB_02__pow_int32)
// A.*B function (eWiseMult):    GB (_AemultB_04__pow_int32)
// A.*B function (eWiseMult):    GB (_AemultB_bitmap__pow_int32)
// A*D function (colscale):      GB ((none))
// D*A function (rowscale):      GB ((none))
// C+=B function (dense accum):  GB (_Cdense_accumB__pow_int32)
// C+=b function (dense accum):  GB (_Cdense_accumb__pow_int32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pow_int32)
// C=scalar+B       GB (_bind1st__pow_int32)
// C=scalar+B'      GB (_bind1st_tran__pow_int32)
// C=A+scalar       GB (_bind2nd__pow_int32)
// C=A'+scalar      GB (_bind2nd_tran__pow_int32)

// C type:   int32_t
// A type:   int32_t
// A pattern?  0
// B,b type: int32_t
// B pattern?  0

// BinaryOp: cij = GB_pow_int32 (aij, bij)
// (GB_pow_int32 is presumably integer exponentiation, defined in GB.h — verify)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// (the trailing backslash splices the following blank line; generator artifact)
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_pow_int32 (x, y) ;

// true if the binop must be flipped
// (pow is not commutative, so flipxy must be handled explicitly below)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_POW || GxB_NO_INT32 || GxB_NO_POW_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (POW is none of these, so this variant is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__pow_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pow_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pow_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable duplicate return; harmless generator artifact
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pow_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read for eWiseUnion (fill values for the
    // non-intersecting part of the pattern)
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__pow_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__pow_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__pow_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__pow_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__pow_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (NULL if B is not bitmap)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_pow_int32 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__pow_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A (NULL if A is not bitmap)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_pow_int32 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_pow_int32 (x, aij) ;           \
}

GrB_Info GB (_bind1st_tran__pow_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_pow_int32 (aij, y) ;           \
}

GrB_Info GB (_bind2nd_tran__pow_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
sort.c
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /**********************************************************************************************/ /* * Original code from the Cilk project * * Copyright (c) 2000 Massachusetts Institute of Technology * Copyright (c) 2000 Matteo Frigo */ /* * this program uses an algorithm that we call `cilksort'. * The algorithm is essentially mergesort: * * cilksort(in[1..n]) = * spawn cilksort(in[1..n/2], tmp[1..n/2]) * spawn cilksort(in[n/2..n], tmp[n/2..n]) * sync * spawn cilkmerge(tmp[1..n/2], tmp[n/2..n], in[1..n]) * * * The procedure cilkmerge does the following: * * cilkmerge(A[1..n], B[1..m], C[1..(n+m)]) = * find the median of A \union B using binary * search. The binary search gives a pair * (ma, mb) such that ma + mb = (n + m)/2 * and all elements in A[1..ma] are smaller than * B[mb..m], and all the B[1..mb] are smaller * than all elements in A[ma..n]. 
* * spawn cilkmerge(A[1..ma], B[1..mb], C[1..(n+m)/2]) * spawn cilkmerge(A[ma..m], B[mb..n], C[(n+m)/2 .. (n+m)]) * sync * * The algorithm appears for the first time (AFAIK) in S. G. Akl and * N. Santoro, "Optimal Parallel Merging and Sorting Without Memory * Conflicts", IEEE Trans. Comp., Vol. C-36 No. 11, Nov. 1987 . The * paper does not express the algorithm using recursion, but the * idea of finding the median is there. * * For cilksort of n elements, T_1 = O(n log n) and * T_\infty = O(log^3 n). There is a way to shave a * log factor in the critical path (left as homework). */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "bots.h" #include "app-desc.h" typedef long ELM; ELM *array, *tmp; static unsigned long rand_nxt = 0; static inline unsigned long my_rand(void) { rand_nxt = rand_nxt * 1103515245 + 12345; return rand_nxt; } static inline void my_srand(unsigned long seed) { rand_nxt = seed; } static inline ELM med3(ELM a, ELM b, ELM c) { if (a < b) { if (b < c) { return b; } else { if (a < c) return c; else return a; } } else { if (b > c) { return b; } else { if (a > c) return c; else return a; } } } /* * simple approach for now; a better median-finding * may be preferable */ static inline ELM choose_pivot(ELM *low, ELM *high) { return med3(*low, *high, low[(high - low) / 2]); } static ELM *seqpart(ELM *low, ELM *high) { ELM pivot; ELM h, l; ELM *curr_low = low; ELM *curr_high = high; pivot = choose_pivot(low, high); while (1) { while ((h = *curr_high) > pivot) curr_high--; while ((l = *curr_low) < pivot) curr_low++; if (curr_low >= curr_high) break; *curr_high-- = l; *curr_low++ = h; } /* * I don't know if this is really necessary. * The problem is that the pivot is not always the * first element, and the partition may be trivial. * However, if the partition is trivial, then * *high is the largest element, whence the following * code. 
*/ if (curr_high < high) return curr_high; else return curr_high - 1; } #define swap(a, b) \ { \ ELM tmp;\ tmp = a;\ a = b;\ b = tmp;\ } static void insertion_sort(ELM *low, ELM *high) { ELM *p, *q; ELM a, b; for (q = low + 1; q <= high; ++q) { a = q[0]; for (p = q - 1; p >= low && (b = p[0]) > a; p--) p[1] = b; p[1] = a; } } /* * tail-recursive quicksort, almost unrecognizable :-) */ void seqquick(ELM *low, ELM *high) { ELM *p; while (high - low >= bots_app_cutoff_value_2) { p = seqpart(low, high); seqquick(low, p); low = p + 1; } insertion_sort(low, high); } void seqmerge(ELM *low1, ELM *high1, ELM *low2, ELM *high2, ELM *lowdest) { ELM a1, a2; /* * The following 'if' statement is not necessary * for the correctness of the algorithm, and is * in fact subsumed by the rest of the function. * However, it is a few percent faster. Here is why. * * The merging loop below has something like * if (a1 < a2) { * *dest++ = a1; * ++low1; * if (end of array) break; * a1 = *low1; * } * * Now, a1 is needed immediately in the next iteration * and there is no way to mask the latency of the load. * A better approach is to load a1 *before* the end-of-array * check; the problem is that we may be speculatively * loading an element out of range. While this is * probably not a problem in practice, yet I don't feel * comfortable with an incorrect algorithm. Therefore, * I use the 'fast' loop on the array (except for the last * element) and the 'slow' loop for the rest, saving both * performance and correctness. 
*/ if (low1 < high1 && low2 < high2) { a1 = *low1; a2 = *low2; for (;;) { if (a1 < a2) { *lowdest++ = a1; a1 = *++low1; if (low1 >= high1) break; } else { *lowdest++ = a2; a2 = *++low2; if (low2 >= high2) break; } } } if (low1 <= high1 && low2 <= high2) { a1 = *low1; a2 = *low2; for (;;) { if (a1 < a2) { *lowdest++ = a1; ++low1; if (low1 > high1) break; a1 = *low1; } else { *lowdest++ = a2; ++low2; if (low2 > high2) break; a2 = *low2; } } } if (low1 > high1) { memcpy(lowdest, low2, sizeof(ELM) * (high2 - low2 + 1)); } else { memcpy(lowdest, low1, sizeof(ELM) * (high1 - low1 + 1)); } } #define swap_indices(a, b) \ { \ ELM *tmp;\ tmp = a;\ a = b;\ b = tmp;\ } ELM *binsplit(ELM val, ELM *low, ELM *high) { /* * returns index which contains greatest element <= val. If val is * less than all elements, returns low-1 */ ELM *mid; while (low != high) { mid = low + ((high - low + 1) >> 1); if (val <= *mid) high = mid - 1; else low = mid; } if (*low > val) return low - 1; else return low; } void cilkmerge_par(ELM *low1, ELM *high1, ELM *low2, ELM *high2, ELM *lowdest) { /* * Cilkmerge: Merges range [low1, high1] with range [low2, high2] * into the range [lowdest, ...] */ ELM *split1, *split2; /* * where each of the ranges are broken for * recursive merge */ long int lowsize; /* * total size of lower halves of two * ranges - 2 */ /* * We want to take the middle element (indexed by split1) from the * larger of the two arrays. The following code assumes that split1 * is taken from range [low1, high1]. So if [low1, high1] is * actually the smaller range, we should swap it with [low2, high2] */ if (high2 - low2 > high1 - low1) { swap_indices(low1, low2); swap_indices(high1, high2); } if (high2 < low2) { /* smaller range is empty */ memcpy(lowdest, low1, sizeof(ELM) * (high1 - low1)); return; } if (high2 - low2 < bots_app_cutoff_value ) { seqmerge(low1, high1, low2, high2, lowdest); return; } /* * Basic approach: Find the middle element of one range (indexed by * split1). 
Find where this element would fit in the other range * (indexed by split 2). Then merge the two lower halves and the two * upper halves. */ split1 = ((high1 - low1 + 1) / 2) + low1; split2 = binsplit(*split1, low2, high2); lowsize = split1 - low1 + split2 - low2; /* * directly put the splitting element into * the appropriate location */ *(lowdest + lowsize + 1) = *split1; #pragma omp task untied cilkmerge_par(low1, split1 - 1, low2, split2, lowdest); #pragma omp task untied cilkmerge_par(split1 + 1, high1, split2 + 1, high2, lowdest + lowsize + 2); #pragma omp taskwait return; } void cilksort_par(ELM *low, ELM *tmp, long size) { /* * divide the input in four parts of the same size (A, B, C, D) * Then: * 1) recursively sort A, B, C, and D (in parallel) * 2) merge A and B into tmp1, and C and D into tmp2 (in parallel) * 3) merge tmp1 and tmp2 into the original array */ long quarter = size / 4; ELM *A, *B, *C, *D, *tmpA, *tmpB, *tmpC, *tmpD; if (size < bots_app_cutoff_value_1 ) { /* quicksort when less than 1024 elements */ seqquick(low, low + size - 1); return; } A = low; tmpA = tmp; B = A + quarter; tmpB = tmpA + quarter; C = B + quarter; tmpC = tmpB + quarter; D = C + quarter; tmpD = tmpC + quarter; #pragma omp task untied cilksort_par(A, tmpA, quarter); #pragma omp task untied cilksort_par(B, tmpB, quarter); #pragma omp task untied cilksort_par(C, tmpC, quarter); #pragma omp task untied cilksort_par(D, tmpD, size - 3 * quarter); #pragma omp taskwait #pragma omp task untied cilkmerge_par(A, A + quarter - 1, B, B + quarter - 1, tmpA); #pragma omp task untied cilkmerge_par(C, C + quarter - 1, D, low + size - 1, tmpC); #pragma omp taskwait cilkmerge_par(tmpA, tmpC - 1, tmpC, tmpA + size - 1, A); } void scramble_array( ELM *array ) { unsigned long i; unsigned long j; for (i = 0; i < bots_arg_size; ++i) { j = my_rand(); j = j % bots_arg_size; swap(array[i], array[j]); } } void fill_array( ELM *array ) { unsigned long i; my_srand(1); /* first, fill with integers 1..size 
*/ for (i = 0; i < bots_arg_size; ++i) { array[i] = i; } } void sort_init ( void ) { /* Checking arguments */ if (bots_arg_size < 4) { bots_message("%s can not be less than 4, using 4 as a parameter.\n", BOTS_APP_DESC_ARG_SIZE ); bots_arg_size = 4; } if (bots_app_cutoff_value < 2) { bots_message("%s can not be less than 2, using 2 as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF); bots_app_cutoff_value = 2; } else if (bots_app_cutoff_value > bots_arg_size ) { bots_message("%s can not be greather than vector size, using %d as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF, bots_arg_size); bots_app_cutoff_value = bots_arg_size; } if (bots_app_cutoff_value_1 > bots_arg_size ) { bots_message("%s can not be greather than vector size, using %d as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF_1, bots_arg_size); bots_app_cutoff_value_1 = bots_arg_size; } if (bots_app_cutoff_value_2 > bots_arg_size ) { bots_message("%s can not be greather than vector size, using %d as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF_2, bots_arg_size); bots_app_cutoff_value_2 = bots_arg_size; } if (bots_app_cutoff_value_2 > bots_app_cutoff_value_1) { bots_message("%s can not be greather than %s, using %d as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF_2, BOTS_APP_DESC_ARG_CUTOFF_1, bots_app_cutoff_value_1 ); bots_app_cutoff_value_2 = bots_app_cutoff_value_1; } array = (ELM *) malloc(bots_arg_size * sizeof(ELM)); tmp = (ELM *) malloc(bots_arg_size * sizeof(ELM)); fill_array(array); scramble_array(array); } void sort_par ( void ) { bots_message("Computing multisort algorithm (n=%d) ", bots_arg_size); #pragma omp parallel #pragma omp single nowait #pragma omp task untied cilksort_par(array, tmp, bots_arg_size); bots_message(" completed!\n"); } int sort_verify ( void ) { int i, success = 1; for (i = 0; i < bots_arg_size; ++i) if (array[i] != i) success = 0; return success ? BOTS_RESULT_SUCCESSFUL : BOTS_RESULT_UNSUCCESSFUL; }
trsm_x_csr_n_hi_row.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>

/*
 * Sparse triangular solve, CSR storage, upper triangle, non-transposed,
 * non-unit diagonal, row-major dense right-hand sides:
 * solves A * y = alpha * x for each of the `columns` RHS columns,
 * by backward substitution over the rows of A.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    const ALPHA_INT m = A->rows;

    /* cache each row's diagonal entry once, up front */
    ALPHA_Number diag[m];
    memset(diag, '\0', m * sizeof(ALPHA_Number));

    const int num_thread = alpha_get_thread_num();

#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT row = 0; row < m; row++)
    {
        for (ALPHA_INT idx = A->rows_start[row]; idx < A->rows_end[row]; idx++)
        {
            if (A->col_indx[idx] == row)
            {
                diag[row] = A->values[idx];
            }
        }
    }

    /* each right-hand-side column is an independent backward substitution */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT c = 0; c < columns; c++)
    {
        for (ALPHA_INT row = m - 1; row >= 0; row--)
        {
            /* accumulate the strictly-upper part: sum over cols > row */
            ALPHA_Number acc;
            alpha_setzero(acc);
            for (ALPHA_INT idx = A->rows_start[row]; idx < A->rows_end[row]; idx++)
            {
                const ALPHA_INT col = A->col_indx[idx];
                if (col > row)
                {
                    alpha_madde(acc, A->values[idx], y[col * ldy + c]);
                }
            }
            /* y[row] = (alpha * x[row] - acc) / diag[row] */
            ALPHA_Number rhs;
            alpha_setzero(rhs);
            alpha_mul(rhs, alpha, x[row * ldx + c]);
            alpha_sube(rhs, acc);
            alpha_div(y[row * ldy + c], rhs, diag[row]);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
DRB014-outofbounds-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
The outmost loop is parallelized.
But the inner level loop has out of bound access for b[i][j] when j equals to 0.
This will case memory access of a previous row's last element.

For example, an array of 4x4:
j=0 1 2 3
i=0 x x x x
1 x x x x
2 x x x x
3 x x x x

outer loop: i=2, inner loop: j=0
array element accessed b[i][j-1] becomes b[2][-1], which in turn is b[1][3]
due to linearized row-major storage of the 2-D array.
This causes loop-carried data dependence between i=2 and i=1.

Data race pair: b[i][j]@75 vs. b[i][j-1]@75.
*/

#include <stdio.h>
#include <stdlib.h>

/*
 * DataRaceBench kernel: the defect in this file is INTENTIONAL — it exists to
 * test race-detection tools. Do not "fix" the parallelization.
 *
 * NOTE(review): the code below does not match the header comment — the second
 * loop nest reads b[i-1][j] (a loop-carried dependence on the *outer* index),
 * not b[i][j-1] as described. Also, `j` is declared at function scope, so it
 * is shared across the threads of the outer `parallel for` in the first nest.
 * Confirm which variant of DRB014 this is intended to be.
 */
int main(int argc, char* argv[])
{
  int i,j;
  int n=100, m=100;
  double b[n][m];

  /* initialization nest: nested parallel regions over i and j */
#pragma omp parallel for
  for (i=1;i<n;i++)
#pragma omp parallel for simd
    for (j=0;j<m;j++)
      b[i][j]= i * j;

  /* copy nest: outer i loop is sequential; inner j loop is parallel */
  for (i=1;i<n;i++)
#pragma omp parallel for simd
    for (j=0;j<m;j++)
      b[i][j]=b[i-1][j];

  printf ("b[50][50]=%f\n",b[50][50]);
  return 0;
}
GB_unaryop__minv_int16_int64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_int16_int64
// op(A') function:  GB_tran__minv_int16_int64

// C type:   int16_t
// A type:   int64_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = GB_IMINV_SIGNED (aij, 16)

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 16) ;

// casting
#define GB_CASTING(z, x) \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
// (compile-time opt-out controlled by GB_control.h)
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT16 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over anz entries; each iteration is independent, hence the
// static-schedule parallel for.
GrB_Info GB_unop__minv_int16_int64
(
    int16_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in GB_unaryop_transpose.c and is specialized here via
// the macros defined above.
GrB_Info GB_tran__minv_int16_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
5961.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m)) { int i, j, j1, j2; #pragma scop /* Determine mean of column vectors of input data matrix */ { #pragma omp target teams distribute thread_limit(128) schedule(dynamic, 28) for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ #pragma omp target teams distribute thread_limit(128) schedule(dynamic, 28) for (i = 0; i < _PB_N; i++) { #pragma omp parallel for schedule(dynamic, 28) for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; } } /* Calculate the m * m covariance matrix. 
*/ #pragma omp target teams distribute thread_limit(128) schedule(dynamic, 28) for (j1 = 0; j1 < _PB_M; j1++) { #pragma omp parallel for schedule(dynamic, 28) for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
Scene.h
#pragma once #include "bitmap_image.hpp" #include "Triangle.h" #include "Plane.h" #include <vector> #include <optional> #include <algorithm> #include <atomic> #include <utility> #include <chrono> #include <CL/cl.h> #pragma comment(lib, "OpenCL.lib") template<typename T> using Vec = std::vector<T>; template<typename T> using Opt = std::optional<T>; using namespace std::chrono; namespace RT { F64 Map(F64 x, F64 in_min, F64 in_max, F64 out_min, F64 out_max) { return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min; } static std::atomic<U64> RaysTraced = 0; static std::atomic<U64> RaysHit = 0; static std::atomic<U64> Overlaps = 0; struct Scene { Bitmap Render(U32 X, U32 Y) const { Bitmap Image(X, Y); Image.set_all_channels(0, 0, 0); auto T1 = high_resolution_clock::now(); RayTrace(Image); auto T2 = high_resolution_clock::now(); auto duration = duration_cast<microseconds>( T2 - T1 ).count(); std::cout << "Traced = " << RaysTraced.load() << "\nHit = " << RaysHit.load() << "\nOverlaps = " << Overlaps.load() << "\nDuration = " << duration << std::endl; //OpenCL(); return Image; } Scene& WithEye(const Point& P) { Eye = P; return *this; } Scene& WithBottomLeft(const Point& T) { BottomLeft = T; return *this; } Scene& WithTopRight(const Point& T) { TopRight = T; return *this; } Scene& WithStep(F64 S) { Step = S; return *this; } Scene& Add(const Triangle& T) { Objects.push_back(T); return *this; } Scene& Light(const Point& P) { Lights.push_back(P); return *this; } Point Eye, BottomLeft, TopRight; Vec<Triangle> Objects; Vec<Point> Lights; F64 Step; F64 CastNearestLight(const Point& Start, const Point& P) { return 0.0; } void AtomicStep(std::atomic<F64>& F) const { auto C = F.load(); while (!F.compare_exchange_weak(C, C + Step)); } private: const char* kernel = R"( __kernel void square(__global float* in, __global float* out) { int i = get_global_id(0); out[i] = in[i] * in[i]; } )"; void OpenCL() const { auto T1 = high_resolution_clock::now(); float* data = new 
float[1024]; float* out = new float[1024]; for (int i = 0; i < 1024; i++) { data[i] = (float)i; } cl_platform_id PlatformID = nullptr; cl_device_id DeviceID = nullptr; cl_uint NumDevices, NumPlatforms; cl_int Ret; Ret = clGetPlatformIDs(1, &PlatformID, &NumPlatforms); Ret = clGetDeviceIDs(PlatformID, CL_DEVICE_TYPE_GPU, 1, &DeviceID, &NumDevices); cl_context Context = clCreateContext(nullptr, 1, &DeviceID, nullptr, nullptr, &Ret); cl_command_queue Queue = clCreateCommandQueue(Context, DeviceID, 0, &Ret); cl_mem DataMem = clCreateBuffer(Context, CL_MEM_READ_ONLY, sizeof(float) * 1024, nullptr, &Ret); cl_mem OutMem = clCreateBuffer(Context, CL_MEM_WRITE_ONLY, sizeof(float) * 1024, nullptr, &Ret); Ret = clEnqueueWriteBuffer(Queue, DataMem, CL_TRUE, 0, sizeof(float) * 1024, data, 0, nullptr, nullptr); cl_program Program = clCreateProgramWithSource(Context, 1, (const char**)&kernel, nullptr, &Ret); Ret = clBuildProgram(Program, 1, &DeviceID, nullptr, nullptr, nullptr); cl_kernel Kernel = clCreateKernel(Program, "square", &Ret); Ret = clSetKernelArg(Kernel, 0, sizeof(cl_mem), &DataMem); Ret = clSetKernelArg(Kernel, 1, sizeof(cl_mem), &OutMem); size_t ItemSize = 1024; size_t ItemGroup = 64; Ret = clEnqueueNDRangeKernel(Queue, Kernel, 1, nullptr, &ItemSize, &ItemGroup, 0, nullptr, nullptr); float* Output = new float[1024]; Ret = clEnqueueReadBuffer(Queue, OutMem, CL_TRUE, 0, sizeof(float) * 1024, Output, 0, nullptr, nullptr); auto T2 = high_resolution_clock::now(); auto duration = duration_cast<microseconds>(T2 - T1).count(); std::cout << "OpenCL took " << duration << " microseconds" << std::endl; Ret = clFlush(Queue); Ret = clFinish(Queue); Ret = clReleaseKernel(Kernel); Ret = clReleaseProgram(Program); Ret = clReleaseMemObject(DataMem); Ret = clReleaseMemObject(OutMem); Ret = clReleaseCommandQueue(Queue); Ret = clReleaseContext(Context); } void RayTrace(Bitmap& Img) const { std::atomic<F64> X, Y; X.store(BottomLeft.X); while(X <= TopRight.X) { Y.store(BottomLeft.Y); 
#pragma omp parallel while(Y <= TopRight.Y) { TraceOnePoint(Img, { X, Y, BottomLeft.Z }); AtomicStep(Y); } AtomicStep(X); } } void TraceOnePoint(Bitmap& Img, const Point& P) const { //colour for pixel U64 R = 0, G = 0, B = 0, //how many overlaps Count = 1; //cos average F64 Cos = 0; U64 CosN = 1; F64 Dist = 0; //for every triangle in the scene for(auto& Tri : Objects) { //trace to that triangle auto I = Plane(Tri).Intersect({Eye, P}); //stats RaysTraced++; //if it intersects if(I.Valid && Tri.Contains(I.Location)) { Cos += I.Cos; CosN++; //stats RaysHit++; //figure out the distance between the camera and the hit auto D = I.Location.DistanceBetween(P); //call the vertex shader to get colour auto Colour = Tri.Shader({(U64)(Img.width() * P.X), (U64)(Img.width() * P.Y)}, this, Tri); //if the distance between the hit point and the actual point //is less than the last point this triangle must be closer if(D > Dist) { //stats Overlaps++; //change the counters to make this triangle at the front Dist = D; Count = 1; R = Colour.red; G = Colour.green; B = Colour.blue; } else { //add colour to average for this ray Count++; R += Colour.red; G += Colour.green; B += Colour.blue; } } } //get averages for raycasts R /= Count; G /= Count; B /= Count; Cos /= CosN; //get the pixel to set U32 Col = ((Cos > 0.0) ? (Cos * (Img.width() / 2)) : (Img.width() / 2) + (Cos * -(Img.width() / 2))); Col %= Img.width(); //set the actual pixel Img.set_pixel((U32)(Img.width() * P.X), (U32)(Img.width() * P.Y), (U8)R, (U8)G, (U8)B); } }; }
FGT_fmt_plug.c
/*
 * Fortigate (FortiOS) Password cracker
 *
 * This software is Copyright (c) 2012 Mat G. <mat.jtr at gmail.com>,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification, are permitted.
 *
 * Passwords are located in "config system admin" part of the configuration file :
 *
 * config system admin
 *    edit "<username>"
 *       set password ENC AK1wTiFOMv7mZOTvQNmKQBAY98hZZjSRLxAY8vZp8NlDWU=
 *
 * Password is : AK1|base64encode(salt|hashed_password)
 * where hashed_password is SHA1(salt|password|fortinet_magic)
 *
 * salt is 12 bytes long
 * hashed_password is 20 bytes long (SHA1 salt)
 * encoded password is 47 bytes long (3 bytes for AK1 and 44 bytes of base64encode(salt|hashed_password))
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_FGT;
#elif FMT_REGISTERS_H
john_register_one(&fmt_FGT);
#else

#include <string.h>

#include "common.h"
#include "formats.h"
#include "misc.h"
#include "sha.h"
#include "base64.h"
#include "simd-intrinsics.h"

#ifdef _OPENMP
#include <omp.h>
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 8192
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 32768 // tuned on AMD K8 dual-HT (XOP)
#endif
#endif // __MIC__
#endif

#include "memdbg.h"

#define FORMAT_LABEL "Fortigate"
#define FORMAT_NAME "FortiOS"
#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 32
#define CIPHERTEXT_LENGTH 44
#define HASH_LENGTH CIPHERTEXT_LENGTH + 3
#define BINARY_SIZE 20
#define BINARY_ALIGN 4
#define SALT_SIZE 12
#define SALT_ALIGN 4
#define FORTINET_MAGIC "\xa3\x88\xba\x2e\x42\x4c\xb0\x4a\x53\x79\x30\xc1\x31\x07\xcc\x3f\xa1\x32\x90\x29\xa9\x81\x5b\x70"
#define FORTINET_MAGIC_LENGTH 24
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1

/* known-good {ciphertext, plaintext} pairs used by JtR's self-test */
static struct fmt_tests fgt_tests[] = {
	{"AK1wTiFOMv7mZOTvQNmKQBAY98hZZjSRLxAY8vZp8NlDWU=", "fortigate"},
	{"AK1Vd1SCGVtAAT931II/U22WTppAISQkITHOlz0ukIg4nA=", "admin"},
	{"AK1DZLDpqz335ElPtuiNTpguiozY7xVaHjHYnxw6sNlI6A=", "ftnt"},
	{NULL}
};

/* ctx_salt holds SHA1(salt) pre-hashed once per salt; crypt_all clones it
 * per candidate key instead of re-hashing the salt each time */
static SHA_CTX ctx_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int (*saved_key_len);
static uint32_t (*crypt_key)[BINARY_SIZE / sizeof(uint32_t)];

/* Allocate per-candidate buffers; scale key count by OMP_SCALE when threaded. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	int omp_t = 1;

	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key));
	saved_key_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key_len));
}

static void done(void)
{
	MEM_FREE(saved_key_len);
	MEM_FREE(crypt_key);
	MEM_FREE(saved_key);
}

/* A valid ciphertext starts with "AK1" and is exactly HASH_LENGTH (47) chars. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	if (strncmp(ciphertext, "AK1", 3))
		return 0;
	if (strlen(ciphertext) != HASH_LENGTH)
		return 0;
	return 1;
}

/* Decode the base64 payload and return its first SALT_SIZE bytes (the salt). */
static void * get_salt(char *ciphertext)
{
	static union {
		char b[SALT_SIZE];
		uint32_t dummy;
	} out;
	char buf[SALT_SIZE+BINARY_SIZE+1];

	base64_decode(ciphertext+3, CIPHERTEXT_LENGTH, buf);
	memcpy(out.b, buf, SALT_SIZE);
	return out.b;
}

/* Pre-hash the salt into ctx_salt for reuse across all candidates. */
static void set_salt(void *salt)
{
	SHA1_Init(&ctx_salt);
	SHA1_Update(&ctx_salt, salt, SALT_SIZE);
}

static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH+1);
	saved_key_len[index] = strlen(key);
}

static char * get_key(int index)
{
	return saved_key[index];
}

/* Decode the base64 payload and return the 20-byte SHA1 digest after the salt. */
static void * get_binary(char *ciphertext)
{
	static union {
		char b[BINARY_SIZE];
		uint32_t dummy;
	} bin;
	char buf[SALT_SIZE+BINARY_SIZE+1];

	memset(buf, 0, sizeof(buf));
	base64_decode(ciphertext+3, CIPHERTEXT_LENGTH, buf);
	// skip over the 12 bytes of salt and get only the hashed password
	memcpy(bin.b, buf+SALT_SIZE, BINARY_SIZE);
	return bin.b;
}

/* Fast first-word comparison, then a full digest compare on match. */
static int cmp_all(void *binary, int count)
{
	uint32_t b0 = *(uint32_t *)binary;
	int i;

	for (i = 0; i < count; i++) {
		if (b0 != *(uint32_t *)crypt_key[i])
			continue;
		if (!memcmp(binary, crypt_key[i], BINARY_SIZE))
			return 1;
	}
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_key[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Compute SHA1(salt|key|FORTINET_MAGIC) for every queued candidate.
 * NOTE(review): cp points at a string literal; `const char *` would be the
 * better type — confirm the OMP shared() clause still accepts it. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int i=0;
	char *cp=FORTINET_MAGIC;

#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(ctx_salt, count, saved_key, saved_key_len, crypt_key, cp)
#endif
#if defined (_OPENMP) || MAX_KEYS_PER_CRYPT>1
	for (i = 0; i < count; i++)
#endif
	{
		SHA_CTX ctx;

		/* resume from the pre-hashed salt state */
		memcpy(&ctx, &ctx_salt, sizeof(ctx));
		SHA1_Update(&ctx, saved_key[i], saved_key_len[i]);
		SHA1_Update(&ctx, cp, FORTINET_MAGIC_LENGTH);
		SHA1_Final((unsigned char*)crypt_key[i], &ctx);
	}
	return count;
}

/* Hash-table bucket selectors over the first digest word. */
static int get_hash_0(int index) { return ((uint32_t *)(crypt_key[index]))[0] & PH_MASK_0; }
static int get_hash_1(int index) { return ((uint32_t *)(crypt_key[index]))[0] & PH_MASK_1; }
static int get_hash_2(int index) { return ((uint32_t *)(crypt_key[index]))[0] & PH_MASK_2; }
static int get_hash_3(int index) { return ((uint32_t *)(crypt_key[index]))[0] & PH_MASK_3; }
static int get_hash_4(int index) { return ((uint32_t *)(crypt_key[index]))[0] & PH_MASK_4; }
static int get_hash_5(int index) { return ((uint32_t *)(crypt_key[index]))[0] & PH_MASK_5; }
static int get_hash_6(int index) { return ((uint32_t *)(crypt_key[index]))[0] & PH_MASK_6; }

static int salt_hash(void *salt)
{
	uint32_t mysalt = *(uint32_t *)salt;

	return mysalt & (SALT_HASH_SIZE - 1);
}

/* JtR format descriptor: field order is fixed by struct fmt_main. */
struct fmt_main fmt_FGT = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP ,
		{ NULL },
		{ NULL },
		fgt_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
main.c
#include <Windows.h>
#include <tchar.h>

#define MAX_LINE (10)

LRESULT CALLBACK WndProc(HWND, UINT, WPARAM, LPARAM);
inline void SafeFree(void*);
int GetMyFontWidth(HWND, UINT);
RECT* GetRects(HWND, UINT);

/* Minimal multi-line notepad: registers the window class, creates the
   window, and pumps messages until WM_QUIT. */
int APIENTRY _tWinMain(HINSTANCE hIn, HINSTANCE prev, PTSTR cmd, int cShow)
{
	HWND hWnd;
	MSG msg;
	WNDCLASSEX wndClass;
	memset(&wndClass, 0, sizeof(wndClass));

	wndClass.cbSize = sizeof(wndClass);
	wndClass.lpszClassName = TEXT("Note Pad");
	wndClass.lpszMenuName = TEXT("blAs1N's Note Pad");
	wndClass.lpfnWndProc = WndProc;
	wndClass.hbrBackground = (HBRUSH)GetStockObject(WHITE_BRUSH);
	wndClass.style = CS_HREDRAW | CS_VREDRAW;
	wndClass.hCursor = LoadCursor(NULL, IDC_ARROW);
	wndClass.hIcon = LoadIcon(NULL, IDI_APPLICATION);
	wndClass.hIconSm = wndClass.hIcon;
	wndClass.hInstance = hIn;

	RegisterClassEx(&wndClass);

	hWnd = CreateWindow(wndClass.lpszClassName, wndClass.lpszMenuName,
		WS_OVERLAPPEDWINDOW, CW_USEDEFAULT, CW_USEDEFAULT,
		CW_USEDEFAULT, CW_USEDEFAULT, NULL, NULL, hIn, NULL);

	ShowWindow(hWnd, cShow);
	UpdateWindow(hWnd);

	while (GetMessage(&msg, NULL, 0, 0))
	{
		TranslateMessage(&msg);
		DispatchMessage(&msg);
	}

	return 0;
}

/* Window procedure: owns the MAX_LINE text buffers (writeStr), per-line
   character counts, the current line (nowWidth), and the caret. */
LRESULT CALLBACK WndProc(HWND hWnd, UINT iMsg, WPARAM wParam, LPARAM lParam)
{
	static TCHAR* writeStr[MAX_LINE];
	static RECT* windowRect;
	static int counts[MAX_LINE];
	static int nowWidth = 0;
	static const UINT fontSize = 23;

	switch (iMsg)
	{
	case WM_CREATE:
		/* allocate one 100-TCHAR buffer per line */
#pragma omp parallel for
		for (int i = 0; i < MAX_LINE; i++)
		{
			writeStr[i] = (TCHAR*)malloc(100 * sizeof(TCHAR));
			memset(writeStr[i], (TCHAR)0, 100 * sizeof(TCHAR));
		}

		windowRect = GetRects(hWnd, fontSize);
		memset(counts, 0, sizeof(counts));

		CreateCaret(hWnd, NULL, 2, 17);
		ShowCaret(hWnd);
		break;

	case WM_PAINT:
	{
		PAINTSTRUCT ps;
		HDC hDC = BeginPaint(hWnd, &ps);

		HFONT hMyFont = CreateFont(fontSize, 0, 0, 0, FW_NORMAL, 0, 0, 0,
			ANSI_CHARSET, 0, 0, 0, 0, TEXT("consolas"));
		HFONT hOldFont = (HFONT)SelectObject(hDC, hMyFont);

		for (int i = 0; i <= nowWidth; i++)
			DrawText(hDC, writeStr[i], counts[i], &windowRect[i],
				DT_LEFT | DT_SINGLELINE);

		SIZE size;

		if (writeStr[nowWidth][0] != (TCHAR)0)
			/* FIX: the original computed `GetTextExtentPoint(...) - 2` and
			   discarded the result (subtracting from the BOOL return value).
			   The `- 2` was a no-op and has been removed.
			   NOTE(review): it may have been intended as `size.cx -= 2` for
			   caret placement — confirm before changing behavior. */
			GetTextExtentPoint(hDC, writeStr[nowWidth],
				_tcslen(writeStr[nowWidth]), &size);
		else
		{
			size.cx = 0;
			size.cy = GetMyFontWidth(hWnd, fontSize) + 11;
		}

		SetCaretPos(size.cx, nowWidth * size.cy);

		SelectObject(hDC, hOldFont);
		DeleteObject(hMyFont);
		EndPaint(hWnd, &ps);
		break;
	}

	case WM_CHAR:
	{
		if (wParam == VK_BACK)
		{
			if (nowWidth > 9) nowWidth = 9;
			if (counts[0] > 0) counts[nowWidth]--;

			/* stepping back past the start of a line moves to the previous line */
			if (nowWidth > 0 && counts[nowWidth] < 0)
			{
				counts[nowWidth--] = 0;
				counts[nowWidth]--;
			}
		}

		else if (wParam == VK_RETURN || counts[nowWidth] > 99)
		{
			if (nowWidth < MAX_LINE - 1)
				nowWidth++;
		}

		else if (nowWidth < MAX_LINE)
			writeStr[nowWidth][counts[nowWidth]++] = (TCHAR)wParam;

		/* keep the current line NUL-terminated */
		if (nowWidth < MAX_LINE || wParam == VK_BACK)
			writeStr[nowWidth][counts[nowWidth]] = (TCHAR)0;

		InvalidateRgn(hWnd, NULL, TRUE);
		break;
	}

	case WM_DESTROY:
		HideCaret(hWnd);
		DestroyCaret();

		for (int i = 0; i < MAX_LINE; i++)
			SafeFree(writeStr[i]);

		SafeFree(windowRect);
		PostQuitMessage(0);
		break;
	}

	return DefWindowProc(hWnd, iMsg, wParam, lParam);
}

/* Free a heap block. FIX: the original set the (pass-by-value) pointer to
   NULL *before* calling free, so it always freed NULL and leaked every
   allocation. free(NULL) is a no-op, so no NULL guard is needed. */
inline void SafeFree(void* trash)
{
	free(trash);
}

/* Return the width of character 0 in the "consolas" font at fontSize. */
int GetMyFontWidth(HWND hWnd, UINT fontSize)
{
	int result;
	HDC hDC = GetDC(hWnd);

	HFONT hMyFont = CreateFont(fontSize, 0, 0, 0, FW_NORMAL, 0, 0, 0,
		ANSI_CHARSET, 0, 0, 0, 0, TEXT("consolas"));
	HFONT hOldFont = (HFONT)SelectObject(hDC, hMyFont);

	GetCharWidth(hDC, 0, 0, &result);

	SelectObject(hDC, hOldFont);
	DeleteObject(hMyFont);
	ReleaseDC(hWnd, hDC);
	return result;
}

/* Build MAX_LINE stacked line rectangles, each two font-widths tall.
   Caller owns the returned buffer (freed in WM_DESTROY via SafeFree).
   NOTE(review): the loop reads result[i - 1], so the omp-parallel loop has a
   loop-carried dependence; it only works serially — confirm the pragma is
   intentional. */
RECT* GetRects(HWND hWnd, UINT fontSize)
{
	RECT* result = (RECT*)malloc(MAX_LINE * sizeof(RECT));
	int fontWidth = GetMyFontWidth(hWnd, fontSize);

	GetClientRect(hWnd, &result[0]);
	result[0].bottom = fontWidth * 2;

#pragma omp parallel for
	for (int i = 1; i < MAX_LINE; i++)
	{
		result[i] = result[i - 1];
		result[i].top += fontWidth * 2;
		result[i].bottom = result[i].top + fontWidth * 2;
	}

	return result;
}
GB_binop__land_int8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__land_int8)
// A.*B function (eWiseMult):       GB (_AemultB_01__land_int8)
// A.*B function (eWiseMult):       GB (_AemultB_02__land_int8)
// A.*B function (eWiseMult):       GB (_AemultB_03__land_int8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__land_int8)
// A*D function (colscale):         GB (_AxD__land_int8)
// D*A function (rowscale):         GB (_DxB__land_int8)
// C+=B function (dense accum):     GB (_Cdense_accumB__land_int8)
// C+=b function (dense accum):     GB (_Cdense_accumb__land_int8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__land_int8)
// C=scalar+B                       GB (_bind1st__land_int8)
// C=scalar+B'                     GB (_bind1st_tran__land_int8)
// C=A+scalar                       GB (_bind2nd__land_int8)
// C=A'+scalar                      GB (_bind2nd_tran__land_int8)

// C type:   int8_t
// A type:   int8_t
// B,b type: int8_t

// BinaryOp: cij = ((aij != 0) && (bij != 0))

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator (logical AND over int8 operands)
#define GB_BINOP(z,x,y,i,j) \
    z = ((x != 0) && (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LAND || GxB_NO_INT8 || GxB_NO_LAND_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Not generated for LAND: the accum ewise3 kernel exists only for the
// arithmetic ops listed below, so this stanza is compiled out.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__land_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__land_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__land_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__land_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__land_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t   x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p
= 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = ((x != 0) && (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__land_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = ((aij != 0) && (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((x != 0) && (aij != 0)) ; \ } GrB_Info GB (_bind1st_tran__land_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((aij != 0) && (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__land_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
pr79512.c
/* PR c++/79512 */ /* { dg-options "-fopenmp-simd" } */ void foo (void) { #pragma omp target #pragma omp teams { int i; for (i = 0; i < 10; i++) ; } }
openmp_piseed.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> void main(int argc, char *argv[]) { int i; int count = 0; double x, y; int samples, nthreads; double pi; samples = atoi(argv[1]); nthreads = atoi(argv[2]); double start = omp_get_wtime(); #pragma omp parallel firstprivate(x, y, i) reduction(+:count) num_threads(nthreads) { int seed = omp_get_thread_num(); #pragma omp for for (i = 0; i < samples; i++) { x = (double)rand_r(&seed) / RAND_MAX; y = (double)rand_r(&seed) / RAND_MAX; if (x*x + y*y <= 1){ count++; } } } double end = omp_get_wtime(); printf("elapsed time: %.16g\n", end - start); pi = 4.0 * ((double)count/(double)samples); printf("Count = %d, Sample = %d, Estimate of pi = %7.5f\n", count, samples, pi); }
relativeneighborhoodgraph.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#ifndef VECTORSEARCH_THIRD_PARTY_SPTAG_RELATIVENEIGHBORHOODGRAPH_H_
#define VECTORSEARCH_THIRD_PARTY_SPTAG_RELATIVENEIGHBORHOODGRAPH_H_

#include "neighborhoodgraph.h"

namespace vsearch {
namespace COMMON {

// Neighborhood graph whose edge lists are pruned with a relative-
// neighborhood-graph style rule: a candidate neighbor is kept only if no
// already-kept (closer) neighbor is at least as close to the candidate as
// the candidate is to the query node.
class RelativeNeighborhoodGraph : public NeighborhoodGraph {
 public:
  // Rebuild the neighbor list of `node` from `queryResults` (assumed sorted
  // by ascending distance — TODO confirm against the caller).  Writes up to
  // m_iNeighborhoodSize accepted neighbor ids into `nodes`, padding the
  // remainder with -1.
  void RebuildNeighbors(VectorIndex* index, const int node, int* nodes,
                        const BasicResult* queryResults, const int numResults) {
    int count = 0;
    for (int j = 0; j < numResults && count < m_iNeighborhoodSize; j++) {
      const BasicResult& item = queryResults[j];
      if (item.VID < 0) break;       // negative VID marks end of valid results
      if (item.VID == node) continue;  // never keep a self-edge

      // RNG occlusion check against the neighbors accepted so far.
      bool good = true;
      for (int k = 0; k < count; k++) {
        if (index->ComputeDistance(index->GetSample(nodes[k]),
                                   index->GetSample(item.VID)) <= item.Dist) {
          good = false;
          break;
        }
      }
      if (good) nodes[count++] = item.VID;
    }
    // Pad unused slots so consumers can detect the end of the list.
    for (int j = count; j < m_iNeighborhoodSize; j++) nodes[j] = -1;
  }

  // Try to insert `insertNode` (at distance `insertDist` from `node`) into
  // node's neighbor list, keeping the list sorted by distance and the RNG
  // occlusion property.  A displaced neighbor may cascade further down the
  // list; insertion stops at the first slot where the candidate is occluded.
  void InsertNeighbors(VectorIndex* index, const int node, int insertNode,
                       float insertDist) {
    int* nodes = m_pNeighborhoodGraph[node];
    for (int k = 0; k < m_iNeighborhoodSize; k++) {
      int tmpNode = nodes[k];
      if (tmpNode < -1) continue;  // ids below -1 are skipped (deleted slots?)
                                   // NOTE(review): confirm meaning of < -1

      if (tmpNode < 0) {
        // Empty slot (-1): append here if not occluded by earlier neighbors.
        bool good = true;
        for (int t = 0; t < k; t++) {
          if (index->ComputeDistance(index->GetSample(insertNode),
                                     index->GetSample(nodes[t])) < insertDist) {
            good = false;
            break;
          }
        }
        if (good) {
          nodes[k] = insertNode;
        }
        break;
      }

      float tmpDist = index->ComputeDistance(index->GetSample(node),
                                             index->GetSample(tmpNode));
      // Insert before tmpNode if strictly closer, or equally close with a
      // smaller id (deterministic tie-break).
      if (insertDist < tmpDist ||
          (insertDist == tmpDist && insertNode < tmpNode)) {
        bool good = true;
        for (int t = 0; t < k; t++) {
          if (index->ComputeDistance(index->GetSample(insertNode),
                                     index->GetSample(nodes[t])) < insertDist) {
            good = false;
            break;
          }
        }
        if (good) {
          // Take this slot; the displaced neighbor becomes the new candidate
          // to insert further down the list.
          nodes[k] = insertNode;
          insertNode = tmpNode;
          insertDist = tmpDist;
        } else {
          break;  // occluded: stop, do not displace anything
        }
      }
    }
  }

  // Estimate graph accuracy by brute force on `samples` random nodes:
  // recompute each sampled node's exact RNG neighbor list and measure the
  // overlap with the stored list.  Returns the mean fraction of matching
  // neighbor slots.  O(samples * m_iGraphSize) distance computations.
  float GraphAccuracyEstimation(
      VectorIndex* index, const int samples,
      const std::unordered_map<int, int>* idmap = nullptr) {
    int* correct = new int[samples];

#pragma omp parallel for schedule(dynamic)
    for (int i = 0; i < samples; i++) {
      int x = Utils::rand_int(m_iGraphSize);
      // int x = i;
      COMMON::QueryResultSet<void> query(nullptr, m_iCEF);
      // Exhaustive scan: collect the m_iCEF closest nodes to x,
      // skipping ids present in idmap (already remapped/removed ids).
      for (int y = 0; y < m_iGraphSize; y++) {
        if ((idmap != nullptr && idmap->find(y) != idmap->end())) continue;
        float dist =
            index->ComputeDistance(index->GetSample(x), index->GetSample(y));
        query.AddPoint(y, dist);
      }
      query.SortResult();

      // Ground-truth RNG list for x, built from the exact top results.
      int* exact_rng = new int[m_iNeighborhoodSize];
      RebuildNeighbors(index, x, exact_rng, query.GetResults(), m_iCEF);

      correct[i] = 0;
      for (int j = 0; j < m_iNeighborhoodSize; j++) {
        if (exact_rng[j] == -1) {
          // Remaining slots are empty in the ground truth too: count them
          // as matches.
          correct[i] += m_iNeighborhoodSize - j;
          break;
        }
        for (int k = 0; k < m_iNeighborhoodSize; k++)
          if ((m_pNeighborhoodGraph)[x][k] == exact_rng[j]) {
            correct[i]++;
            break;
          }
      }
      delete[] exact_rng;
    }

    // Average over samples, normalized by list length.
    float acc = 0;
    for (int i = 0; i < samples; i++) acc += float(correct[i]);
    acc = acc / samples / m_iNeighborhoodSize;
    delete[] correct;
    return acc;
  }
};

}  // COMMON
}  // vsearch

#endif  //VECTORSEARCH_THIRD_PARTY_SPTAG_RELATIVENEIGHBORHOODGRAPH_H_
GB_unop__atanh_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__atanh_fc64_fc64
// op(A') function:  GB_unop_tran__atanh_fc64_fc64

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = catanh (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = catanh (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    /* aij = Ax [pA] */                         \
    GxB_FC64_t aij = Ax [pA] ;                  \
    /* Cx [pC] = op (cast (aij)) */             \
    GxB_FC64_t z = aij ;                        \
    Cx [pC] = catanh (z) ;                      \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ATANH || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__atanh_fc64_fc64
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: apply the op to all anz entries
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
            GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (p = 0 ; p < anz ; p++)
            {
                GxB_FC64_t aij = Ax [p] ;
                GxB_FC64_t z = aij ;
                Cx [p] = catanh (z) ;
            }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = catanh (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__atanh_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pt_to_pt_multiPingping.c
/*****************************************************************************
 *                                                                           *
 * Mixed-mode OpenMP/MPI MicroBenchmark Suite - Version 1.0                  *
 *                                                                           *
 * produced by                                                               *
 *                                                                           *
 * Mark Bull, Jim Enright and Fiona Reid                                     *
 *                                                                           *
 * at                                                                        *
 *                                                                           *
 * Edinburgh Parallel Computing Centre                                       *
 *                                                                           *
 * email: markb@epcc.ed.ac.uk, fiona@epcc.ed.ac.uk                           *
 *                                                                           *
 *                                                                           *
 * Copyright 2012, The University of Edinburgh                               *
 *                                                                           *
 *                                                                           *
 * Licensed under the Apache License, Version 2.0 (the "License");           *
 * you may not use this file except in compliance with the License.          *
 * You may obtain a copy of the License at                                   *
 *                                                                           *
 *     http://www.apache.org/licenses/LICENSE-2.0                            *
 *                                                                           *
 * Unless required by applicable law or agreed to in writing, software       *
 * distributed under the License is distributed on an "AS IS" BASIS,         *
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  *
 * See the License for the specific language governing permissions and       *
 * limitations under the License.                                            *
 *                                                                           *
 ****************************************************************************/

/*-----------------------------------------------------------*/
/* Contains the point-to-point multi-pingping mixed mode     */
/* OpenMP/MPI benchmarks.                                    */
/* This includes: -masteronly multiPingping                  */
/*                -funnelled multiPingping                   */
/*                -multiple multiPingping                    */
/*-----------------------------------------------------------*/
#include "pt_to_pt_multiPingping.h"

/*-----------------------------------------------------------*/
/* multiPingPing                                             */
/*                                                           */
/* Driver subroutine for the multi-pingping benchmark.       */
/* Selects the requested variant (masteronly / funnelled /   */
/* multiple), runs a warm-up and a verification pass, then   */
/* times increasing message sizes until maxDataSize.         */
/* Returns 0 on success, 1 if the MPI process placement is   */
/* unbalanced between the two ping nodes.                    */
/*-----------------------------------------------------------*/
int multiPingping(int benchmarkType){
	int dataSizeIter;
	char otherProcName[MPI_MAX_PROCESSOR_NAME];
	int balance;

	pingNodeA = 0;
	pingNodeB = 1;

	/* Check if there's a balance in num of MPI processes
	 * on pingNodeA and pingNodeB. */
	balance = crossCommBalance(pingNodeA, pingNodeB);
	/* If not balanced.. */
	if (balance == FALSE){
		/* ..master prints error */
		if (myMPIRank == 0){
			printBalanceError();
		}
		/* ..and all process exit function. */
		return 1;
	}

	/* Exchange MPI_COMM_WORLD ranks for processes in same crossComm */
	exchangeWorldRanks(pingNodeA, pingNodeB, &otherPingRank);

	/* Processes on pongNode send processor name to pingNode procs. */
	sendProcName(pingNodeA, pingNodeB, otherProcName);

	/* Print comm world ranks & processor name of processes
	 * taking part in multi-pingpong benchmark. */
	printMultiProcInfo(pingNodeA, otherPingRank, otherProcName);

	/* Barrier to ensure that all procs have completed
	 * printMultiProcInfo before prinring column headings. */
	MPI_Barrier(comm);
	/* Master process then prints report column headings */
	if (myMPIRank == 0){
		printBenchHeader();
	}

	/* Initialise repsToDo to defaultReps at start of benchmark */
	repsToDo = defaultReps;

	/* Initialise dataSizeIter */
	dataSizeIter = minDataSize;

	/* Start loop over data sizes */
	while (dataSizeIter <= maxDataSize){
		/* set size of buffer: one dataSizeIter chunk per thread */
		sizeofBuffer = dataSizeIter * numThreads;

		/* Allocate space for the main data arrays */
		allocateMultiPingpingData(sizeofBuffer);

		/* warm-up */
		if (benchmarkType == MASTERONLY){
			/* Masteronly warm-up */
			masteronlyMultiPingping(warmUpIters, dataSizeIter);
		}
		else if (benchmarkType == FUNNELLED){
			/* Funnelled warm-up sweep */
			funnelledMultiPingping(warmUpIters, dataSizeIter);
		}
		else if (benchmarkType == MULTIPLE){
			/* Multiple pingpong warm-up */
			multipleMultiPingping(warmUpIters, dataSizeIter);
		}

		/* Verification test for multi-pingpong */
		testMultiPingping(sizeofBuffer, dataSizeIter);

		/* Initialise benchmark */
		benchComplete = FALSE;

		/* Keep executing benchmark until target time is reached */
		while (benchComplete != TRUE){
			/* MPI_Barrier to synchronise processes.
			 * Then start the timer. */
			MPI_Barrier(comm);
			startTime = MPI_Wtime();

			if (benchmarkType == MASTERONLY){
				/* Execute masteronly multipingpong repsToDo times */
				masteronlyMultiPingping(repsToDo, dataSizeIter);
			}
			else if (benchmarkType == FUNNELLED){
				/* Execute funnelled multipingpong */
				funnelledMultiPingping(repsToDo, dataSizeIter);
			}
			else if (benchmarkType == MULTIPLE){
				multipleMultiPingping(repsToDo, dataSizeIter);
			}

			/* Stop the timer..MPI_Barrier to synchronise processes
			 * for more accurate timing. */
			MPI_Barrier(comm);
			finishTime = MPI_Wtime();
			totalTime = finishTime - startTime;

			/* Call repTimeCheck to check if target time is reached. */
			if (myMPIRank==0){
				benchComplete = repTimeCheck(totalTime, repsToDo);
			}
			/* Ensure all procs have the same value of benchComplete */
			/* and repsToDo */
			MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm);
			MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm);
		}
		/* End of loop to check if benchComplete is true */

		/* Master process sets benchmark results */
		if (myMPIRank == 0){
			setReportParams(dataSizeIter, repsToDo, totalTime);
			printReport();
		}

		/* Free the allocated space for the main data arrays */
		freeMultiPingpingData();

		/* Update dataSize before next iteration (doubling sweep) */
		dataSizeIter = dataSizeIter * 2;
	}

	return 0;
}

/*-----------------------------------------------------------*/
/* masteronlyMultiPingping                                   */
/*                                                           */
/* All Processes with rank of pingNodeA or pingNodeB in      */
/* crossComm send a message to each other.                   */
/* MPI communication takes place outside of the parallel     */
/* region.
*/ /*-----------------------------------------------------------*/ int masteronlyMultiPingping(int totalReps, int dataSize){ int repIter, i; int destRank; /* set destRank to ID of other process */ if (crossCommRank == pingNodeA){ destRank = pingNodeB; } else if (crossCommRank == pingNodeB){ destRank = pingNodeA; } /* loop totalRep times */ for (repIter=1; repIter<=totalReps; repIter++){ if ((crossCommRank == pingNodeA) || (crossCommRank == pingNodeB) ){ /* Each thread writes its globalID to pingSendBuf * with a parallel for directive. */ #pragma omp parallel for default(none) \ private(i) \ shared(pingSendBuf,dataSize,sizeofBuffer,globalIDarray) \ schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ pingSendBuf[i] = globalIDarray[myThreadID]; } /* Process calls non-blocking send to start transfer of * pingSendBuf to other process. */ MPI_Isend(pingSendBuf, sizeofBuffer, MPI_INT, destRank, TAG,\ crossComm, &requestID); /* Processes then wait for message from other process. */ MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, destRank, TAG, \ crossComm, &status); /* Finish the send operation with an MPI_Wait */ MPI_Wait(&requestID, &status); /* Threads under the MPI processes read their part of the * received buffer. */ #pragma omp parallel for default(none) \ private(i) \ shared(finalRecvBuf,dataSize,sizeofBuffer,pingRecvBuf) \ schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ finalRecvBuf[i] = pingRecvBuf[i]; } } } /* End repetitions loop */ return 0; } /*-----------------------------------------------------------*/ /* funnelledMultiPingping */ /* */ /* All processes with rank of pingNodeA or pingNodeB in */ /* crossComm send a message to each other. */ /* Inter-process communication takes place inside the */ /* OpenMP parallel region by the master thread. 
*/ /*-----------------------------------------------------------*/ int funnelledMultiPingping(int totalReps, int dataSize){ int repIter, i; int destRank; /* Set destRank to id of other process */ if (crossCommRank == pingNodeA){ destRank = pingNodeB; } else if (crossCommRank == pingNodeB){ destRank = pingNodeA; } /* Open the parallel region */ #pragma omp parallel default(none) \ private(i,repIter) \ shared(dataSize,sizeofBuffer,pingSendBuf,globalIDarray) \ shared(pingRecvBuf,finalRecvBuf,status,requestID,destRank) \ shared(crossComm,crossCommRank,pingNodeA,pingNodeB,totalReps) { /* loop totalRep times */ for (repIter = 1; repIter <= totalReps; repIter++){ if (crossCommRank == pingNodeA || crossCommRank == pingNodeB){ /* Each thread writes its globalID to its part of * pingSendBuf with an omp for. */ #pragma omp for schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ pingSendBuf[i] = globalIDarray[myThreadID]; } /* Implicit barrier here takes care of necessary synchronisation. */ #pragma omp master { /* Master thread of each process starts send. */ MPI_Isend(pingSendBuf, sizeofBuffer, MPI_INT, \ destRank, TAG, crossComm, &requestID); /* Processes then wait for message. */ MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, \ destRank, TAG, crossComm, &status); /* Finish the send operation with an MPI_Wait */ MPI_Wait(&requestID, &status); } /* Barrier to ensure master thread has completed transfer. */ #pragma omp barrier /* Each thread reads its part of the received buffer */ #pragma omp for schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ finalRecvBuf[i] = pingRecvBuf[i]; } } } /* End repetitions loop */ } /* End parallel region */ return 0; } /*-----------------------------------------------------------*/ /* multipleMultiPingping */ /* */ /* All processes with crossCommRank of pingNodeA and */ /* pingNodeB in crossComm send a message to each other. */ /* Multiple threads take part in the communication. 
*/ /*-----------------------------------------------------------*/ int multipleMultiPingping(int totalReps, int dataSize){ int repIter, i; int destRank; int lBound; /* set destRank to be id of other process */ if (crossCommRank == pingNodeA){ destRank = pingNodeB; } else if (crossCommRank == pingNodeB){ destRank = pingNodeA; } /* Open parallel region */ #pragma omp parallel default(none) \ private(i,repIter,lBound,requestID,status) \ shared(dataSize,sizeofBuffer,pingSendBuf,globalIDarray) \ shared(pingRecvBuf,finalRecvBuf,destRank,crossComm) \ shared(crossCommRank,pingNodeA,pingNodeB,totalReps) { /* loop totalRep times */ for (repIter = 1; repIter <= totalReps; repIter++){ if (crossCommRank == pingNodeA || crossCommRank == pingNodeB){ /* Calculate lower bound of each threads portion * of the data array. */ lBound = (myThreadID * dataSize); /* Each thread writes to its part of pingSendBuf */ #pragma omp for nowait schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ pingSendBuf[i] = globalIDarray[myThreadID]; } /* Each thread starts send of dataSize items from * pingSendBuf. */ MPI_Isend(&pingSendBuf[lBound], dataSize, MPI_INT, \ destRank, myThreadID, crossComm, &requestID); /* Thread then waits for message from destRank * with tag equal to its threadID. */ MPI_Recv(&pingRecvBuf[lBound], dataSize, MPI_INT, destRank, \ myThreadID, crossComm, &status); /* Thread completes send using MPI_Wait */ MPI_Wait(&requestID, &status); /* Each thread reads its part of received buffer. */ #pragma omp for nowait schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ finalRecvBuf[i] = pingRecvBuf[i]; } } } /* End repetitions loop */ } return 0; } /*-----------------------------------------------------------*/ /* allocateMultiPingpingData */ /* */ /* Allocates space for the main data arrays. */ /* Size of each array is specified by subroutine argument. 
 */
/*-----------------------------------------------------------*/
int allocateMultiPingpingData(int sizeofBuffer){

	if (crossCommRank == pingNodeA || crossCommRank == pingNodeB){
		/* NOTE(review): malloc results are not checked here;
		 * a failed allocation would crash later in the benchmark. */
		pingSendBuf = (int *)malloc(sizeof(int) * sizeofBuffer);
		pingRecvBuf = (int *)malloc(sizeof(int) * sizeofBuffer);
		finalRecvBuf = (int *)malloc(sizeof(int) * sizeofBuffer);
	}

	return 0;
}

/*-----------------------------------------------------------*/
/* freeMultiPingpingData                                     */
/*                                                           */
/* Free allocated memory for main data arrays.               */
/*-----------------------------------------------------------*/
int freeMultiPingpingData(){

	if (crossCommRank == pingNodeA || crossCommRank == pingNodeB){
		free(pingSendBuf);
		free(pingRecvBuf);
		free(finalRecvBuf);
	}

	return 0;
}

/*-----------------------------------------------------------*/
/* testMultiPingping                                         */
/*                                                           */
/* Verifies the the multi-pingping benchmark worked          */
/* correctly: rebuilds the expected receive buffer from the  */
/* peer's rank and compares it element-wise, then reduces    */
/* the per-process verdicts to the master with a logical AND.*/
/*-----------------------------------------------------------*/
int testMultiPingping(int sizeofBuffer, int dataSize){
	int i;
	int testFlag, localTestFlag;

	/* set localTestFlag to true */
	localTestFlag = TRUE;

	/* Testing done for processes on pingNodeA & pingNodeB */
	if (crossCommRank == pingNodeA || crossCommRank == pingNodeB) {

		/* allocate space for testBuf */
		testBuf = (int *)malloc(sizeof(int) * sizeofBuffer);

		/* Construct testBuf with correct values */
#pragma omp parallel for default(none) \
	private(i) \
	shared(otherPingRank,numThreads,dataSize,sizeofBuffer,testBuf) \
	schedule(static,dataSize)
		for (i=0; i<sizeofBuffer; i++){
			/* calculate globalID of thread expected in finalRecvBuf.
			 * This is done using otherPingRank. */
			testBuf[i] = (otherPingRank * numThreads) + myThreadID;
		}

		/* Compare each element of testBuf and finalRecvBuf */
		for (i=0; i<sizeofBuffer; i++){
			if (testBuf[i] != finalRecvBuf[i]){
				localTestFlag = FALSE;
			}
		}

		/* Free space for testBuf */
		free(testBuf);
	}

	/* Reduce testFlag into master with logical AND */
	MPI_Reduce(&localTestFlag, &testFlag, 1, MPI_INT, MPI_LAND, 0, comm);

	/* master sets testOutcome flag */
	if (myMPIRank == 0){
		setTestOutcome(testFlag);
	}

	return 0;
}
cungqr.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 * University of Tennessee, US,
 * University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zungqr.c, normal z -> c, Fri Sep 28 17:38:04 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_ungqr
 *
 * Generates an m-by-n matrix Q with orthonormal columns, which
 * is defined as the first n columns of a product of the elementary reflectors
 * returned by plasma_cgeqrf.
 *
 *******************************************************************************
 *
 * @param[in] m
 *          The number of rows of the matrix Q. m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrix Q. m >= n >= 0.
 *
 * @param[in] k
 *          The number of columns of elementary tile reflectors whose product
 *          defines the matrix Q.
 *          n >= k >= 0.
 *
 * @param[in] pA
 *          Details of the QR factorization of the original matrix A as returned
 *          by plasma_cgeqrf, where the k first columns are the reflectors.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,m).
 *
 * @param[in] T
 *          Auxiliary factorization data, computed by plasma_cgeqrf.
 *
 * @param[out] pQ
 *          On exit, pointer to the m-by-n matrix Q.
 *
 * @param[in] ldq
 *          The leading dimension of the array Q. ldq >= max(1,m).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 *******************************************************************************
 *
 * @sa plasma_omp_cungqr
 * @sa plasma_cungqr
 * @sa plasma_dorgqr
 * @sa plasma_sorgqr
 * @sa plasma_cgeqrf
 *
 ******************************************************************************/
int plasma_cungqr(int m, int n, int k,
                  plasma_complex32_t *pA, int lda,
                  plasma_desc_t T,
                  plasma_complex32_t *pQ, int ldq)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0 || n > m) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (k < 0 || k > n) {
        plasma_error("illegal value of k");
        return -3;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -5;
    }
    if (ldq < imax(1, m)) {
        plasma_error("illegal value of ldq");
        return -8;
    }

    // quick return
    if (n <= 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_geqrf(plasma, PlasmaComplexFloat, m, n);

    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t Q;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        m, n, 0, 0, m, k, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        m, n, 0, 0, m, k, &Q);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = ib*nb;  // unmqr: work
    retval = plasma_workspace_create(&work, lwork, PlasmaComplexFloat);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // Bug fix: release the tile descriptors created above; the original
        // code returned here without destroying A and Q, leaking them
        // (the descriptor-creation error path above does destroy A).
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&Q);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_cge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_cge2desc(pQ, ldq, Q, &sequence, &request);

        // Call the tile async function.
        plasma_omp_cungqr(A, T, Q, work, &sequence, &request);

        // Translate Q back to LAPACK layout.
        plasma_omp_cdesc2ge(Q, pQ, ldq, &sequence, &request);
    }
    // implicit synchronization

    plasma_workspace_destroy(&work);

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&Q);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_ungqr
 *
 * Non-blocking tile version of plasma_cungqr().
 * May return before the computation is finished.
 * Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] A
 *          Descriptor of matrix A.
 *          A is stored in the tile layout.
 *
 * @param[in] T
 *          Descriptor of matrix T.
 *          Auxiliary factorization data, computed by plasma_cgeqrf.
 *
 * @param[out] Q
 *          Descriptor of matrix Q. On exit, matrix Q stored in the tile layout.
 *
 * @param[in] work
 *          Workspace for the auxiliary arrays needed by some coreblas kernels.
 *          For multiplication by Q contains preallocated space for work
 *          arrays. Allocated by the plasma_workspace_create function.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_cungqr
 * @sa plasma_omp_cungqr
 * @sa plasma_omp_dorgqr
 * @sa plasma_omp_sorgqr
 * @sa plasma_omp_cgeqrf
 *
 ******************************************************************************/
void plasma_omp_cungqr(plasma_desc_t A, plasma_desc_t T,
                       plasma_desc_t Q, plasma_workspace_t work,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(Q) != PlasmaSuccess) {
        plasma_error("invalid Q");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (Q.n <= 0)
        return;

    // Set Q to identity (diagonal 1.0, off-diagonal 0.0).
    plasma_pclaset(PlasmaGeneral, 0.0, 1.0, Q, sequence, request);

    // Construct Q: tree-based or flat Householder reduction, depending on
    // the context's householder_mode.
    if (plasma->householder_mode == PlasmaTreeHouseholder) {
        plasma_pcungqr_tree(A, T, Q, work, sequence, request);
    }
    else {
        plasma_pcungqr(A, T, Q, work, sequence, request);
    }
}
GB_unaryop__minv_uint8_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__minv_uint8_uint32
// op(A') function: GB_tran__minv_uint8_uint32

// C type:   uint8_t
// A type:   uint32_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 8)

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (integer multiplicative inverse in 8-bit unsigned arithmetic;
// see the GB_IMINV_UNSIGNED definition in GB.h for the exact semantics)
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 8) ;

// casting (typecast the uint32_t input down to the uint8_t output type)
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT8 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx and Ax must not alias; anz is the number of entries; the loop is
// embarrassingly parallel so a static OpenMP schedule is used.
GrB_Info GB_unop__minv_uint8_uint32
(
    uint8_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual work is done by the shared template GB_unaryop_transpose.c,
// specialized via the macros defined above.
GrB_Info GB_tran__minv_uint8_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
fibo_para_1.c
//fibo_para_1.c #include <stdio.h> #include <stdlib.h> #include <omp.h> #include <stdint.h> #include <windows.h> double fibo_pd( int n , double a[]) { long double i,j; if ( a[n] == 0 ){ #pragma omp task shared( i ) firstprivate ( n ) i = fibo_pd(n - 1, a ); #pragma omp task shared( j ) firstprivate ( n ) j = fibo_pd(n - 2, a ); #pragma omp taskwait a[n] = i + j; } return a[n]; } int main(int argc, char** argv ) { unsigned n = atoi(argv[1]); double a[n]; for (size_t i = 0; i < n; i++) { a[i] = 0; } a[0] = 1; a[1] = 1; double res; LARGE_INTEGER frequency; LARGE_INTEGER start; LARGE_INTEGER end; double interval; QueryPerformanceFrequency(&frequency); QueryPerformanceCounter(&start); #pragma omp parallel default ( none ) shared(res, a, n) num_threads( 4 ) { #pragma omp single res = fibo_pd( n - 1 , a ); } QueryPerformanceCounter(&end); interval = (double) (end.QuadPart - start.QuadPart) / frequency.QuadPart; printf("Tiempo: %f\n", interval); printf("%.0Lf\n",res); }
filter_ground_removal2.h
// MIT License // Copyright (c) 2019 Edward Liu // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
#pragma once #include <limits> #include <memory> #include <thread> #include <unordered_map> #include <utility> #include <vector> #include "common/simple_thread_pool.h" #include "pre_processors/filter_interface.h" // implementation of paper // "Fast Segmentation of 3D Pointcloud for Ground Vehicles", 2010 namespace static_map { namespace pre_processers { namespace filter { template <typename PointT> class GroundRemoval2 : public Interface<PointT> { public: USE_POINTCLOUD; using Point = Eigen::Vector2f; // d, z struct Grid { Point min_z_point; std::vector<std::pair<int, Point>> points; }; struct Line { Point start, end; }; struct LocalLine { float m = 0.; float b = 0.; }; using Segment = std::vector<Line>; private: // parameters to insert the cloud into bins float r_max_; float r_min_; int32_t bin_num_; int32_t segment_num_; // line fitting parameters float start_ground_height_; float long_line_threshold_; float max_long_line_height_; float max_start_height_; // max error for line fitting float max_error_; // y = mx + b ( max m and max b) float max_slope_; float max_b_; // cluster parameters // maximum vertical distance of point to line to be considered ground float max_dist_to_line_; // search other segments to find matched line float search_angle_; // degree int32_t thread_num_; // point to a 2d array std::vector<Grid> grids_; public: GroundRemoval2() : Interface<PointT>(), r_max_(100.), r_min_(1.), bin_num_(200), segment_num_(180), start_ground_height_(-0.25), long_line_threshold_(1.0), max_long_line_height_(0.1), max_start_height_(0.2), max_error_(0.05), max_slope_(std::tan(M_PI / 12.)), max_b_(0.1), max_dist_to_line_(0.05), search_angle_(10.) 
/* degree */ , thread_num_(4) { INIT_INNER_PARAM(Interface<PointT>::kFloatParam, 0, "r_max", r_max_); INIT_INNER_PARAM(Interface<PointT>::kFloatParam, 1, "r_min", r_min_); INIT_INNER_PARAM(Interface<PointT>::kFloatParam, 2, "start_ground_height", start_ground_height_); INIT_INNER_PARAM(Interface<PointT>::kFloatParam, 3, "long_line_threshold", long_line_threshold_); INIT_INNER_PARAM(Interface<PointT>::kFloatParam, 4, "max_long_line_height", max_long_line_height_); INIT_INNER_PARAM(Interface<PointT>::kFloatParam, 5, "max_start_height", max_start_height_); INIT_INNER_PARAM(Interface<PointT>::kFloatParam, 6, "max_error", max_error_); INIT_INNER_PARAM(Interface<PointT>::kFloatParam, 7, "max_slope", max_slope_); INIT_INNER_PARAM(Interface<PointT>::kFloatParam, 8, "max_b", max_b_); INIT_INNER_PARAM(Interface<PointT>::kFloatParam, 9, "max_dist_to_line", max_dist_to_line_); INIT_INNER_PARAM(Interface<PointT>::kFloatParam, 10, "search_angle", search_angle_); // int32_t params INIT_INNER_PARAM(Interface<PointT>::kInt32Param, 0, "bin_num", bin_num_); INIT_INNER_PARAM(Interface<PointT>::kInt32Param, 1, "segment_num", segment_num_); INIT_INNER_PARAM(Interface<PointT>::kInt32Param, 2, "thread_num", thread_num_); } ~GroundRemoval2() {} GroundRemoval2(const GroundRemoval2&) = delete; GroundRemoval2& operator=(const GroundRemoval2&) = delete; std::shared_ptr<Interface<PointT>> CreateNewInstance() override { return std::make_shared<GroundRemoval2<PointT>>(); } void SetInputCloud(const PointCloudPtr& cloud) override { this->inliers_.clear(); this->outliers_.clear(); if (cloud == nullptr || cloud->empty()) { LOG(WARNING) << "cloud empty, do nothing!" 
<< std::endl; this->inner_cloud_ = nullptr; return; } // step1 initialise this->inner_cloud_ = cloud; // init the grids grids_.clear(); Grid default_grid_value; default_grid_value.min_z_point[0] = 1.e6; default_grid_value.min_z_point[1] = 1.e6; grids_.resize(bin_num_ * segment_num_, default_grid_value); const float double_pi = M_PI * 2; const float delta_alpha = double_pi / segment_num_; const float delta_bin = (r_max_ - r_min_) / bin_num_; const int size = this->inner_cloud_->size(); struct InnerPoint { int s_index; int b_index; Point point; // d, z int cloud_index; }; std::vector<InnerPoint> inner_points; inner_points.resize(size); // step2 insert the cloud into grids #if defined _OPENMP #pragma omp parallel for num_threads(LOCAL_OMP_THREADS_NUM) #endif for (int i = 0; i < size; ++i) { auto& point = this->inner_cloud_->points[i]; float range = std::sqrt(point.x * point.x + point.y * point.y); if (range < r_min_ && range > r_max_) { inner_points[i].s_index = -1; inner_points[i].b_index = -1; } else { // index float rad = std::atan2(point.y, point.x); if (rad < 0.) 
{ rad += double_pi; } int32_t s_index = rad / delta_alpha; int32_t b_index = (range - r_min_) / delta_bin; // clamp the indices if (b_index >= bin_num_) { b_index = bin_num_ - 1; } else if (b_index < 0) { b_index = 0; } if (s_index >= segment_num_) { s_index = segment_num_ - 1; } else if (s_index < 0) { s_index = 0; } inner_points[i].s_index = s_index; inner_points[i].b_index = b_index; inner_points[i].cloud_index = i; inner_points[i].point[0] = range; inner_points[i].point[1] = point.z; } } for (auto& inner_point : inner_points) { if (inner_point.s_index < 0) { continue; } auto& grid = grids_.at(GridIndex(inner_point.s_index, inner_point.b_index)); if (grid.points.empty() || inner_point.point[1] < grid.min_z_point[1]) { grid.min_z_point[0] = inner_point.point[0]; grid.min_z_point[1] = inner_point.point[1]; } if (inner_point.point[1] <= grid.min_z_point[1] + 0.5) { grid.points.push_back( std::make_pair(inner_point.cloud_index, Point(inner_point.point[0], inner_point.point[1]))); } } } void Filter(const PointCloudPtr& cloud) override { if (!cloud || !Interface<PointT>::inner_cloud_) { LOG(WARNING) << "nullptr cloud, do nothing!" 
<< std::endl; return; } // step1 prepare this->FilterPrepare(cloud); std::vector<Segment> segments; segments.resize(segment_num_); FitSegments(&segments); // step2 cluster ( ground ) auto cloud_size = this->inner_cloud_->size(); std::vector<uint8_t> is_outlier(cloud_size, 0); ClusterGround(segments, &is_outlier); // step3 manage inliers and outliers for (int i = 0; i < cloud_size; ++i) { if (is_outlier[i]) { this->outliers_.push_back(i); } else { this->inliers_.push_back(i); cloud->push_back(this->inner_cloud_->points[i]); } } // no need to sort inliers and outliers( already in-order ) } void DisplayAllParams() override { PARAM_INFO(r_max_); PARAM_INFO(r_min_); PARAM_INFO(start_ground_height_); PARAM_INFO(long_line_threshold_); PARAM_INFO(max_long_line_height_); PARAM_INFO(max_start_height_); PARAM_INFO(max_error_); PARAM_INFO(max_slope_); PARAM_INFO(max_b_); PARAM_INFO(max_dist_to_line_); PARAM_INFO(search_angle_); // int32_t params PARAM_INFO(bin_num_); PARAM_INFO(segment_num_); PARAM_INFO(thread_num_); } protected: Segment FitLines(const int32_t& seg_index) { CHECK(seg_index >= 0 && seg_index < segment_num_); Segment segment; int start_index = 0; for (start_index = 0; start_index < bin_num_; ++start_index) { if (!grids_[GridIndex(seg_index, start_index)].points.empty()) { break; } } if (start_index >= bin_num_ - 1) { return std::move(segment); } std::vector<Point> current_line_points; current_line_points.push_back( grids_[GridIndex(seg_index, start_index)].min_z_point); LocalLine current_line; bool is_long_line = false; float ground_height = start_ground_height_; for (int i = start_index + 1; i < bin_num_; ++i) { auto& grid = grids_.at(GridIndex(seg_index, i)); if (grid.points.empty()) { continue; } auto& current_point = grid.min_z_point; if (current_point[0] - current_line_points.back()[0] >= long_line_threshold_) { is_long_line = true; } float expected_z = std::numeric_limits<float>::max(); if (is_long_line && current_line_points.size() > 2) { expected_z = 
current_line.m * current_point[0] + current_line.b; } if (current_line_points.size() >= 2) { current_line_points.push_back(current_point); current_line = FitLocalLine(current_line_points); auto error = GetMaxError(current_line_points, current_line); if (error > max_error_ || std::fabs(current_line.m) > max_slope_ || // std::fabs(current_line.b - ground_height ) > max_b_ || (is_long_line && std::fabs(expected_z - current_point[1]) > max_long_line_height_)) { current_line_points.pop_back(); if (current_line_points.size() >= 3) { auto new_line = FitLocalLine(current_line_points); segment.push_back(LocalLineToLine(new_line, current_line_points)); // update ground height ground_height = new_line.m * current_line_points.back()[0] + new_line.b; } // start a new line is_long_line = false; current_line_points.erase(current_line_points.begin(), --current_line_points.end()); --i; } } else { if (!is_long_line && std::fabs(current_line_points.back()[1] - ground_height) < max_start_height_) { current_line_points.push_back(current_point); } else { // start a new line current_line_points.clear(); current_line_points.push_back(current_point); } } } if (current_line_points.size() > 2) { auto new_line = FitLocalLine(current_line_points); segment.push_back(LocalLineToLine(new_line, current_line_points)); } return std::move(segment); } void FitSegments(std::vector<Segment>* const segments) { auto calculate_in_one_thread = [&](const int& index) { (*segments)[index] = FitLines(index); }; #if defined _OPENMP // openmp version #pragma omp parallel for num_threads(LOCAL_OMP_THREADS_NUM) for (int i = 0; i < segment_num_; ++i) { calculate_in_one_thread(i); } #else // thread pool version common::ThreadPool pool(thread_num_); for (int i = 0; i < segment_num_; ++i) { pool.enqueue(calculate_in_one_thread, i); } #endif } void ClusterGround(const std::vector<Segment>& segments, std::vector<uint8_t>* const is_outlier) { CHECK(is_outlier); const float delta_alpha = M_PI * 2 / segment_num_; int 
search_max_step = search_angle_ / 180. * M_PI / delta_alpha; std::vector<int> segment_index_candidate; for (int i = search_max_step; i > 0; --i) { segment_index_candidate.push_back(i); segment_index_candidate.push_back(-i); } // using thread pool to accelerate auto pool = std::make_shared<common::ThreadPool>(thread_num_); auto calculate_in_one_thread = [&](const int& s /* seg_index */) { for (int b = 0; b < bin_num_; ++b) { auto grid_index = GridIndex(s, b); auto& grid = grids_[grid_index]; if (grid.points.empty()) { continue; } for (auto& index_point : grid.points) { auto& point = index_point.second; auto distance = VerticalDistanceToSegment(point, segments.at(s)); if (distance < 0.) { // getting a distance < 0 means that you did not // find a line match the point // you can try find the line in close segment for (auto i : segment_index_candidate) { int can_seg_index = s + i; if (can_seg_index < 0) { can_seg_index += segment_num_; } else if (can_seg_index >= segment_num_) { can_seg_index -= segment_num_; } distance = VerticalDistanceToSegment(point, segments.at(can_seg_index)); if (distance > 0.) { break; } } } if (distance > 0. 
&& distance <= max_dist_to_line_) { // this is a ground removal filter // so, if you found a point on ground // you should add it into outliers (*is_outlier)[index_point.first] = 1; } } // end loop in on grid } // loop for bins }; for (int i = 0; i < segment_num_; ++i) { pool->enqueue(calculate_in_one_thread, i); } // reset the shared pointer to destroy the thread pool // the destrcutor will wait for all threads then return pool.reset(); } float VerticalDistanceToSegment(const Point& point, const Segment& seg) { const float margin = 0.1; float distance = -1.; for (auto& line : seg) { CHECK(line.start[0] < line.end[0]); if (line.start[0] - margin < point[0] && line.end[0] + margin > point[0]) { float delta_z = line.end[1] - line.start[1]; float delta_d = line.end[0] - line.start[0]; float expected_z = (point[0] - line.start[0]) / delta_d * delta_z + line.start[1]; distance = std::fabs(point[1] - expected_z); } } return distance; } inline LocalLine FitLocalLine(const std::vector<Point>& points) { LocalLine line_result; auto point_num = points.size(); Eigen::MatrixXd X(point_num, 2); Eigen::VectorXd Y(point_num); for (int i = 0; i < point_num; ++i) { X(i, 0) = points[i][0]; X(i, 1) = 1; Y(i) = points[i][1]; } Eigen::VectorXd result = X.colPivHouseholderQr().solve(Y); line_result.m = result(0); line_result.b = result(1); return line_result; } inline float GetMaxError(const std::vector<Point>& points, const LocalLine& line) { float max_error = 0.; for (auto& point : points) { float error = std::fabs(line.m * point[0] + line.b - point[1]); if (error > max_error) { max_error = error; } } return max_error; } inline Line LocalLineToLine(const LocalLine& local_line, const std::vector<Point>& line_points) { Line line; auto start_d = line_points.front()[0]; auto end_d = line_points.back()[0]; line.start[0] = start_d; line.start[1] = local_line.m * start_d + local_line.b; line.end[0] = end_d; line.end[1] = local_line.m * end_d + local_line.b; return line; } inline int32_t 
GridIndex(int32_t seg_index, int32_t bin_index) { return seg_index * bin_num_ + bin_index; } }; } // namespace filter } // namespace pre_processers } // namespace static_map
GB_binop__isgt_int16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isgt_int16)
// A.*B function (eWiseMult):       GB (_AemultB_08__isgt_int16)
// A.*B function (eWiseMult):       GB (_AemultB_02__isgt_int16)
// A.*B function (eWiseMult):       GB (_AemultB_04__isgt_int16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isgt_int16)
// A*D function (colscale):         GB (_AxD__isgt_int16)
// D*A function (rowscale):         GB (_DxB__isgt_int16)
// C+=B function (dense accum):     GB (_Cdense_accumB__isgt_int16)
// C+=b function (dense accum):     GB (_Cdense_accumb__isgt_int16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isgt_int16)
// C=scalar+B                       GB (_bind1st__isgt_int16)
// C=scalar+B'                      GB (_bind1st_tran__isgt_int16)
// C=A+scalar                       GB (_bind2nd__isgt_int16)
// C=A'+scalar                      GB (_bind2nd_tran__isgt_int16)

// C type:   int16_t
// A type:   int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij > bij)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int16_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int16_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator (the ISGT "is greater than" op: result is 0 or 1 in int16_t)
#define GB_BINOP(z,x,y,i,j) \
    z = (x > y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGT || GxB_NO_INT16 || GxB_NO_ISGT_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Not generated for ISGT: this kernel exists only for monoid-compatible ops.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__isgt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isgt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isgt_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isgt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isgt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isgt_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__isgt_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isgt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP

        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }

    #else

        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"

    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isgt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isgt_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isgt_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (NULL if B is full)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t   x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isgt_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A (NULL if A is full)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t   y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x > aij) ;                       \
}

GrB_Info GB (_bind1st_tran__isgt_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij > y) ;                       \
}

GrB_Info GB (_bind2nd_tran__isgt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
9500.c
/*
 * Compile using the command:
 * `cc 27Stencil.c -o oa -fopenmp -lm`
 */
#include <math.h>
#include <omp.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENACC
#include <openacc.h>
#endif

#define DEFAULT_DATASIZE 1048576 /* Default datasize. */
#define DEFAULT_REPS 10          /* Default repetitions. */
#define CONF95 1.96              /* 95% confidence interval multiplier. */
#define ITERATIONS 10            /* Stencil sweeps per benchmark run. */
#define FAC (1./26)              /* Averaging factor: 26 neighbours per point. */
#define TOLERANCE 1.0e-15        /* Max allowed host/device result difference. */

extern int reps;               /* Repetitions. */
extern double *times;          /* Array to store results in. */
extern int flag;               /* Flag to set CPU or GPU invocation. */
extern unsigned int datasize;  /* Datasize passed to benchmark functions. */

unsigned int datasize = -1;    /* Datasize for tests in bytes. */
int reps = -1;                 /* Repetitions. */
double *times;    /* Array of doubles storing the benchmark times in microseconds. */
double testtime;  /* The average test time in microseconds for reps runs. */
double testsd;    /* The standard deviation in the test time in microseconds for reps runs. */
int flag = 0;     /* 0 indicates CPU. */

/*
 * Function prototypes for common functions.
 */
void init(int argc, char **argv);
void finalisetest(char *);
void finalise(void);
void benchmark(char *, double (*test)(void));
void print_results(char *, double, double);

/* Forward Declarations of utility functions*/
double max_diff(double *, double *, int);
void wul();

/* Print command-line usage and defaults. */
void usage(char *argv[]) {
    printf("Usage: %s \n"
           "\t--reps <repetitions> (default %d)\n"
           "\t--datasize <datasize> (default %d bytes)\n",
           argv[0], DEFAULT_REPS, DEFAULT_DATASIZE);
}

/*
 * This function parses the parameters from the command line.
*/ void parse_args(int argc, char *argv[]) { int arg; for (arg = 1; arg < argc; arg++) { if (strcmp(argv[arg], "--reps") == 0) { reps = atoi(argv[++arg]); if (reps == 0) { printf("Invalid integer:--reps: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "--datasize") == 0) { datasize = atoi(argv[++arg]); if (datasize == 0) { printf("Invalid integer:--datasize: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "-h") == 0) { usage(argv); exit(EXIT_SUCCESS); } else { printf("Invalid parameters: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } } void stats(double *mtp, double *sdp) { double meantime, totaltime, sumsq, mintime, maxtime, sd; int i, good_reps; mintime = 1.0e10; maxtime = 0.; totaltime = 0.; good_reps = 0; for (i = 0; i < reps; i++) { /* Skip entries where times is 0, this indicates an error occured */ if (times[i] != 0){ mintime = (mintime < times[i]) ? mintime : times[i]; maxtime = (maxtime > times[i]) ? maxtime : times[i]; totaltime += times[i]; good_reps++; } } meantime = totaltime / good_reps; sumsq = 0; for (i = 0; i < reps; i++) { if (times[i] != 0){ sumsq += (times[i] - meantime) * (times[i] - meantime); } } sd = sqrt(sumsq / good_reps); *mtp = meantime; *sdp = sd; } /* * This function prints the results of the tests. * If you use a compiler which sets a different preprocessor flag * you may wish to add it here. */ void print_results(char *name, double testtime, double testsd) { char compiler[20]; /* Set default compiler idetifier. */ sprintf(compiler, "COMPILER"); /* Set compiler identifier based on known preprocessor flags. */ #ifdef __PGI sprintf(compiler, "PGI"); #endif #ifdef __HMPP sprintf(compiler, "CAPS"); #endif //printf("%s %s %d %f %f\n", compiler, name, datasize, testtime*1e6, CONF95*testsd*1e6); printf("%f\n", testtime*1e6); } /* * This function initialises the storage for the test results and set the defaults. 
*/
void init(int argc, char **argv) {
    parse_args(argc, argv);

    /* Fall back to defaults for anything not set on the command line. */
    if (reps == -1) {
        reps = DEFAULT_REPS;
    }
    if (datasize == (unsigned int)-1) {
        datasize = DEFAULT_DATASIZE;
    }

    times = (double *)malloc((reps) * sizeof(double));

    /*
    #ifdef __PGI
    acc_init(acc_device_nvidia);
    //  printf("PGI INIT\n");
    #endif
    #ifdef __HMPP
    int a[5] = {1,2,3,4,5};
    #pragma acc data copyin(a[0:5])
    {}
    #endif
    #ifdef _CRAYC
    int a[5] = {1,2,3,4,5};
    #pragma acc data copyin(a[0:5])
    {}
    #endif
    */
}

/* Release the per-repetition timing storage allocated by init(). */
void finalise(void) {
    free(times);
}

/*
 * This function runs the benchmark specified.
 */
void benchmark(char *name, double (*test)(void)) {
    int i = 0;
    double tmp = 0;

    /* A run reports its elapsed time, or a negative sentinel on failure:
     * -10000 = allocation failure, -11000 = host/device mismatch.
     * Failed runs are recorded as 0 and excluded by stats(). */
    for (i = 0; i < reps; i++) {
        tmp = test();
        if (tmp == -10000) {
            printf("Memory allocation failure in %s\n", name);
            times[i] = 0;
        }
        else if (tmp == -11000) {
            printf("CPU/GPU mismatch in %s\n", name);
            times[i] = 0;
        }
        else {
            times[i] = tmp;
        }
    }

    stats(&testtime, &testsd);
    //printf("in benchmark\n");
    print_results(name, testtime, testsd);
    //printf("printed result\n");
}

/*
 * 27-point (26-neighbour average) 3D stencil benchmark.
 * Runs ITERATIONS sweeps on the host, then the same sweeps through the
 * accelerator pragmas, and returns the device elapsed time if the two
 * results agree to within TOLERANCE.  Returns -10000 on allocation
 * failure, -11000 on host/device mismatch.
 */
double stencil() {
    extern unsigned int datasize;
    /* Cube edge length chosen so two sz^3 double grids fit in datasize. */
    int sz = cbrt((datasize/sizeof(double))/2);
    int i, j, k, iter;
    int n = sz-2;           /* interior points per dimension (1-wide halo) */
    double fac = FAC;
    double t1, t2;
    double md;
    //printf("size = %d\n", sz);

    /* Work buffers, with halos */
    double *a0 = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *device_result = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *a1 = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *host_result = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *a0_init = (double*)malloc(sizeof(double)*sz*sz*sz);

    if (a0==NULL||device_result==NULL||a1==NULL||host_result==NULL||a0_init==NULL) {
        /* Something went wrong in the memory allocation here, fail gracefully */
        return(-10000);
    }

    /* initialize input array a0 */
    /* zero all of array (including halos) */
    //printf("size = %d\n", sz);
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                a0[i*sz*sz+j*sz+k] = 0.0;
                //printf("%d\t", (i*sz*sz+j*sz+k));
            }
        }
    }
    //printf("\n");
    //int size_of_a0 = sizeof(a0) / sizeof(*a0);
    //printf("size of a0 = %d\n", size_of_a0);

    /* use random numbers to fill interior */
    for (i = 1; i < n+1; i++) {
        for (j = 1; j < n+1; j++) {
            for (k = 1; k < n+1; k++) {
                a0[i*sz*sz+j*sz+k] = (double) rand()/ (double)(1.0 + RAND_MAX);
            }
        }
    }

    /* memcpy(&a0_init[0], &a0[0], sizeof(double)*sz*sz*sz); */
    /* save initial input array for later GPU run */
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                a0_init[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
            }
        }
    }

    //printf("Host computation\n");
    /* run main computation on host */
    for (iter = 0; iter < ITERATIONS; iter++) {
        for (i = 1; i < n+1; i++) {
            for (j = 1; j < n+1; j++) {
                for (k = 1; k < n+1; k++) {
                    /* average of the 26 neighbours (centre excluded) */
                    a1[i*sz*sz+j*sz+k] = (
                        a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
                        a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
                        a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
                        a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
                        a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
                        a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
                        a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
                        a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
                        a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
                        a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
                        a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
                        a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
                        a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
                    ) * fac;
                }
            }
        }
        /* copy a1 back into a0 for the next sweep */
        for (i = 1; i < n+1; i++) {
            for (j = 1; j < n+1; j++) {
                for (k = 1; k < n+1; k++) {
                    a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
                }
            }
        }
    } /* end iteration loop */

    /* save result */
    /* memcpy(&host_result[0], &a0[0], sizeof(double)*sz*sz*sz); */
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                host_result[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
                // printf("%lf\t", a0[i*sz*sz+j*sz+k]);
            }
        }
    }
    //int size = sizeof(host_result)/sizeof(host_result[0]);
    //for(i = 0; i < size; i++) {
    //    printf("%lf\t", host_result[i]);
    //}
    //printf("\n");

    /* copy initial array back to a0 */
    /* memcpy(&a0[0], &a0_init[0], sizeof(double)*sz*sz*sz); */
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                a0[i*sz*sz+j*sz+k] = a0_init[i*sz*sz+j*sz+k];
            }
        }
    }

    //printf("Starting acc pragma code\n");
    t1 = omp_get_wtime();

    /* NOTE(review): an OpenACC data region wraps OpenMP target/parallel
     * pragmas for the first loop nest and OpenACC loop pragmas for the
     * second; mixing the two models this way is compiler-specific —
     * verify against the intended toolchain before changing anything. */
#pragma acc data copy(a0[0:sz*sz*sz]), create(a1[0:sz*sz*sz], i,j,k,iter), copyin(sz,fac,n)
    {
        for (iter = 0; iter < ITERATIONS; iter++) {
#pragma omp target teams distribute
            for (i = 1; i < n+1; i++) {
#pragma omp parallel for
                for (j = 1; j < n+1; j++) {
#pragma omp simd
                    for (k = 1; k < n+1; k++) {
                        a1[i*sz*sz+j*sz+k] = (
                            a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
                            a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
                            a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
                            a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
                            a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
                            a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
                            a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
                            a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
                            a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
                            a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
                            a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
                            a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
                            a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
                        ) * fac;
                    }
                }
            }
#pragma acc parallel loop
            for (i = 1; i < n+1; i++) {
#pragma acc loop
                for (j = 1; j < n+1; j++) {
#pragma acc loop
                    for (k = 1; k < n+1; k++) {
                        a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
                    }
                }
            }
        } /* end iteration loop */
    } /* end data region */

#pragma acc wait
    t2 = omp_get_wtime();

    memcpy(&device_result[0], &a0[0], sizeof(double)*sz*sz*sz);

    md = max_diff(&host_result[0], &device_result[0], sz);

    /* Free malloc'd memory to prevent leaks */
    free(a0);
    free(a0_init);
    free(a1);
    free(host_result);
    free(device_result);

    //printf("md: %lf \t tolerance: %lf", md, TOLERANCE);
    if (md < TOLERANCE ) {
        //printf ("GPU matches host to within tolerance of %1.1e\n\n", TOLERANCE);
        return(t2 - t1);
    }
    else {
        // printf ("WARNING: GPU does not match to within tolerance of %1.1e\nIt is %lf\n", TOLERANCE, md);
        return(-11000);
    }
}

/* Utility Functions */

/* Maximum absolute element-wise difference over the interior of two
 * sz^3 grids (halos excluded). */
double max_diff(double *array1, double *array2, int sz) {
    double tmpdiff, diff;
    int i, j, k;
    int n = sz-2;
    diff = 0.0;
    for (i = 1; i < n+1; i++) {
        for (j = 1; j < n+1; j++) {
            for (k = 1; k < n+1; k++) {
                tmpdiff = fabs(array1[i*sz*sz+j*sz+k] - array2[i*sz*sz+j*sz+k]);
                //printf("diff: %lf", tmpdiff);
                if (tmpdiff > diff) diff = tmpdiff;
            }
        }
    }
    return diff;
}

/*
 * This function ensures the device is awake.
 * It is more portable than acc_init().
 */
void wul(){
    int data = 8192;
    double *arr_a = (double *)malloc(sizeof(double) * data);
    double *arr_b = (double *)malloc(sizeof(double) * data);
    int i = 0;

    if (arr_a==NULL||arr_b==NULL) {
        printf("Unable to allocate memory in wul.\n");
    }

    for (i=0;i<data;i++){
        arr_a[i] = (double) (rand()/(1.0+RAND_MAX));
    }

#pragma acc data copy(arr_b[0:data]), copyin(arr_a[0:data])
    {
#pragma acc parallel loop
        for (i=0;i<data;i++){
            arr_b[i] = arr_a[i] * 2;
        }
    }

    if (arr_a[0] < 0){
        printf("Error in WUL\n");
        /*
         * This should never be called as rands should be in the range (0,1].
         * This stops clever optimizers.
         */
    }

    free(arr_a);
    free(arr_b);
}

/* Entry point: parse args, wake the device, run the stencil benchmark. */
int main(int argc, char **argv) {
    char testName[32];

    //printf("compiler name datasize testtime*1e6 CONF95*testsd*1e6\n");

    /* Initialise storage for test results & parse input arguements. */
    init(argc, argv);

    /* Ensure device is awake. */
    wul();

    sprintf(testName, "27S");
    benchmark(testName, &stencil);

    /* Print results & free results storage */
    finalise();

    return EXIT_SUCCESS;
}
SE_fg_extend_fcn_mex.c
#include "mex.h"
#include "../SE_fgg.h"

/* Fills an SE_FGG_params struct from the MATLAB options struct OPT. */
void SE_FGG_MEX_params(SE_FGG_params*, const mxArray*, int);

/* MEX argument aliases: input grid, options struct, output grid. */
#define HIN prhs[0]
#define OPT prhs[1]
#define HOUT plhs[0] // Output

#ifndef VERBOSE
#define VERBOSE 0
#endif

/*
 * MEX gateway: extend a periodic FGG grid H_per into a padded output grid.
 * The periodicity variant is selected at compile time via THREE_PERIODIC /
 * TWO_PERIODIC / ONE_PERIODIC.
 */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] )
{
    const int N = 1;
    const double* H_per = mxGetPr(HIN);

    SE_FGG_params params;
    SE_FGG_MEX_params(&params, OPT, N);

    /* NOTE(review): mxCreateNumericArray takes mwSize dims; size_t matches
     * on 64-bit builds with the large-array API — confirm for this build. */
    size_t dims[3] = {params.npdims[0], params.npdims[1], params.npdims[2]};
    HOUT = mxCreateNumericArray(3, dims, mxDOUBLE_CLASS, mxREAL);

    SE_FGG_work work;
    work.H = mxGetPr(HOUT);

    /* The extend function is called inside a parallel region; presumably it
     * contains its own worksharing directives — verify in SE_fgg.c. */
#ifdef _OPENMP
#pragma omp parallel default(shared)
#endif
    {
#ifdef THREE_PERIODIC
        SE_FGG_extend_fcn(&work, H_per, &params);
#endif
#ifdef TWO_PERIODIC
        SE2P_FGG_extend_fcn(&work, H_per, &params);
#endif
#ifdef ONE_PERIODIC
        SE1P_FGG_extend_fcn(&work, H_per, &params);
#endif
    }
}
coloring_johansson.h
#include <iterator>
#include <utility>
#include <thread>

#include "coloring_common.h"

namespace GMS::Coloring {

typedef vector<int32_t> ColVec;
typedef ColVec::iterator ColPointer;
typedef vector<ColPointer> PointerVec;
typedef int32_t Color;

// Class that stores the palettes.
// For each vertex it keeps an unordered palette of still-available colors
// plus a per-color index array so membership tests and removals are O(1).
class Palettes {
public:
    // allocate the memory for the palettes and indecies
    Palettes(size_t n, int32_t max_degree) {
        // use c style arrays because vectors always get inititialized
        // sequentially when created or when using push_back
        delta_plus_two = max_degree + 2;

        // palette of colors still available for the taking
        // first element will be the size of the palette
        palettes = new Color[n * delta_plus_two];

        // index to find if a given color is in the palette, since palette is unordered
        // first element will never be used, as there is no color 0
        // *palette_indecies[v, x] = color x
        palette_indecies = new Color*[n * delta_plus_two];

        // initialize palletes with colors 1..max_degree+1 in parallel
#pragma omp parallel for
        for (NodeId v = 0; v < n; v++) {
            // the initial size of the palette is delta + 1
            palettes[p_ind(v, 0)] = max_degree + 1;
            for (int i = 1; i < delta_plus_two; i++) {
                palette_indecies[p_ind(v, i)] = &palettes[p_ind(v, i)];
                palettes[p_ind(v, i)] = i;
            }
        }
    }

    ~Palettes() {
        delete[] palettes;
        delete[] palette_indecies;
    }

    // color stored at position pos of v's palette
    inline Color at(NodeId v, int32_t pos) { return palettes[p_ind(v, pos)]; }

    // get the size of the palette of node v
    inline size_t size(NodeId v) { return palettes[p_ind(v, 0)]; }

    // pointer to the pos-th color of v's palette (0-based)
    inline Color* p_at(NodeId v, int32_t pos) { return &palettes[p_ind(v, pos+1)]; }

    inline Color* p_first(NodeId v) { return &palettes[p_ind(v, 1)]; }

    inline Color* p_last(NodeId v) { return &palettes[p_ind(v, size(v))]; }

    // return a pointer to the color c in the palette of node v
    inline Color* p_color(NodeId v, Color c) { return palette_indecies[p_ind(v, c)]; }

    // c is present iff its indexed slot lies within the live palette prefix
    inline bool contains(NodeId v, Color c) { return p_color(v, c) <= p_last(v); }

    // O(1) removal: swap c with the last live color, fix both index
    // entries, then shrink the palette by one.
    inline void remove_color(NodeId v, Color c) {
        Color* last_pos = p_last(v);

        // update the palette index
        Color* p_color_to_remove = palette_indecies[p_ind(v, c)]; // p_color
        // set the index pointer of the color in question to point to the current end of the palette
        palette_indecies[p_ind(v, c)] = last_pos;
        // set the index pointer of the last color in the palette to position of the color to be removed
        palette_indecies[p_ind(v, *last_pos)] = p_color_to_remove;

        // remove the color picked by the neighbour from own palette and resize palette properly
        *p_color_to_remove = *last_pos;
        *last_pos = c;
        palettes[p_ind(v, 0)]--;
    }

private:
    int32_t* palettes;
    int32_t** palette_indecies;
    int32_t delta_plus_two;

    // returns index of an element in the palette of vertex v
    inline int32_t p_ind(int32_t v, int32_t elem) { return delta_plus_two * v + elem; }
};

// Randomized distributed-style coloring (Johansson): every uncolored vertex
// repeatedly proposes a random color in [1, delta+1] and keeps it only if no
// neighbour proposed the same color that round.  colors[v] > 0 means v is
// permanently colored; a negative value marks this round's tentative pick.
template <class CGraph>
int graph_coloring_johansson_no_updates(const CGraph& g, vector<int32_t>& colors) {
    size_t n = g.num_nodes();

    int32_t max_degree = 0;
    DetailTimer detailTimer;

    // calculate the max_degree of the graph (parallel max-reduction)
#pragma omp parallel for reduction(max : max_degree)
    for (NodeId v = 0; v < n; v++) {
        if (max_degree < g.out_degree(v)) {
            max_degree = g.out_degree(v);
        }
    }

    // initialize multiple random selectors for concurrent access
    vector<random_selector<>> randoms(omp_get_max_threads());
    for (int32_t i = 0; i < randoms.size(); i++) {
        randoms[i] = random_selector<>();
    }

    // keep track of how many nodes are colored (one counter per thread)
    vector<int32_t> nodes_colored(omp_get_max_threads(), 0);
    int32_t nodes_remaining = n;

    detailTimer.endPhase("init");

    int32_t colored = 0;
    int iter = 0;
    // until all nodes are colored...
    while (nodes_remaining > 0) {
#pragma omp parallel
        {
            // select a random color from the node's palette,
            // remember which element that was s.t. we can later see if the
            // coloring took place this round
#pragma omp for schedule(static)
            for (NodeId v = 0; v < n; v++) {
                // move on if this node is already colored
                if (colors[v] > 0)
                    continue;
                // negate the color to indicate that it has been picked this turn
                colors[v] = - randoms[omp_get_thread_num()].select_num(1, max_degree + 1);
            }

            // check if the neighbours also picked that color,
            // if so we don't want to pick the color this round
#pragma omp for schedule(static)
            for (NodeId v = 0; v < n; v++) {
                if (colors[v] > 0)
                    continue;
                for (NodeId u : g.out_neigh(v)) {
                    // abs() so a neighbour's permanent color also conflicts
                    if (abs(colors[v]) == abs(colors[u])) {
                        colors[v] = 0;
                        break;
                    }
                }
                if (colors[v] != 0) {
                    // no conflict: the pick becomes permanent
                    nodes_colored[omp_get_thread_num()]++;
                    colors[v] = abs(colors[v]);
                    continue;
                }
            }
        } // END PARALLEL SECTION

        std::string phaseName("coloring_step_");
        phaseName += std::to_string(iter++);
        detailTimer.endPhase(phaseName.c_str());

        // sum the per-thread counters to see how many vertices remain
        for (int32_t i = 0; i < omp_get_max_threads(); i++) {
            colored += nodes_colored[i];
        }
        nodes_remaining = n - colored;
        colored = 0;
    }
    detailTimer.print();
    return -1;
}

// NOTE(review): a commented-out variant, graph_coloring_johansson_updates(),
// previously lived here.  It maintained per-vertex Palettes (removing colors
// claimed by neighbours each round) instead of re-sampling from the full
// [1, delta+1] range.  It was removed as dead code per review convention
// (commented-out code belongs in version control history); see the
// repository history to resurrect it.

// Public entry point; currently always dispatches to the no-updates variant.
// (A "mode" parameter selecting the palette-updating variant was commented
// out along with that variant.)
template <class CGraph>
int graph_coloring_johansson(const CGraph& g, vector<int32_t>& colors) {
    return graph_coloring_johansson_no_updates(g, colors);
}

} // namespace GMS::Coloring
pbkdf2-hmac-sha512_fmt_plug.c
/* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * Based on hmac-sha512 by magnum
 *
 * Minor fixes, format unification and OMP support done by Dhiru Kholia
 * <dhiru@openwall.com>
 *
 * Fixed for supporting $ml$ "dave" format as well as GRUB native format by
 * magnum 2013. Note: We support a binary size of >512 bits (64 bytes / 128
 * chars of hex) but we currently do not calculate it even in cmp_exact(). The
 * chance for a 512-bit hash collision should be pretty dang slim.
 */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pbkdf2_hmac_sha512;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pbkdf2_hmac_sha512);
#else

#include <ctype.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>

#include "misc.h"
#include "arch.h"
#include "common.h"
#include "formats.h"
#include "sha2.h"
#include "johnswap.h"
#include "pbkdf2_hmac_common.h"
#include "pbkdf2_hmac_sha512.h"

#define FORMAT_LABEL            "PBKDF2-HMAC-SHA512"
#undef FORMAT_NAME
#define FORMAT_NAME             "GRUB2 / OS X 10.8+"

#ifdef SIMD_COEF_64
#define ALGORITHM_NAME          "PBKDF2-SHA512 " SHA512_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME          "PBKDF2-SHA512 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME          "PBKDF2-SHA512 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#endif

#define SALT_SIZE               sizeof(struct custom_salt)

/* With SIMD, keys are hashed in groups of SSE_GROUP_SZ_SHA512. */
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA512
#define MAX_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA512
#else
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#endif

#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE               1
#endif
#endif
#include "memdbg.h"

#define PAD_SIZE                128
#define PLAINTEXT_LENGTH        125

/* Parsed salt: raw salt bytes plus the PBKDF2 iteration count. */
static struct custom_salt {
	uint8_t length;
	uint8_t salt[PBKDF2_64_MAX_SALT_SIZE];
	uint32_t rounds;
} *cur_salt;

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[PBKDF2_SHA512_BINARY_SIZE / sizeof(uint32_t)];

/* Allocate per-candidate key and result buffers, scaled for OpenMP. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key),
			self->params.max_keys_per_crypt);
	crypt_out = mem_calloc(sizeof(*crypt_out),
			self->params.max_keys_per_crypt);
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/* Parse "<tag><rounds><delim><hex salt><delim>..." into a custom_salt.
 * The delimiter is '.' for the $ml$ style, '$' otherwise. */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *p;
	int saltlen;
	char delim;

	memset(&cs, 0, sizeof(cs));
	ciphertext += PBKDF2_SHA512_TAG_LEN;
	cs.rounds = atou(ciphertext);
	delim = strchr(ciphertext, '.') ? '.' : '$';
	ciphertext = strchr(ciphertext, delim) + 1;
	p = strchr(ciphertext, delim);
	saltlen = 0;
	while (ciphertext < p) {        /** extract salt **/
		cs.salt[saltlen++] =
			atoi16[ARCH_INDEX(ciphertext[0])] * 16 +
			atoi16[ARCH_INDEX(ciphertext[1])];
		ciphertext += 2;
	}
	cs.length = saltlen;

	return (void *)&cs;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"

/* Run PBKDF2-HMAC-SHA512 for all queued candidates (SIMD-batched when
 * available, OpenMP-parallel across batches). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
#ifdef SSE_GROUP_SZ_SHA512
		int lens[SSE_GROUP_SZ_SHA512], i;
		unsigned char *pin[SSE_GROUP_SZ_SHA512];
		union {
			uint32_t *pout[SSE_GROUP_SZ_SHA512];
			unsigned char *poutc;
		} x;
		for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			x.pout[i] = crypt_out[index+i];
		}
		pbkdf2_sha512_sse((const unsigned char **)pin, lens,
			cur_salt->salt, cur_salt->length,
			cur_salt->rounds, &(x.poutc),
			PBKDF2_SHA512_BINARY_SIZE, 0);
#else
		pbkdf2_sha512((const unsigned char*)(saved_key[index]),
			strlen(saved_key[index]),
			cur_salt->salt, cur_salt->length,
			cur_salt->rounds, (unsigned char*)crypt_out[index],
			PBKDF2_SHA512_BINARY_SIZE, 0);
#endif
	}
	return count;
}

/* Fast first-stage compare: only the first machine word of each hash. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], PBKDF2_SHA512_BINARY_SIZE);
}

static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(*saved_key));
}

static char *get_key(int index)
{
	return saved_key[index];
}

static int cmp_exact(char *source, int index)
{
	return pbkdf2_hmac_sha512_cmp_exact(get_key(index), source,
			cur_salt->salt, cur_salt->length, cur_salt->rounds);
}

/* Cost 1: iteration count, reported for --list / tunable-cost selection. */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->rounds;
}

struct fmt_main fmt_pbkdf2_hmac_sha512 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		PBKDF2_SHA512_BINARY_SIZE,
		sizeof(uint32_t),
		SALT_SIZE,
		sizeof(ARCH_WORD),
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE,
		{
			"iteration count",
		},
		{ PBKDF2_SHA512_FORMAT_TAG, FORMAT_TAG_ML, FORMAT_TAG_GRUB },
		pbkdf2_hmac_sha512_common_tests
	}, {
		init,
		done,
		fmt_default_reset,
		pbkdf2_hmac_sha512_prepare,
		pbkdf2_hmac_sha512_valid,
		pbkdf2_hmac_sha512_split,
		pbkdf2_hmac_sha512_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
sample-1.c
#include <omp.h>
#include <stdio.h>

/*
 * Minimal OpenMP demo: prints the thread id and team size from inside and
 * outside a parallel region.
 */
int main()
{
    int threadnumber = 1;
    int threadID = 1;

    printf("I am thread %d, \tthreads %d \n", threadID, threadnumber);

    omp_set_num_threads(20);

    /* BUGFIX: threadID and threadnumber were shared, so every thread in the
     * team wrote them concurrently — a data race that could make a thread
     * print another thread's id.  Give each thread its own copy. */
#pragma omp parallel private(threadID, threadnumber) // [NOTE]: OpenMP only parallelize partially
    { // [NOTE]: must enter to new line.
        threadnumber = omp_get_num_threads();
        threadID = omp_get_thread_num();
        printf("Hello from thread %d, \t nthreads %d\n", threadID, threadnumber);
    }

    /* With private copies, the outer variables keep their original values
     * here (previously they held whatever the racing threads last wrote). */
    printf("I am thread %d, \tnthreads %d\n", threadID, threadnumber);
    return 0;
}
ChMatrix.h
// ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Authors: Alessandro Tasora, Radu Serban // ============================================================================= #ifndef CHMATRIX_H #define CHMATRIX_H #include <immintrin.h> #include "chrono/core/ChCoordsys.h" #include "chrono/core/ChException.h" #include "chrono/ChConfig.h" #include "chrono/serialization/ChArchive.h" #include "chrono/serialization/ChArchiveAsciiDump.h" namespace chrono { // // FAST MACROS TO SPEEDUP CODE // #define Set33Element(a, b, val) SetElementN(((a * 3) + (b)), val) #define Get33Element(a, b) GetElementN((a * 3) + (b)) #define Set34Element(a, b, val) SetElementN(((a * 4) + (b)), val) #define Get34Element(a, b) GetElementN((a * 4) + (b)) #define Set34Row(ma, a, val0, val1, val2, val3) \ ma.SetElementN((a * 4), val0); \ ma.SetElementN((a * 4) + 1, val1); \ ma.SetElementN((a * 4) + 2, val2); \ ma.SetElementN((a * 4) + 3, val3); #define Set44Element(a, b, val) SetElementN(((a * 4) + (b)), val) #define Get44Element(a, b) GetElementN((a * 4) + (b)) // forward declaration template <class Real = double> class ChMatrixDynamic; /// /// ChMatrix: /// /// A base class for matrix objects (tables of NxM numbers). /// To access elements, the indexes start from zero, and /// you must indicate first row, then column, that is: m(2,4) /// means the element at 3rd row, 5th column. 
/// This is an abstract class, so you cannot instantiate
/// objects from it: you must rather create matrices using the
/// specialized child classes like ChMatrixDynamic, ChMatrixNM,
/// ChMatrix33 and so on; all of them have this same base class.
/// Warning: for optimization reasons, not all functions will
/// check about boundaries of element indexes and matrix sizes (in
/// some cases, if sizes are wrong, debug asserts are used).
///
/// Further info at the @ref mathematical_objects manual page.

template <class Real = double>
class ChMatrix {
  protected:
    //
    // DATA
    //

    int rows = 1;     // number of rows
    int columns = 1;  // number of columns
    Real* address;    // row-major element buffer; allocated and owned by child classes

  public:
    //
    // CONSTRUCTORS (none - abstract class that must be implemented with child classes)
    //

    virtual ~ChMatrix() {}

    //
    // OPERATORS OVERLOADING
    //

    /// Parenthesis () operator, to access a single element of the matrix, by
    /// supplying the row and the column (indexes start from 0).
    /// For example: m(3,5) gets the element at the 4th row, 6th column.
    /// Value is returned by reference, so it can be modified, like in m(1,2)=10.
    Real& operator()(const int row, const int col) {
        assert(row >= 0 && col >= 0 && row < rows && col < columns);
        return (*(address + col + (row * columns)));
    }
    const Real& operator()(const int row, const int col) const {
        assert(row >= 0 && col >= 0 && row < rows && col < columns);
        return (*(address + col + (row * columns)));
    }

    /// Parenthesis () operator, to access a single element of the matrix, by
    /// supplying the ordinal of the element (indexes start from 0).
    /// For example: m(3) gets the 4th element, counting row by row.
    /// Mostly useful if the matrix is Nx1 sized (i.e. a N-element vector).
    /// Value is returned by reference, so it can be modified, like in m(1,2)=10.
    Real& operator()(const int el) {
        assert(el >= 0 && el < rows * columns);
        return (*(address + el));
    }
    const Real& operator()(const int el) const {
        assert(el >= 0 && el < rows * columns);
        return (*(address + el));
    }

    /// The [] operator returns the address of the n-th row. This is mostly
    /// for compatibility with old matrix programming styles (2d array-like)
    /// where to access an element at row i, column j, one can write mymatrix[i][j].
    Real* operator[](const int row) {
        assert(row >= 0 && row < rows);
        return ((address + (row * columns)));
    }
    const Real* operator[](const int row) const {
        assert(row >= 0 && row < rows);
        return ((address + (row * columns)));
    }

    /// Multiplies this matrix by a factor, in place
    ChMatrix<Real>& operator*=(const Real factor) {
        MatrScale(factor);
        return *this;
    }

    /// Increments this matrix by another matrix, in place
    template <class RealB>
    ChMatrix<Real>& operator+=(const ChMatrix<RealB>& matbis) {
        MatrInc(matbis);
        return *this;
    }

    /// Decrements this matrix by another matrix, in place
    template <class RealB>
    ChMatrix<Real>& operator-=(const ChMatrix<RealB>& matbis) {
        MatrDec(matbis);
        return *this;
    }

    /// Matrices are equal?  (exact element-by-element comparison, zero tolerance)
    bool operator==(const ChMatrix<Real>& other) { return Equals(other); }

    /// Matrices are not equal?
    bool operator!=(const ChMatrix<Real>& other) { return !Equals(other); }

    /// Assignment operator (resizes the destination if needed; self-assignment safe)
    virtual ChMatrix<Real>& operator=(const ChMatrix<Real>& matbis) {
        if (&matbis != this)
            CopyFromMatrix(matbis);
        return *this;
    }
    template <class RealB>
    ChMatrix<Real>& operator=(const ChMatrix<RealB>& matbis) {
        CopyFromMatrix(matbis);
        return *this;
    }

    //
    // FUNCTIONS
    //

    /// Sets the element at row,col position. Indexes start with zero.
    void SetElement(int row, int col, Real elem) {
        assert(row >= 0 && col >= 0 && row < rows && col < columns);  // boundary checks
        *(address + col + (row * columns)) = elem;
    }

    /// Gets the element at row,col position. Indexes start with zero.
    /// The return value is a copy of original value. Use Element() instead if you
    /// want to access directly by reference the original element.
    Real GetElement(int row, int col) {
        assert(row >= 0 && col >= 0 && row < rows && col < columns);  // boundary checks
        return (*(address + col + (row * columns)));
    }
    Real GetElement(int row, int col) const {
        assert(row >= 0 && col >= 0 && row < rows && col < columns);  // boundary checks
        return (*(address + col + (row * columns)));
    }

    /// Sets the Nth element, counting row after row.
    void SetElementN(int index, Real elem) {
        assert(index >= 0 && index < (rows * columns));  // boundary checks
        *(address + index) = elem;
    }

    /// Gets the Nth element, counting row after row.
    Real GetElementN(int index) {
        assert(index >= 0 && index < (rows * columns));
        return (*(address + index));
    }
    const Real GetElementN(int index) const {
        assert(index >= 0 && index < (rows * columns));
        return (*(address + index));
    }

    /// Access a single element of the matrix, by
    /// supplying the row and the column (indexes start from 0).
    /// Value is returned by reference, so it can be modified, like in m.Element(1,2)=10.
    Real& Element(int row, int col) {
        assert(row >= 0 && col >= 0 && row < rows && col < columns);
        return (*(address + col + (row * columns)));
    }
    const Real& Element(int row, int col) const {
        assert(row >= 0 && col >= 0 && row < rows && col < columns);
        return (*(address + col + (row * columns)));
    }

    /// Access a single element of the matrix, the Nth element, counting row after row.
    /// Value is returned by reference, so it can be modified, like in m.Element(5)=10.
    Real& ElementN(int index) {
        assert(index >= 0 && index < (rows * columns));
        return (*(address + index));
    }
    const Real& ElementN(int index) const {
        assert(index >= 0 && index < (rows * columns));
        return (*(address + index));
    }

    /// Access directly the "Real* address" buffer. Warning! this is a low level
    /// function, it should be used in rare cases, if really needed!
    Real* GetAddress() { return address; }
    const Real* GetAddress() const { return address; }

    /// Gets the number of rows
    int GetRows() const { return rows; }

    /// Gets the number of columns
    int GetColumns() const { return columns; }

    /// Reallocate memory for a new size. VIRTUAL! Must be implemented by child classes!
    virtual void Resize(int nrows, int ncols) {}

    /// Swaps the columns a and b
    void SwapColumns(int a, int b) {
        Real temp;
        for (int i = 0; i < rows; i++) {
            temp = GetElement(i, a);
            SetElement(i, a, GetElement(i, b));
            SetElement(i, b, temp);
        }
    }

    /// Swap the rows a and b
    void SwapRows(int a, int b) {
        Real temp;
        for (int i = 0; i < columns; i++) {
            temp = GetElement(a, i);
            SetElement(a, i, GetElement(b, i));
            SetElement(b, i, temp);
        }
    }

    /// Fill the diagonal elements, given a sample.
    /// Note that the matrix must already be square (no check for
    /// rectangular matrices!), and the extra-diagonal elements are
    /// not modified -this function does not set them to 0-
    void FillDiag(Real sample) {
        for (int i = 0; i < rows; ++i)
            SetElement(i, i, sample);
    }

    /// Fill the matrix with the same value in all elements
    void FillElem(Real sample) {
        for (int i = 0; i < rows * columns; ++i)
            SetElementN(i, sample);
    }

    /// Fill the matrix with random float numbers, falling within the
    /// "max"/"min" range.
    void FillRandom(Real max, Real min) {
        for (int i = 0; i < rows * columns; ++i)
            SetElementN(i, min + (Real)ChRandom() * (max - min));
    }

    /// Resets the matrix to zero (warning: simply sets memory to 0 bytes!)
    virtual void Reset() {
        // SetZero(rows*columns); //memset(address, 0, sizeof(Real) * rows * columns);
        for (int i = 0; i < rows * columns; ++i)
            this->address[i] = 0;
    }

    /// Reset to zeroes and (if needed) changes the size to have row and col
    void Reset(int nrows, int ncols) {
        Resize(nrows, ncols);
        // SetZero(rows*columns); //memset(address, 0, sizeof(Real) * rows * columns);
        for (int i = 0; i < rows * columns; ++i)
            this->address[i] = 0;
    }

    /// Reset to identity matrix (ones on diagonal, zero elsewhere)
    void SetIdentity() {
        Reset();
        FillDiag(1.0);
    }

    /// Copy a matrix "matra" into this matrix. Note that
    /// the destination matrix will be resized if necessary.
    template <class RealB>
    void CopyFromMatrix(const ChMatrix<RealB>& matra) {
        Resize(matra.GetRows(), matra.GetColumns());
        // ElementsCopy(address, matra.GetAddress(), rows*columns);
        // memcpy (address, matra.address, (sizeof(Real) * rows * columns));
        // Element-wise loop (not memcpy) so that RealB -> Real conversion is applied.
        for (int i = 0; i < rows * columns; ++i)
            address[i] = (Real)matra.GetAddress()[i];
    }

    /// Copy the transpose of matrix "matra" into this matrix. Note that
    /// the destination matrix will be resized if necessary.
    template <class RealB>
    void CopyFromMatrixT(const ChMatrix<RealB>& matra) {
        Resize(matra.GetColumns(), matra.GetRows());
        for (int i = 0; i < matra.GetRows(); ++i)
            for (int j = 0; j < matra.GetColumns(); ++j)
                SetElement(j, i, (Real)matra.Element(i, j));
    }

    /// Copy the transposed upper triangular part of "matra" in the lower triangular
    /// part of this matrix. (matra must be square)
    /// Note that the destination matrix will be resized if necessary.
    //                                         _______                  //
    //  \      |                              |\      |                 //
    //    \ A' |            --->              |  \    |                 //
    //      \  |                              |this\  |                 //
    //        \|                              |______\|                 //
    template <class RealB>
    void CopyTUpMatrix(const ChMatrix<RealB>& matra) {
        Resize(matra.GetRows(), matra.GetColumns());
        // NOTE(review): both loop bounds use GetRows() — relies on matra being square.
        for (int i = 0; i < matra.GetRows(); i++) {
            for (int j = 0; j < matra.GetRows(); j++)
                SetElement(j, i, (Real)matra.GetElement(i, j));
        }
    }

    /// Copy the transposed lower triangular part of "matra" in the upper triangular
    /// part of this matrix. (matra must be square)
    /// Note that the destination matrix will be resized if necessary.
    //   _______                                                        //
    //  |\      |                               \      |                //
    //  |  \    |            --->                 \this|                //
    //  |A'  \  |                                   \  |                //
    //  |______\|                                     \|                //
    template <class RealB>
    void CopyTLwMatrix(const ChMatrix<RealB>& matra) {
        Resize(matra.GetRows(), matra.GetColumns());
        for (int i = 0; i < matra.GetRows(); i++) {
            for (int j = 0; j < matra.GetRows(); j++)
                SetElement(i, j, (Real)matra.GetElement(j, i));
        }
    }

    //
    // STREAMING
    //

    /// Method to allow serialization of transient data in archives.
    virtual void ArchiveOUT(ChArchiveOut& marchive) {
        // suggested: use versioning
        marchive.VersionWrite(1);
        // stream out all member data
        if (ChArchiveAsciiDump* mascii = dynamic_cast<ChArchiveAsciiDump*>(&marchive)) {
            // CUSTOM row x col 'intuitive' table-like log when using ChArchiveAsciiDump:
            mascii->indent();
            mascii->GetStream()->operator<<(rows);
            mascii->GetStream()->operator<<(" rows, ");
            mascii->GetStream()->operator<<(columns);
            mascii->GetStream()->operator<<(" columns:\n");
            for (int i = 0; i < rows; i++) {
                mascii->indent();
                for (int j = 0; j < columns; j++) {
                    (*mascii->GetStream()) << Element(i, j);
                    mascii->GetStream()->operator<<(", ");
                }
                mascii->GetStream()->operator<<("\n");
            }
        } else {
            marchive << make_ChNameValue("rows", rows);
            marchive << make_ChNameValue("columns", columns);
            // NORMAL array-based serialization:
            int tot_elements = GetRows() * GetColumns();
            ChValueSpecific< Real* > specVal(this->address, "data", 0);
            marchive.out_array_pre(specVal, tot_elements);
            for (int i = 0; i < tot_elements; i++) {
                marchive << CHNVP(ElementN(i), "");
                marchive.out_array_between(tot_elements);
            }
            marchive.out_array_end(tot_elements);
        }
    }

    /// Method to allow de serialization of transient data from archives.
    virtual void ArchiveIN(ChArchiveIn& marchive) {
        // suggested: use versioning
        int version = marchive.VersionRead();
        // stream in all member data
        int m_row, m_col;
        marchive >> make_ChNameValue("rows", m_row);
        marchive >> make_ChNameValue("columns", m_col);
        Reset(m_row, m_col);  // resize and zero before reading elements
        // custom input of matrix data as array
        size_t tot_elements = GetRows() * GetColumns();
        // NOTE(review): 'int i' compared against size_t tot_elements (signed/unsigned).
        marchive.in_array_pre("data", tot_elements);
        for (int i = 0; i < tot_elements; i++) {
            marchive >> CHNVP(ElementN(i));
            marchive.in_array_between("data");
        }
        marchive.in_array_end("data");
    }

    /// Method to allow serializing transient data into in ascii
    /// as a readable item, for example "chrono::GetLog() << myobject;"
    /// Prints at most an 8x8 top-left corner, with "..." for the rest.
    /// ***OBSOLETE***
    void StreamOUT(ChStreamOutAscii& mstream) {
        mstream << "\n"
                << "Matrix " << GetRows() << " rows, " << GetColumns() << " columns."
                << "\n";
        for (int i = 0; i < ChMin(GetRows(), 8); i++) {
            for (int j = 0; j < ChMin(GetColumns(), 8); j++)
                mstream << GetElement(i, j) << " ";
            if (GetColumns() > 8)
                mstream << "...";
            mstream << "\n";
        }
        if (GetRows() > 8)
            mstream << "... \n\n";
    }

    /// Method to allow serializing transient data into an ascii stream (ex. a file)
    /// as a Matlab .dat file (all numbers in a row, separated by space, then CR)
    void StreamOUTdenseMatlabFormat(ChStreamOutAscii& mstream) {
        for (int ii = 0; ii < this->GetRows(); ii++) {
            for (int jj = 0; jj < this->GetColumns(); jj++) {
                mstream << this->GetElement(ii, jj);
                if (jj < (this->GetColumns() - 1))
                    mstream << " ";
            }
            mstream << "\n";
        }
    }

    //
    // MATH MEMBER FUNCTIONS.
    // For speed reasons, sometimes size checking of operands is left to the user!
    //

    /// Changes the sign of all the elements of this matrix, in place.
    void MatrNeg() {
        for (int nel = 0; nel < rows * columns; ++nel)
            ElementN(nel) = -ElementN(nel);
    }

    /// Sum two matrices, and stores the result in "this" matrix: [this]=[A]+[B].
    template <class RealB, class RealC>
    void MatrAdd(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
        assert(matra.GetColumns() == matrb.GetColumns() && matra.rows == matrb.GetRows());
        assert(this->columns == matrb.GetColumns() && this->rows == matrb.GetRows());
        for (int nel = 0; nel < rows * columns; ++nel)
            ElementN(nel) = (Real)(matra.ElementN(nel) + matrb.ElementN(nel));
    }

    /// Subtract two matrices, and stores the result in "this" matrix: [this]=[A]-[B].
    template <class RealB, class RealC>
    void MatrSub(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
        assert(matra.GetColumns() == matrb.GetColumns() && matra.rows == matrb.GetRows());
        assert(this->columns == matrb.GetColumns() && this->rows == matrb.GetRows());
        for (int nel = 0; nel < rows * columns; ++nel)
            ElementN(nel) = (Real)(matra.ElementN(nel) - matrb.ElementN(nel));
    }

    /// Increments this matrix with another matrix A, as: [this]+=[A]
    template <class RealB>
    void MatrInc(const ChMatrix<RealB>& matra) {
        assert(matra.GetColumns() == columns && matra.GetRows() == rows);
        for (int nel = 0; nel < rows * columns; ++nel)
            ElementN(nel) += (Real)matra.ElementN(nel);
    }

    /// Increments this matrix by \p val, as [this]+=val
    void MatrInc(Real val) {
        for (int nel = 0; nel < rows * columns; ++nel)
            ElementN(nel) += val;
    }

    /// Decrements this matrix with another matrix A, as: [this]-=[A]
    template <class RealB>
    void MatrDec(const ChMatrix<RealB>& matra) {
        assert(matra.GetColumns() == columns && matra.GetRows() == rows);
        for (int nel = 0; nel < rows * columns; ++nel)
            ElementN(nel) -= (Real)matra.ElementN(nel);
    }

    /// Scales a matrix, multiplying all elements by a constant value: [this]*=f
    void MatrScale(Real factor) {
        for (int nel = 0; nel < rows * columns; ++nel)
            ElementN(nel) *= factor;
    }

    /// Scales a matrix, multiplying all element by all other elements of
    /// matra (it is not the classical matrix multiplication!)
    template <class RealB>
    void MatrScale(const ChMatrix<RealB>& matra) {
        assert(matra.GetColumns() == columns && matra.GetRows() == rows);
        for (int nel = 0; nel < rows * columns; ++nel)
            ElementN(nel) *= (Real)matra.ElementN(nel);
    }

    /// Scales a matrix, dividing all elements by a constant value: [this]/=f
    void MatrDivScale(Real factor) {
        for (int nel = 0; nel < rows * columns; ++nel)
            ElementN(nel) /= factor;
    }

    /// Scales a matrix, dividing all element by all other elements of
    /// matra (it is not the classical matrix multiplication!)
    template <class RealB>
    void MatrDivScale(const ChMatrix<RealB>& matra) {
        assert(matra.GetColumns() == columns && matra.GetRows() == rows);
        for (int nel = 0; nel < rows * columns; ++nel)
            ElementN(nel) /= (Real)matra.ElementN(nel);
    }

    /// Multiplies two matrices, and stores the result in "this" matrix: [this]=[A]*[B].
    /// Naive triple loop; "this" must already have the proper size (asserted).
    template <class RealB, class RealC>
    void MatrMultiply(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
        assert(matra.GetColumns() == matrb.GetRows());
        assert(this->rows == matra.GetRows());
        assert(this->columns == matrb.GetColumns());
        int col, row, colres;
        Real sum;
        for (colres = 0; colres < matrb.GetColumns(); ++colres) {
            for (row = 0; row < matra.GetRows(); ++row) {
                sum = 0;
                for (col = 0; col < matra.GetColumns(); ++col)
                    sum += (Real)(matra.Element(row, col) * matrb.Element(col, colres));
                SetElement(row, colres, sum);
            }
        }
    }

#ifdef CHRONO_HAS_AVX
    /// Multiplies two matrices, and stores the result in "this" matrix: [this]=[A]*[B].
    /// AVX implementation: The speed up is marginal if size of the matrices are small, e.g. 3*3
    /// Generally, as the matra.GetColumns() increases the method performs better
    /// NOTE(review): the inner store writes 4 doubles at a time; looks like it
    /// assumes B_NCol is a multiple of 4 — confirm against callers.
    void MatrMultiplyAVX(const ChMatrix<double>& matra, const ChMatrix<double>& matrb) {
        assert(matra.GetColumns() == matrb.GetRows());
        assert(this->rows == matra.GetRows());
        assert(this->columns == matrb.GetColumns());
        int A_Nrow = matra.GetRows();
        int B_Nrow = matrb.GetRows();
        int A_NCol = matra.GetColumns();
        int B_NCol = matrb.GetColumns();
        const double* A_add = matra.GetAddress();
        const double* B_add = matrb.GetAddress();
        double* this_Add = this->GetAddress();
        for (int rowA = 0; rowA < A_Nrow; rowA++) {
            for (int colB = 0; colB < B_NCol; colB += 4) {
                __m256d sum = _mm256_setzero_pd();
                for (int elem = 0; elem < A_NCol; elem++) {
                    // broadcast A(rowA, elem) and multiply with 4 consecutive B elements
                    __m256d ymmA = _mm256_broadcast_sd(A_add + A_NCol * rowA + elem);
                    __m256d ymmB = _mm256_loadu_pd(B_add + elem * B_NCol + colB);
                    __m256d prod = _mm256_mul_pd(ymmA, ymmB);
                    sum = _mm256_add_pd(sum, prod);
                }
                _mm256_storeu_pd(this_Add + rowA * B_NCol + colB, sum);
            }
        }
    }

    /// Multiplies two matrices (the second is considered transposed): [this]=[A]*[B]'
    /// Note: This method is faster than MatrMultiplyT if matra.GetColumns()%4=0 && matra.GetColumns()>8
    /// It is still fast if matra.GetColumns() is large enough even if matra.GetColumns()%4!=0
    void MatrMultiplyTAVX(const ChMatrix<double>& matra, const ChMatrix<double>& matrb) {
        assert(matra.GetColumns() == matrb.GetColumns());
        assert(this->GetRows() == matra.GetRows());
        assert(this->GetColumns() == matrb.GetRows());
        int A_Nrow = matra.GetRows();
        int B_Nrow = matrb.GetRows();
        int A_NCol = matra.GetColumns();
        int B_NCol = matrb.GetColumns();
        const double* A_add = matra.GetAddress();
        const double* B_add = matrb.GetAddress();
        bool NeedsPadding = (B_NCol % 4 != 0);
        int CorrectFAT = ((B_NCol >> 2) << 2);  // B_NCol rounded down to a multiple of 4
        for (int rowA = 0; rowA < A_Nrow; rowA++) {
            for (int rowB = 0; rowB < B_Nrow; rowB++) {
                int colB;
                double temp_sum = 0.0;
                __m256d sum = _mm256_setzero_pd();
                // vectorized dot product over the 4-aligned prefix
                for (colB = 0; colB < CorrectFAT; colB += 4) {
                    __m256d ymmA = _mm256_loadu_pd(A_add + rowA * A_NCol + colB);
                    __m256d ymmB = _mm256_loadu_pd(B_add + rowB * B_NCol + colB);
                    __m256d prod = _mm256_mul_pd(ymmA, ymmB);
                    sum = _mm256_add_pd(sum, prod);
                }
                // horizontal add: lanes 0 and 2 hold the two pairwise sums
                sum = _mm256_hadd_pd(sum, sum);
                temp_sum = ((double*)&sum)[0] + ((double*)&sum)[2];
                // scalar tail for the remaining (B_NCol % 4) columns
                if (NeedsPadding)
                    for (colB = CorrectFAT; colB < B_NCol; colB++) {
                        temp_sum += (matra.Element(rowA, colB) * matrb.Element(rowB, colB));
                    }
                SetElement(rowA, rowB, temp_sum);
            }
        }
    }
#endif

    /// Multiplies two matrices (the second is considered transposed): [this]=[A]*[B]'
    /// Faster than doing B.MatrTranspose(); result.MatrMultiply(A,B);
    /// Note: no check on mistaken size of this!
    template <class RealB, class RealC>
    void MatrMultiplyT(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
        assert(matra.GetColumns() == matrb.GetColumns());
        assert(this->rows == matra.GetRows());
        assert(this->columns == matrb.GetRows());
        int col, row, colres;
        Real sum;
        for (colres = 0; colres < matrb.GetRows(); ++colres) {
            for (row = 0; row < matra.GetRows(); ++row) {
                sum = 0;
                for (col = 0; col < matra.GetColumns(); ++col)
                    sum += (Real)(matra.Element(row, col) * matrb.Element(colres, col));
                SetElement(row, colres, sum);
            }
        }
    }

    /// Multiplies two matrices (the first is considered transposed): [this]=[A]'*[B]
    /// Faster than doing A.MatrTranspose(); result.MatrMultiply(A,B);
    template <class RealB, class RealC>
    void MatrTMultiply(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
        assert(matra.GetRows() == matrb.GetRows());
        assert(this->rows == matra.GetColumns());
        assert(this->columns == matrb.GetColumns());
        int col, row, colres;
        Real sum;
        for (colres = 0; colres < matrb.GetColumns(); ++colres) {
            for (row = 0; row < matra.GetColumns(); ++row) {
                sum = 0;
                for (col = 0; col < (matra.GetRows()); ++col)
                    sum += (Real)(matra.Element(col, row) * matrb.Element(col, colres));
                SetElement(row, colres, sum);
            }
        }
    }

    /// Computes dot product between two column-matrices (vectors) with
    /// same size. Returns a scalar value.
    /// NOTE(review): the loop runs over GetRows() only — presumably the inputs
    /// are Nx1 column vectors; a 1xN row vector would be truncated. Confirm.
    template <class RealB, class RealC>
    static Real MatrDot(const ChMatrix<RealB>& ma, const ChMatrix<RealC>& mb) {
        assert(ma.GetColumns() == mb.GetColumns() && ma.GetRows() == mb.GetRows());
        Real tot = 0;
        for (int i = 0; i < ma.GetRows(); ++i)
            tot += (Real)(ma.ElementN(i) * mb.ElementN(i));
        return tot;
    }

    /// Transpose this matrix in place
    void MatrTranspose() {
        if (columns == rows)  // Square transp.is optimized
        {
            // swap the strictly-upper triangle with the strictly-lower one
            for (int row = 0; row < rows; ++row)
                for (int col = row; col < columns; ++col)
                    if (row != col) {
                        Real temp = Element(row, col);
                        Element(row, col) = Element(col, row);
                        Element(col, row) = temp;
                    }
            int tmpr = rows;
            rows = columns;
            columns = tmpr;
        } else  // Naive implementation for rectangular case. Not in-place. Slower.
        {
            ChMatrixDynamic<Real> matrcopy(*this);
            int tmpr = rows;
            rows = columns;
            columns = tmpr;  // dont' realloc buffer, anyway
            for (int row = 0; row < rows; ++row)
                for (int col = 0; col < columns; ++col)
                    Element(row, col) = matrcopy.Element(col, row);
        }
    }

    /// Returns the determinant of the matrix.
    /// Note! This method must be used only with max 4x4 matrices,
    /// otherwise it throws an exception.
    Real Det() {
        assert(this->GetRows() == this->GetColumns());
        assert(this->GetRows() <= 4);
        if (this->GetRows() != this->GetColumns())
            throw("Cannot compute matrix determinant because rectangular matrix");
        // NOTE(review): message below says "3x3" but the check allows up to 4x4.
        if (this->GetRows() > 4)
            throw("Cannot compute matrix determinant because matr. larger than 3x3");
        Real det = 0;
        switch (this->GetRows()) {
            case 1:
                det = (*this)(0, 0);
                break;
            case 2:
                det = (*this)(0, 0) * (*this)(1, 1) - (*this)(0, 1) * (*this)(1, 0);
                break;
            case 3:
                // rule of Sarrus
                det = (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 2) +
                      (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 0) +
                      (*this)(0, 2) * (*this)(1, 0) * (*this)(2, 1) -
                      (*this)(2, 0) * (*this)(1, 1) * (*this)(0, 2) -
                      (*this)(2, 1) * (*this)(1, 2) * (*this)(0, 0) -
                      (*this)(2, 2) * (*this)(1, 0) * (*this)(0, 1);
                break;
            case 4:
                // explicit cofactor expansion (24 products)
                det = (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 3) +
                      (*this)(0, 0) * (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 1) +
                      (*this)(0, 0) * (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 2) +
                      (*this)(0, 1) * (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 2) +
                      (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 0) * (*this)(3, 3) +
                      (*this)(0, 1) * (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 0) +
                      (*this)(0, 2) * (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 3) +
                      (*this)(0, 2) * (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 0) +
                      (*this)(0, 2) * (*this)(1, 3) * (*this)(2, 0) * (*this)(3, 1) +
                      (*this)(0, 3) * (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 1) +
                      (*this)(0, 3) * (*this)(1, 1) * (*this)(2, 0) * (*this)(3, 2) +
                      (*this)(0, 3) * (*this)(1, 2) * (*this)(2, 1) * (*this)(3, 0) -
                      (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 2) -
                      (*this)(0, 0) * (*this)(1, 2) * (*this)(2, 1) * (*this)(3, 3) -
                      (*this)(0, 0) * (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 1) -
                      (*this)(0, 1) * (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 3) -
                      (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 0) -
                      (*this)(0, 1) * (*this)(1, 3) * (*this)(2, 0) * (*this)(3, 2) -
                      (*this)(0, 2) * (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 1) -
                      (*this)(0, 2) * (*this)(1, 1) * (*this)(2, 0) * (*this)(3, 3) -
                      (*this)(0, 2) * (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 0) -
                      (*this)(0, 3) * (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 2) -
                      (*this)(0, 3) * (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 0) -
                      (*this)(0, 3) * (*this)(1, 2) * (*this)(2, 0) * (*this)(3, 1);
                break;
        }
        return det;
    }

    /// Returns the inverse of the matrix.
    /// Note! This method must be used only with max 4x4 matrices,
    /// otherwise it throws an exception.
    void MatrInverse() {
        assert(this->GetRows() == this->GetColumns());
        assert(this->GetRows() <= 4);
        assert(this->Det() != 0);
        if (this->GetRows() != this->GetColumns())
            throw("Cannot compute matrix inverse because rectangular matrix");
        if (this->GetRows() > 4)
            throw("Cannot compute matrix inverse because matr. larger than 4x4");
        if (this->Det() == 0)
            throw("Cannot compute matrix inverse because singular matrix");
        switch (this->GetRows()) {
            case 1:
                (*this)(0, 0) = (1 / (*this)(0, 0));
                break;
            case 2: {
                // adjugate / det
                ChMatrixDynamic<Real> inv(2, 2);
                inv(0, 0) = (*this)(1, 1);
                inv(0, 1) = -(*this)(0, 1);
                inv(1, 1) = (*this)(0, 0);
                inv(1, 0) = -(*this)(1, 0);
                inv.MatrDivScale(this->Det());
                this->CopyFromMatrix(inv);
                break;
            }
            case 3: {
                // cofactor matrix transposed (adjugate), then divide by det
                ChMatrixDynamic<Real> inv(3, 3);
                inv(0, 0) = (*this)(1, 1) * (*this)(2, 2) - (*this)(1, 2) * (*this)(2, 1);
                inv(0, 1) = (*this)(2, 1) * (*this)(0, 2) - (*this)(0, 1) * (*this)(2, 2);
                inv(0, 2) = (*this)(0, 1) * (*this)(1, 2) - (*this)(0, 2) * (*this)(1, 1);
                inv(1, 0) = (*this)(1, 2) * (*this)(2, 0) - (*this)(1, 0) * (*this)(2, 2);
                inv(1, 1) = (*this)(2, 2) * (*this)(0, 0) - (*this)(2, 0) * (*this)(0, 2);
                inv(1, 2) = (*this)(0, 2) * (*this)(1, 0) - (*this)(1, 2) * (*this)(0, 0);
                inv(2, 0) = (*this)(1, 0) * (*this)(2, 1) - (*this)(1, 1) * (*this)(2, 0);
                inv(2, 1) = (*this)(0, 1) * (*this)(2, 0) - (*this)(0, 0) * (*this)(2, 1);
                inv(2, 2) = (*this)(0, 0) * (*this)(1, 1) - (*this)(0, 1) * (*this)(1, 0);
                inv.MatrDivScale(this->Det());
                this->CopyFromMatrix(inv);
                break;
            }
            case 4: {
                // explicit 4x4 adjugate, then divide by det
                ChMatrixDynamic<Real> inv(4, 4);
                inv.SetElement(
                    0, 0,
                    (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 1) - (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 1) +
                        (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 2) - (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 2) -
                        (*this)(1, 2) * (*this)(2, 1) * (*this)(3, 3) + (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 3));
                inv.SetElement(
                    0, 1,
                    (*this)(0, 3) * (*this)(2, 2) * (*this)(3, 1) - (*this)(0, 2) * (*this)(2, 3) * (*this)(3, 1) -
                        (*this)(0, 3) * (*this)(2, 1) * (*this)(3, 2) + (*this)(0, 1) * (*this)(2, 3) * (*this)(3, 2) +
                        (*this)(0, 2) * (*this)(2, 1) * (*this)(3, 3) - (*this)(0, 1) * (*this)(2, 2) * (*this)(3, 3));
                inv.SetElement(
                    0, 2,
                    (*this)(0, 2) * (*this)(1, 3) * (*this)(3, 1) - (*this)(0, 3) * (*this)(1, 2) * (*this)(3, 1) +
                        (*this)(0, 3) * (*this)(1, 1) * (*this)(3, 2) - (*this)(0, 1) * (*this)(1, 3) * (*this)(3, 2) -
                        (*this)(0, 2) * (*this)(1, 1) * (*this)(3, 3) + (*this)(0, 1) * (*this)(1, 2) * (*this)(3, 3));
                inv.SetElement(
                    0, 3,
                    (*this)(0, 3) * (*this)(1, 2) * (*this)(2, 1) - (*this)(0, 2) * (*this)(1, 3) * (*this)(2, 1) -
                        (*this)(0, 3) * (*this)(1, 1) * (*this)(2, 2) + (*this)(0, 1) * (*this)(1, 3) * (*this)(2, 2) +
                        (*this)(0, 2) * (*this)(1, 1) * (*this)(2, 3) - (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 3));
                inv.SetElement(
                    1, 0,
                    (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 0) - (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 0) -
                        (*this)(1, 3) * (*this)(2, 0) * (*this)(3, 2) + (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 2) +
                        (*this)(1, 2) * (*this)(2, 0) * (*this)(3, 3) - (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 3));
                inv.SetElement(
                    1, 1,
                    (*this)(0, 2) * (*this)(2, 3) * (*this)(3, 0) - (*this)(0, 3) * (*this)(2, 2) * (*this)(3, 0) +
                        (*this)(0, 3) * (*this)(2, 0) * (*this)(3, 2) - (*this)(0, 0) * (*this)(2, 3) * (*this)(3, 2) -
                        (*this)(0, 2) * (*this)(2, 0) * (*this)(3, 3) + (*this)(0, 0) * (*this)(2, 2) * (*this)(3, 3));
                inv.SetElement(
                    1, 2,
                    (*this)(0, 3) * (*this)(1, 2) * (*this)(3, 0) - (*this)(0, 2) * (*this)(1, 3) * (*this)(3, 0) -
                        (*this)(0, 3) * (*this)(1, 0) * (*this)(3, 2) + (*this)(0, 0) * (*this)(1, 3) * (*this)(3, 2) +
                        (*this)(0, 2) * (*this)(1, 0) * (*this)(3, 3) - (*this)(0, 0) * (*this)(1, 2) * (*this)(3, 3));
                inv.SetElement(
                    1, 3,
                    (*this)(0, 2) * (*this)(1, 3) * (*this)(2, 0) - (*this)(0, 3) * (*this)(1, 2) * (*this)(2, 0) +
                        (*this)(0, 3) * (*this)(1, 0) * (*this)(2, 2) - (*this)(0, 0) * (*this)(1, 3) * (*this)(2, 2) -
                        (*this)(0, 2) * (*this)(1, 0) * (*this)(2, 3) + (*this)(0, 0) * (*this)(1, 2) * (*this)(2, 3));
                inv.SetElement(
                    2, 0,
                    (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 0) - (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 0) +
                        (*this)(1, 3) * (*this)(2, 0) * (*this)(3, 1) - (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 1) -
                        (*this)(1, 1) * (*this)(2, 0) * (*this)(3, 3) + (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 3));
                inv.SetElement(
                    2, 1,
                    (*this)(0, 3) * (*this)(2, 1) * (*this)(3, 0) - (*this)(0, 1) * (*this)(2, 3) * (*this)(3, 0) -
                        (*this)(0, 3) * (*this)(2, 0) * (*this)(3, 1) + (*this)(0, 0) * (*this)(2, 3) * (*this)(3, 1) +
                        (*this)(0, 1) * (*this)(2, 0) * (*this)(3, 3) - (*this)(0, 0) * (*this)(2, 1) * (*this)(3, 3));
                inv.SetElement(
                    2, 2,
                    (*this)(0, 1) * (*this)(1, 3) * (*this)(3, 0) - (*this)(0, 3) * (*this)(1, 1) * (*this)(3, 0) +
                        (*this)(0, 3) * (*this)(1, 0) * (*this)(3, 1) - (*this)(0, 0) * (*this)(1, 3) * (*this)(3, 1) -
                        (*this)(0, 1) * (*this)(1, 0) * (*this)(3, 3) + (*this)(0, 0) * (*this)(1, 1) * (*this)(3, 3));
                inv.SetElement(
                    2, 3,
                    (*this)(0, 3) * (*this)(1, 1) * (*this)(2, 0) - (*this)(0, 1) * (*this)(1, 3) * (*this)(2, 0) -
                        (*this)(0, 3) * (*this)(1, 0) * (*this)(2, 1) + (*this)(0, 0) * (*this)(1, 3) * (*this)(2, 1) +
                        (*this)(0, 1) * (*this)(1, 0) * (*this)(2, 3) - (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 3));
                inv.SetElement(
                    3, 0,
                    (*this)(1, 2) * (*this)(2, 1) * (*this)(3, 0) - (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 0) -
                        (*this)(1, 2) * (*this)(2, 0) * (*this)(3, 1) + (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 1) +
                        (*this)(1, 1) * (*this)(2, 0) * (*this)(3, 2) - (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 2));
                inv.SetElement(
                    3, 1,
                    (*this)(0, 1) * (*this)(2, 2) * (*this)(3, 0) - (*this)(0, 2) * (*this)(2, 1) * (*this)(3, 0) +
                        (*this)(0, 2) * (*this)(2, 0) * (*this)(3, 1) - (*this)(0, 0) * (*this)(2, 2) * (*this)(3, 1) -
                        (*this)(0, 1) * (*this)(2, 0) * (*this)(3, 2) + (*this)(0, 0) * (*this)(2, 1) * (*this)(3, 2));
                inv.SetElement(
                    3, 2,
                    (*this)(0, 2) * (*this)(1, 1) * (*this)(3, 0) - (*this)(0, 1) * (*this)(1, 2) * (*this)(3, 0) -
                        (*this)(0, 2) * (*this)(1, 0) * (*this)(3, 1) + (*this)(0, 0) * (*this)(1, 2) * (*this)(3, 1) +
                        (*this)(0, 1) * (*this)(1, 0) * (*this)(3, 2) - (*this)(0, 0) * (*this)(1, 1) * (*this)(3, 2));
                inv.SetElement(
                    3, 3,
                    (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 0) - (*this)(0, 2) * (*this)(1, 1) * (*this)(2, 0) +
                        (*this)(0, 2) * (*this)(1, 0) * (*this)(2, 1) - (*this)(0, 0) * (*this)(1, 2) * (*this)(2, 1) -
                        (*this)(0, 1) * (*this)(1, 0) * (*this)(2, 2) + (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 2));
                inv.MatrDivScale(this->Det());
                this->CopyFromMatrix(inv);
                break;
            }
        }
    }

    /// Returns true if vector is identical to other matrix
    bool Equals(const ChMatrix<Real>& other) const { return Equals(other, 0.0); }

    /// Returns true if vector equals another vector, within a tolerance 'tol'
    bool Equals(const ChMatrix<Real>& other, Real tol) const {
        if ((other.GetColumns() != this->columns) || (other.GetRows() != this->rows))
            return false;
        for (int nel = 0; nel < rows * columns; ++nel)
            if (fabs(ElementN(nel) - other.ElementN(nel)) > tol)
                return false;
        return true;
    }

    /// Multiplies this 3x4 matrix by a quaternion, as v=[G]*q
    /// The matrix must be 3x4.
    /// \return The result of the multiplication, i.e. a vector.
/// Multiplies this 3x4 matrix by a quaternion, as v=[G]*q
/// The matrix must be 3x4.
/// \return The result of the multiplication, i.e. a vector.
template <class RealB> ChVector<Real> Matr34_x_Quat(const ChQuaternion<RealB>& qua) {
    assert((rows == 3) && (columns == 4));
    return ChVector<Real>(Get34Element(0, 0) * (Real)qua.e0() + Get34Element(0, 1) * (Real)qua.e1() +
                              Get34Element(0, 2) * (Real)qua.e2() + Get34Element(0, 3) * (Real)qua.e3(),
                          Get34Element(1, 0) * (Real)qua.e0() + Get34Element(1, 1) * (Real)qua.e1() +
                              Get34Element(1, 2) * (Real)qua.e2() + Get34Element(1, 3) * (Real)qua.e3(),
                          Get34Element(2, 0) * (Real)qua.e0() + Get34Element(2, 1) * (Real)qua.e1() +
                              Get34Element(2, 2) * (Real)qua.e2() + Get34Element(2, 3) * (Real)qua.e3());
}

/// Multiplies this 3x4 matrix (transposed) by a vector, as q=[G]'*v
/// The matrix must be 3x4.
/// \return The result of the multiplication, i.e. a quaternion.
template <class RealB> ChQuaternion<Real> Matr34T_x_Vect(const ChVector<RealB>& va) {
    assert((rows == 3) && (columns == 4));
    return ChQuaternion<Real>(
        Get34Element(0, 0) * (Real)va.x() + Get34Element(1, 0) * (Real)va.y() + Get34Element(2, 0) * (Real)va.z(),
        Get34Element(0, 1) * (Real)va.x() + Get34Element(1, 1) * (Real)va.y() + Get34Element(2, 1) * (Real)va.z(),
        Get34Element(0, 2) * (Real)va.x() + Get34Element(1, 2) * (Real)va.y() + Get34Element(2, 2) * (Real)va.z(),
        Get34Element(0, 3) * (Real)va.x() + Get34Element(1, 3) * (Real)va.y() + Get34Element(2, 3) * (Real)va.z());
}

/// Multiplies this 4x4 matrix (transposed) by a quaternion,
/// The matrix must be 4x4.
/// \return The result of the multiplication, i.e. a quaternion.
template <class RealB> ChQuaternion<Real> Matr44_x_Quat(const ChQuaternion<RealB>& qua) {
    assert((rows == 4) && (columns == 4));
    return ChQuaternion<Real>(Get44Element(0, 0) * (Real)qua.e0() + Get44Element(0, 1) * (Real)qua.e1() +
                                  Get44Element(0, 2) * (Real)qua.e2() + Get44Element(0, 3) * (Real)qua.e3(),
                              Get44Element(1, 0) * (Real)qua.e0() + Get44Element(1, 1) * (Real)qua.e1() +
                                  Get44Element(1, 2) * (Real)qua.e2() + Get44Element(1, 3) * (Real)qua.e3(),
                              Get44Element(2, 0) * (Real)qua.e0() + Get44Element(2, 1) * (Real)qua.e1() +
                                  Get44Element(2, 2) * (Real)qua.e2() + Get44Element(2, 3) * (Real)qua.e3(),
                              Get44Element(3, 0) * (Real)qua.e0() + Get44Element(3, 1) * (Real)qua.e1() +
                                  Get44Element(3, 2) * (Real)qua.e2() + Get44Element(3, 3) * (Real)qua.e3());
}

/// Transposes only the lower-right 3x3 submatrix of a hemisymmetric 4x4 matrix,
/// used when the 4x4 matrix is a "star" matrix [q] coming from a quaternion q:
/// the non commutative quat. product is:
///     q1 x q2  =  [q1]*q2  =  [q2st]*q1
/// where [q2st] is the "semi-transpose" of [q2].
// Only the six off-diagonal elements of the lower-right 3x3 change sign;
// row 0 and column 0 are left untouched.
void MatrXq_SemiTranspose() {
    SetElement(1, 2, -GetElement(1, 2));
    SetElement(1, 3, -GetElement(1, 3));
    SetElement(2, 1, -GetElement(2, 1));
    SetElement(2, 3, -GetElement(2, 3));
    SetElement(3, 1, -GetElement(3, 1));
    SetElement(3, 2, -GetElement(3, 2));
}

/// Change the sign of the 2nd, 3rd and 4th columns of a 4x4 matrix,
/// The product between a quaternion q1 and the conjugate of q2 (q2'), is:
///    q1 x q2'  = [q1]*q2'   = [q1sn]*q2
/// where [q1sn] is the semi-negation of the 4x4 matrix [q1].
// Note j starts at 1: column 0 keeps its sign.
void MatrXq_SemiNeg() {
    for (int i = 0; i < rows; ++i)
        for (int j = 1; j < columns; ++j)
            SetElement(i, j, -GetElement(i, j));
}

/// Gets the norm infinite of the matrix, i.e. the max.
/// of its elements in absolute value.
Real NormInf() const {
    Real norm = 0;
    for (int nel = 0; nel < rows * columns; ++nel)
        if ((fabs(ElementN(nel))) > norm)
            norm = fabs(ElementN(nel));
    return norm;
}

/// Gets the norm two of the matrix, i.e. the square root
/// of the sum of the elements squared (Frobenius norm).
Real NormTwo() const {
    Real norm = 0;
    for (int nel = 0; nel < rows * columns; ++nel)
        norm += ElementN(nel) * ElementN(nel);
    return (sqrt(norm));
}

/// Finds max value among the values of the matrix.
// Assumes the matrix has at least one element (seeds with element (0,0)).
Real Max() const {
    Real mmax = GetElement(0, 0);
    for (int nel = 0; nel < rows * columns; ++nel)
        if (ElementN(nel) > mmax)
            mmax = ElementN(nel);
    return mmax;
}

/// Finds min value among the values of the matrix.
// Assumes the matrix has at least one element (seeds with element (0,0)).
Real Min() const {
    Real mmin = GetElement(0, 0);
    for (int nel = 0; nel < rows * columns; ++nel)
        if (ElementN(nel) < mmin)
            mmin = ElementN(nel);
    return mmin;
}

/// Linear interpolation of two matrices. Parameter mx must be 0...1.
/// [this] = (1-x)[A] + (x)[B]    Matrices must have the same size!!
void LinInterpolate(const ChMatrix<Real>& matra, const ChMatrix<Real>& matrb, Real mx) {
    assert(matra.columns == matrb.columns && matra.rows == matrb.rows);
    for (int nel = 0; nel < rows * columns; nel++)
        ElementN(nel) = matra.ElementN(nel) * (1 - mx) + matrb.ElementN(nel) * (mx);
}

/// Fills a matrix or a vector with a bilinear interpolation,
/// from corner values (as a u-v patch).
// Row index iv interpolates vmin..vmax, column index iu adds umin..umax;
// degenerate (single row/column) directions are skipped.
void RowColInterp(Real vmin, Real vmax, Real umin, Real umax) {
    for (int iu = 0; iu < GetColumns(); iu++)
        for (int iv = 0; iv < GetRows(); iv++) {
            if (GetRows() > 1)
                Element(iv, iu) = vmin + (vmax - vmin) * ((Real)iv / ((Real)(GetRows() - 1)));
            if (GetColumns() > 1)
                Element(iv, iu) += umin + (umax - umin) * ((Real)iu / ((Real)(GetColumns() - 1)));
        }
}

//
// BOOKKEEPING
//

/// Paste a matrix "matra" into "this", inserting at location insrow-inscol.
/// Normal copy for insrow=inscol=0.
// No bounds checking: caller must guarantee matra fits inside this matrix.
template <class RealB> void PasteMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
    for (int i = 0; i < matra.GetRows(); ++i)
        for (int j = 0; j < matra.GetColumns(); ++j)
            Element(i + insrow, j + inscol) = (Real)matra.Element(i, j);
}

/// Paste a matrix "matra" into "this", inserting at location insrow-inscol
/// and performing a sum with the preexisting values.
template <class RealB> void PasteSumMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
    for (int i = 0; i < matra.GetRows(); ++i)
        for (int j = 0; j < matra.GetColumns(); ++j)
            Element(i + insrow, j + inscol) += (Real)matra.Element(i, j);
}

/// Paste a matrix "matra", transposed, into "this", inserting at location insrow-inscol.
/// Normal copy for insrow=inscol=0.
template <class RealB> void PasteTranspMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
    for (int i = 0; i < matra.GetRows(); ++i)
        for (int j = 0; j < matra.GetColumns(); ++j)
            Element(j + insrow, i + inscol) = (Real)matra.Element(i, j);
}

/// Paste a matrix "matra", transposed, into "this", inserting at location insrow-inscol
/// and performing a sum with the preexisting values.
template <class RealB> void PasteSumTranspMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
    for (int i = 0; i < matra.GetRows(); ++i)
        for (int j = 0; j < matra.GetColumns(); ++j)
            Element(j + insrow, i + inscol) += (Real)matra.Element(i, j);
}

/// Paste a clipped portion of the matrix "matra" into "this",
/// inserting the clip (of size nrows, ncolumns) at the location insrow-inscol.
template <class RealB>
void PasteClippedMatrix(const ChMatrix<RealB>& matra,
                        int cliprow,
                        int clipcol,
                        int nrows,
                        int ncolumns,
                        int insrow,
                        int inscol) {
    for (int i = 0; i < nrows; ++i)
        for (int j = 0; j < ncolumns; ++j)
            Element(i + insrow, j + inscol) = (Real)matra.Element(i + cliprow, j + clipcol);
}

/// Paste a clipped portion of the matrix "matra" into "this", where "this"
/// is a vector (of ChMatrix type),
/// inserting the clip (of size nrows, ncolumns) at the location insindex.
// The clip is flattened row-major starting at linear index insindex.
template <class RealB>
void PasteClippedMatrixToVector(const ChMatrix<RealB>& matra,
                                int cliprow,
                                int clipcol,
                                int nrows,
                                int ncolumns,
                                int insindex) {
    for (int i = 0; i < nrows; ++i)
        for (int j = 0; j < ncolumns; ++j)
            ElementN(insindex + i * ncolumns + j) = (Real)matra.Element(cliprow + i, clipcol + j);
}

/// Paste a clipped portion of a vector into "this", where "this"
/// is a matrix (of ChMatrix type),
/// inserting the clip (of size nrows, ncolumns) at the location insindex.
// Inverse of PasteClippedMatrixToVector: reads row-major from linear index insindex.
template <class RealB>
void PasteClippedVectorToMatrix(const ChMatrix<RealB>& matra,
                                int cliprow,
                                int clipcol,
                                int nrows,
                                int ncolumns,
                                int insindex) {
    for (int i = 0; i < nrows; ++i)
        for (int j = 0; j < ncolumns; ++j)
            Element(i + cliprow, j + clipcol) = (Real)matra.ElementN(insindex + i * ncolumns + j);
}

/// Paste a clipped portion of the matrix "matra" into "this", performing a sum with preexisting values,
/// inserting the clip (of size nrows, ncolumns) at the location insrow-inscol.
template <class RealB>
void PasteSumClippedMatrix(const ChMatrix<RealB>& matra,
                           int cliprow,
                           int clipcol,
                           int nrows,
                           int ncolumns,
                           int insrow,
                           int inscol) {
    for (int i = 0; i < nrows; ++i)
        for (int j = 0; j < ncolumns; ++j)
            // Atomic update: this paste may be called concurrently from OpenMP
            // threads accumulating into the same destination matrix.
#pragma omp atomic
            Element(i + insrow, j + inscol) += (Real)matra.Element(i + cliprow, j + clipcol);
}

/// Paste a vector "va" into the matrix.
template <class RealB> void PasteVector(const ChVector<RealB>& va, int insrow, int inscol) {
    SetElement(insrow + 0, inscol, (Real)va.x());
    SetElement(insrow + 1, inscol, (Real)va.y());
    SetElement(insrow + 2, inscol, (Real)va.z());
}

/// Paste a vector "va" into the matrix, summing it with preexisting values.
template <class RealB> void PasteSumVector(const ChVector<RealB>& va, int insrow, int inscol) {
    Element(insrow + 0, inscol) += (Real)va.x();
    Element(insrow + 1, inscol) += (Real)va.y();
    Element(insrow + 2, inscol) += (Real)va.z();
}

/// Paste a vector "va" into the matrix, subtracting it from preexisting values.
template <class RealB> void PasteSubVector(const ChVector<RealB>& va, int insrow, int inscol) {
    Element(insrow + 0, inscol) -= (Real)va.x();
    Element(insrow + 1, inscol) -= (Real)va.y();
    Element(insrow + 2, inscol) -= (Real)va.z();
}

/// Paste a quaternion into the matrix.
template <class RealB> void PasteQuaternion(const ChQuaternion<RealB>& qa, int insrow, int inscol) {
    SetElement(insrow + 0, inscol, (Real)qa.e0());
    SetElement(insrow + 1, inscol, (Real)qa.e1());
    SetElement(insrow + 2, inscol, (Real)qa.e2());
    SetElement(insrow + 3, inscol, (Real)qa.e3());
}

/// Paste a quaternion into the matrix, summing it with preexisting values.
template <class RealB> void PasteSumQuaternion(const ChQuaternion<RealB>& qa, int insrow, int inscol) {
    Element(insrow + 0, inscol) += (Real)qa.e0();
    Element(insrow + 1, inscol) += (Real)qa.e1();
    Element(insrow + 2, inscol) += (Real)qa.e2();
    Element(insrow + 3, inscol) += (Real)qa.e3();
}

/// Paste a coordsys into the matrix: position at rows insrow..insrow+2,
/// rotation quaternion at rows insrow+3..insrow+6 (7 rows total).
template <class RealB> void PasteCoordsys(const ChCoordsys<RealB>& cs, int insrow, int inscol) {
    PasteVector(cs.pos, insrow, inscol);
    PasteQuaternion(cs.rot, insrow + 3, inscol);
}

/// Returns the vector clipped from insrow, inscol.
ChVector<Real> ClipVector(int insrow, int inscol) const {
    return ChVector<Real>(Element(insrow, inscol), Element(insrow + 1, inscol), Element(insrow + 2, inscol));
}

/// Returns the quaternion clipped from insrow, inscol.
ChQuaternion<Real> ClipQuaternion(int insrow, int inscol) const {
    return ChQuaternion<Real>(Element(insrow, inscol), Element(insrow + 1, inscol), Element(insrow + 2, inscol),
                              Element(insrow + 3, inscol));
}

/// Returns the coordsys clipped from insrow, inscol
/// (inverse of PasteCoordsys: 3 position rows + 4 quaternion rows).
ChCoordsys<Real> ClipCoordsys(int insrow, int inscol) const {
    return ChCoordsys<Real>(ClipVector(insrow, inscol), ClipQuaternion(insrow + 3, inscol));
}

//
// MULTIBODY SPECIFIC MATH FUCTION
//

/// Fills a 4x4 matrix as the "star" matrix, representing quaternion cross product.
/// That is, given two quaternions a and b, aXb= [Astar]*b
template <class RealB> void Set_Xq_matrix(const ChQuaternion<RealB>& q) {
    Set44Element(0, 0, (Real)q.e0());
    Set44Element(0, 1, -(Real)q.e1());
    Set44Element(0, 2, -(Real)q.e2());
    Set44Element(0, 3, -(Real)q.e3());
    Set44Element(1, 0, (Real)q.e1());
    Set44Element(1, 1, (Real)q.e0());
    Set44Element(1, 2, -(Real)q.e3());
    Set44Element(1, 3, (Real)q.e2());
    Set44Element(2, 0, (Real)q.e2());
    Set44Element(2, 1, (Real)q.e3());
    Set44Element(2, 2, (Real)q.e0());
    Set44Element(2, 3, -(Real)q.e1());
    Set44Element(3, 0, (Real)q.e3());
    Set44Element(3, 1, -(Real)q.e2());
    Set44Element(3, 2, (Real)q.e1());
    Set44Element(3, 3, (Real)q.e0());
}
};

}  // end namespace chrono

#endif
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H

namespace Eigen {

namespace internal {

template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;

/* Specialization for a row-major destination matrix => simple transposition of the product */
// Implements res += alpha * lhs * rhs by forwarding to the col-major kernel on
// the transposed problem: C = A*B  <=>  C^T = B^T * A^T (storage orders flipped).
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
  typedef gebp_traits<RhsScalar,LhsScalar> Traits;

  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
  }
};

/* Specialization for a col-major destination matrix
 * => Blocking algorithm following Goto's paper:
 *    the operands are cut into kc x mc / kc x nc panels that are packed into
 *    cache-friendly buffers (blockA/blockB) and multiplied by the gebp kernel. */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{

typedef gebp_traits<LhsScalar,RhsScalar> Traits;

typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
  const LhsScalar* _lhs, Index lhsStride,
  const RhsScalar* _rhs, Index rhsStride,
  ResScalar* _res, Index resStride,
  ResScalar alpha,
  level3_blocking<LhsScalar,RhsScalar>& blocking,
  GemmParallelInfo<Index>* info = 0)
{
  typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
  typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
  typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
  LhsMapper lhs(_lhs,lhsStride);
  RhsMapper rhs(_rhs,rhsStride);
  ResMapper res(_res, resStride);

  Index kc = blocking.kc();                   // cache block size along the K direction
  Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
  Index nc = (std::min)(cols,blocking.nc());  // cache block size along the N direction

  gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
  gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
  gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
  if(info)
  {
    // this is the parallel version!
    // Each thread packs its own horizontal slice of the lhs into the shared
    // blockA buffer and synchronizes with the others through the
    // info[].users / info[].sync counters (no locks on the hot path).
    Index tid = omp_get_thread_num();
    Index threads = omp_get_num_threads();

    LhsScalar* blockA = blocking.blockA();
    eigen_internal_assert(blockA!=0);

    std::size_t sizeB = kc*nc;
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);

    // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
    for(Index k=0; k<depth; k+=kc)
    {
      const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'

      // In order to reduce the chance that a thread has to wait for the other,
      // let's start by packing B'.
      pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);

      // Pack A_k to A' in a parallel fashion:
      // each thread packs the sub block A_k,i to A'_i where i is the thread id.

      // However, before copying to A'_i, we have to make sure that no other thread is still using it,
      // i.e., we test that info[tid].users equals 0.
      // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
      while(info[tid].users!=0) {}
      info[tid].users += threads;

      pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);

      // Notify the other threads that the part A'_i is ready to go.
      info[tid].sync = k;

      // Computes C_i += A' * B' per A'_i
      for(Index shift=0; shift<threads; ++shift)
      {
        Index i = (tid+shift)%threads;

        // At this point we have to make sure that A'_i has been updated by the thread i,
        // we use testAndSetOrdered to mimic a volatile access.
        // However, no need to wait for the B' part which has been updated by the current thread!
        if (shift>0) {
          while(info[i].sync!=k) {
          }
        }

        gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
      }

      // Then keep going as usual with the remaining B'
      for(Index j=nc; j<cols; j+=nc)
      {
        const Index actual_nc = (std::min)(j+nc,cols)-j;

        // pack B_k,j to B'
        pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);

        // C_j += A' * B'
        gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
      }

      // Release all the sub blocks A'_i of A' for the current thread,
      // i.e., we simply decrement the number of users by 1
      #pragma omp critical
      {
        for(Index i=0; i<threads; ++i)
          #pragma omp atomic
          --(info[i].users);
      }
    }
  }
  else
#endif // EIGEN_HAS_OPENMP
  {
    EIGEN_UNUSED_VARIABLE(info);

    // this is the sequential version!
    std::size_t sizeA = kc*mc;
    std::size_t sizeB = kc*nc;

    ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());

    // If the whole rhs fits in one kc x nc block, it only needs to be packed once.
    const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;

    // For each horizontal panel of the rhs, and corresponding panel of the lhs...
    for(Index i2=0; i2<rows; i2+=mc)
    {
      const Index actual_mc = (std::min)(i2+mc,rows)-i2;

      for(Index k2=0; k2<depth; k2+=kc)
      {
        const Index actual_kc = (std::min)(k2+kc,depth)-k2;

        // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
        // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
        // Note that this panel will be read as many times as the number of blocks in the rhs's
        // horizontal panel which is, in practice, a very low number.
        pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);

        // For each kc x nc block of the rhs's horizontal panel...
        for(Index j2=0; j2<cols; j2+=nc)
        {
          const Index actual_nc = (std::min)(j2+nc,cols)-j2;

          // We pack the rhs's block into a sequential chunk of memory (L2 caching)
          // Note that this block will be read a very high number of times, which is equal to the number of
          // micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
          if((!pack_rhs_once) || i2==0)
            pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);

          // Everything is packed, we can now call the panel * block kernel:
          gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
        }
      }
    }
  }
}

};

/*********************************************************************************
*  Specialization of GeneralProduct<> for "large" GEMM, i.e.,
*  implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/

template<typename Lhs, typename Rhs>
struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
 : traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> >
{};

// Functor bundling one GEMM call; invoked (possibly per row-slice, with a
// GemmParallelInfo) by internal::parallelize_gemm.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  // Prepare the shared blocking buffers before the parallel region starts.
  void initParallelSession(Index num_threads) const
  {
    m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
    m_blocking.allocateA();
  }

  // Run the product on the [row, row+rows) x [col, col+cols) part of the result.
  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              &m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              &m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  typedef typename Gemm::Traits Traits;

  protected:
    const Lhs& m_lhs;
    const Rhs& m_rhs;
    Dest& m_dest;
    Scalar m_actualAlpha;
    BlockingType& m_blocking;
};

template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;

// Base class holding the packing buffers and the mc/nc/kc cache block sizes.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;

    Index m_mc;
    Index m_nc;
    Index m_kc;

  public:

    level3_blocking()
      : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline Index mc() const { return m_mc; }
    inline Index nc() const { return m_nc; }
    inline Index kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
};

// Blocking for problems whose sizes are all known at compile time:
// the packing buffers live inside the object (no heap allocation).
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth
    };

    EIGEN_ALIGN_DEFAULT LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN_DEFAULT RhsScalar m_staticB[SizeB];

  public:

    gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
    }

    void initParallel(Index, Index, Index, Index)
    {}

    // Buffers are static members: allocation is a no-op.
    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateAll() {}
};

// Blocking for runtime-sized problems: block sizes are computed from the cache
// hierarchy and the packing buffers are heap-allocated lazily.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    Index m_sizeA;
    Index m_sizeB;

  public:

    gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      if(l3_blocking)
      {
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
      }
      else  // no l3 blocking
      {
        // keep mc/nc equal to the full problem size (only kc is tuned)
        Index m = this->m_mc;
        Index n = this->m_nc;
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, n, num_threads);
      }

      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void initParallel(Index rows, Index cols, Index depth, Index num_threads)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
      Index m = this->m_mc;
      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
    }

    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
    }
};

} // end namespace internal

namespace internal {

// Dense * dense product evaluator for "large" products (GemmProduct mode):
// dispatches small problems to the lazy coefficient-based product and large
// ones to the blocked, possibly parallel, GEMM kernel above.
template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  typedef typename Lhs::Scalar LhsScalar;
  typedef typename Rhs::Scalar RhsScalar;

  typedef internal::blas_traits<Lhs> LhsBlasTraits;
  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
  typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;

  typedef internal::blas_traits<Rhs> RhsBlasTraits;
  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
  typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;

  enum {
    MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
  };

  typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;

  template<typename Dst>
  static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    // small-problem heuristic: fall back to the lazy (unblocked) product
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::evalTo(dst, lhs, rhs);
    else
    {
      dst.setZero();
      scaleAndAddTo(dst, lhs, rhs, Scalar(1));
    }
  }

  template<typename Dst>
  static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::addTo(dst, lhs, rhs);
    else
      scaleAndAddTo(dst,lhs, rhs, Scalar(1));
  }

  template<typename Dst>
  static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::subTo(dst, lhs, rhs);
    else
      scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
  }

  // dst += alpha * lhs * rhs (scalar factors of the operands are folded into alpha)
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
  {
    eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
    if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
      return;

    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);

    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
                               * RhsBlasTraits::extractScalarFactor(a_rhs);

    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
            Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;

    typedef internal::gemm_functor<
      Scalar, Index,
      internal::general_matrix_matrix_product<
        Index,
        LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
        RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
        (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
      ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;

    BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);

    internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
        (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), Dest::Flags&RowMajorBit);
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
nmt_mask_flat.c
#include "config.h"
#include "utils.h"

/* Apodize a flat-sky mask with the "C1"/"C2" cosine schemes:
 * every unmasked pixel closer than aposize (degrees) to a masked (<=0)
 * pixel is multiplied by a radial taper f(x), x = r/aporad in [0,1):
 *   C1: f = x - sin(2*pi*x)/(2*pi)
 *   C2: f = (1 - cos(pi*x))/2
 * mask_out may alias mask_in (in-place operation is supported). */
static void apodize_mask_CX(nmt_flatsky_info *fs,flouble *mask_in,flouble *mask_out,
			    flouble aposize,char *apotype)
{
  double aporad=aposize*M_PI/180; //apodization scale in radians
  int apotyp=0;
  if(!strcmp(apotype,"C1"))
    apotyp=0;
  else if(!strcmp(apotype,"C2"))
    apotyp=1;
  else
    report_error(NMT_ERROR_APO,"Unknown apodization type %s\n",apotype);

  if(mask_out!=mask_in)
    memcpy(mask_out,mask_in,fs->nx*fs->ny*sizeof(flouble));

#pragma omp parallel default(none)			\
  shared(fs,mask_in,mask_out,aporad,apotyp)
  {
    int iy;
    flouble **rarr;
    double x_thr=aporad;
    double inv_x_thr=1./x_thr;
    flouble dx=fs->lx/fs->nx;
    flouble dy=fs->ly/fs->ny; //BUGFIX: was fs->lx/fs->nx (wrong for non-square pixels/patches)
    int nx_patch=(int)(1.2*aporad/dx);
    int ny_patch=(int)(1.2*aporad/dy);

    //Precompute distance (radians) from the patch center to every pixel offset
    rarr=my_malloc((2*ny_patch+1)*sizeof(flouble *));
    for(iy=0;iy<=2*ny_patch;iy++) {
      int ix;
      flouble y=(iy-ny_patch)*dy;
      rarr[iy]=my_malloc((2*nx_patch+1)*sizeof(flouble));
      for(ix=0;ix<=2*nx_patch;ix++) {
	flouble x=(ix-nx_patch)*dx;
	rarr[iy][ix]=sqrt(x*x+y*y);
      }
    }

#pragma omp for schedule(dynamic)
    for(iy=0;iy<fs->ny;iy++) {
      int ix;
      for(ix=0;ix<fs->nx;ix++) {
	int index=ix+fs->nx*iy;
	flouble rmin=100000; //larger than any distance inside the patch
	if(mask_in[index]>0) {
	  int iyy;
	  //Distance to the closest masked pixel within the search patch
	  for(iyy=0;iyy<=2*ny_patch;iyy++) {
	    int ixx;
	    if(iy+iyy-ny_patch<0)
	      continue;
	    if(iy+iyy-ny_patch>=fs->ny)
	      break;
	    for(ixx=0;ixx<=2*nx_patch;ixx++) {
	      if(ix+ixx-nx_patch<0)
		continue;
	      if(ix+ixx-nx_patch>=fs->nx)
		break;
	      int index2=ix+ixx-nx_patch+fs->nx*(iy+iyy-ny_patch);
	      if(mask_in[index2]<=0) {
		if(rarr[iyy][ixx]<rmin)
		  rmin=rarr[iyy][ixx];
	      }
	    }
	  }
	  if(rmin<x_thr) {
	    flouble f,xn;
	    if(rmin<0) //Defensive only: rmin is a Euclidean distance, never negative
	      f=0;
	    else {
	      xn=rmin*inv_x_thr;
	      if(apotyp==0)
		f=xn-sin(xn*2*M_PI)/(2*M_PI);
	      else
		f=0.5*(1-cos(xn*M_PI));
	    }
	    mask_out[index]*=f;
	  }
	}
      }
    } //end omp for

    for(iy=0;iy<=2*ny_patch;iy++)
      free(rarr[iy]);
    free(rarr);
  } //end omp parallel
}

/* Apodize a mask with the "Smooth" scheme: zero every pixel within
 * 2.5*aposize of a masked (<=0) pixel, smooth the result with a Gaussian
 * kernel of sigma = aposize, and multiply it back into the input mask. */
static void apodize_mask_smooth(nmt_flatsky_info *fs,flouble *mask_in,flouble *mask_out,flouble aposize)
{
  long npix=fs->nx*fs->ny;
  double aporad=aposize*M_PI/180;
  flouble *mask_dum=my_malloc(npix*sizeof(flouble));
  fcomplex *alms_dum=my_malloc(fs->ny*(fs->nx/2+1)*sizeof(fcomplex));
  memcpy(mask_dum,mask_in,npix*sizeof(flouble));

#pragma omp parallel default(none)		\
  shared(fs,npix,mask_in,mask_dum,aporad)
  {
    int iy;
    flouble **rarr;
    double x_thr=2.5*aporad; //dilate holes by 2.5 sigma before smoothing
    flouble dx=fs->lx/fs->nx;
    flouble dy=fs->ly/fs->ny; //BUGFIX: was fs->lx/fs->nx (wrong for non-square pixels/patches)
    int nx_patch=(int)(1.2*x_thr/dx);
    int ny_patch=(int)(1.2*x_thr/dy);

    //Precompute distance (radians) from the patch center to every pixel offset
    rarr=my_malloc((2*ny_patch+1)*sizeof(flouble *));
    for(iy=0;iy<=2*ny_patch;iy++) {
      int ix;
      flouble y=(iy-ny_patch)*dy;
      rarr[iy]=my_malloc((2*nx_patch+1)*sizeof(flouble));
      for(ix=0;ix<=2*nx_patch;ix++) {
	flouble x=(ix-nx_patch)*dx;
	rarr[iy][ix]=sqrt(x*x+y*y);
      }
    }

#pragma omp for schedule(dynamic)
    for(iy=0;iy<fs->ny;iy++) {
      int ix;
      for(ix=0;ix<fs->nx;ix++) {
	int index=ix+fs->nx*iy;
	if(mask_in[index]<=0) {
	  int iyy;
	  //Zero out all pixels within x_thr of this masked pixel.
	  for(iyy=0;iyy<=2*ny_patch;iyy++) {
	    int ixx;
	    if(iy+iyy-ny_patch<0)
	      continue;
	    if(iy+iyy-ny_patch>=fs->ny)
	      break;
	    for(ixx=0;ixx<=2*nx_patch;ixx++) {
	      if(ix+ixx-nx_patch<0)
		continue;
	      if(ix+ixx-nx_patch>=fs->nx)
		break;
	      if(rarr[iyy][ixx]<=x_thr) {
		int index2=ix+ixx-nx_patch+fs->nx*(iy+iyy-ny_patch);
		//Plain store instead of "*=0": avoids a racy non-atomic
		//read-modify-write when several threads hit the same pixel
		//(all writers store the same value, 0).
		mask_dum[index2]=0;
	      }
	    }
	  }
	}
      }
    } //end omp for

    for(iy=0;iy<=2*ny_patch;iy++)
      free(rarr[iy]);
    free(rarr);
  } //end omp parallel

  fs_map2alm(fs,1,0,&mask_dum,&alms_dum);
  //Gaussian smoothing; beam passed as FWHM in arcmin (2.355 ~ sigma-to-FWHM factor)
  fs_alter_alm(fs,aporad*180*60*2.355/M_PI,alms_dum,alms_dum,NULL,0);
  fs_alm2map(fs,1,0,&mask_dum,&alms_dum);
  fs_map_product(fs,mask_in,mask_dum,mask_out);

  free(mask_dum);
  free(alms_dum);
}

/* Public entry point: apodize a flat-sky mask defined on an nx x ny grid of
 * physical size lx x ly (radians), writing the result to mask_out.
 *   aposize : apodization scale in degrees (0 => plain copy)
 *   apotype : "C1", "C2" or "Smooth"
 * Reports an NMT_ERROR_APO error on negative scales or unknown types. */
void nmt_apodize_mask_flat(int nx,int ny,flouble lx,flouble ly,
			   flouble *mask_in,flouble *mask_out,flouble aposize,char *apotype)
{
  if(aposize<0)
    report_error(NMT_ERROR_APO,"Apodization scale must be a positive number\n");
  else if(aposize==0) {
    int ii;
    for(ii=0;ii<nx*ny;ii++)
      mask_out[ii]=mask_in[ii];
  }
  else {
    nmt_flatsky_info *fs=nmt_flatsky_info_alloc(nx,ny,lx,ly);
    if((!strcmp(apotype,"C1")) || (!strcmp(apotype,"C2"))) {
      apodize_mask_CX(fs,mask_in,mask_out,aposize,apotype);
    }
    else if(!strcmp(apotype,"Smooth"))
      apodize_mask_smooth(fs,mask_in,mask_out,aposize);
    else {
      nmt_flatsky_info_free(fs);
      report_error(NMT_ERROR_APO,"Unknown apodization type %s. Allowed: \"Smooth\", \"C1\", \"C2\"\n",apotype);
    }
    nmt_flatsky_info_free(fs);
  }
}
trmm_x_sky_n_lo_col_conj.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy) { #ifdef COMPLEX ALPHA_INT num_threads = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT i = 0; i < mat->rows; i++) for(ALPHA_INT j = 0; j < columns; j++) alpha_mul(y[index2(j, i, ldy)], y[index2(j, i, ldy)], beta); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT cc = 0; cc < columns; ++cc) { for (ALPHA_INT ac = 0; ac < mat->cols; ++ac) { ALPHA_INT start = mat->pointers[ac]; ALPHA_INT end = mat->pointers[ac + 1]; ALPHA_INT idx = 1; ALPHA_INT eles_num = end - start; for (ALPHA_INT ai = start; ai < end; ++ai) { ALPHA_INT cr = ac - eles_num + idx; if (ac >= cr) { ALPHA_Number t; alpha_mul_3c(t, alpha, mat->values[ai]); alpha_madde(y[index2(cc, cr, ldy)], t, x[index2(cc, ac, ldx)]); } idx++; } } } return ALPHA_SPARSE_STATUS_SUCCESS; #else return ALPHA_SPARSE_STATUS_INVALID_VALUE; #endif }